// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/clk.h>

#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE
#define DCP_SHA_PAY_SZ	64

#define DCP_ALIGNMENT	64

/*
 * Null hashes to align with hardware behaviour on i.MX6SL and i.MX6ULL;
 * these are flipped for consistency with hardware output.
 */
static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";

static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";

/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];
	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};

struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
	struct clk			*dcp_clk;
};

enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 2,
};

struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex			mutex;
	uint32_t			alg;
	unsigned int			hot:1;

	/* Crypto-specific context */
	struct crypto_skcipher		*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
	unsigned int	enc:1;
	unsigned int	ecb:1;
	struct skcipher_request fallback_req;	// keep at the end
};

struct dcp_sha_req_ctx {
	unsigned int	init:1;
	unsigned int	fini:1;
};

struct dcp_export_state {
	struct dcp_sha_req_ctx req_ctx;
	struct dcp_async_ctx async_ctx;
};

/*
 * There can be only one instance of the MXS DCP, due to the
 * design of the Linux Crypto API.
 */
static struct dcp *global_sdcp;

/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))
#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)
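
/*
 * Illustrative note (derived from the descriptor setup in this file, not
 * from the reference manual): a one-shot AES-128 CBC encrypt pass ends up
 * with the control words
 *
 *	control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 *		   MXS_DCP_CONTROL0_INTERRUPT |
 *		   MXS_DCP_CONTROL0_ENABLE_CIPHER |
 *		   MXS_DCP_CONTROL0_PAYLOAD_KEY |
 *		   MXS_DCP_CONTROL0_CIPHER_ENCRYPT |
 *		   MXS_DCP_CONTROL0_CIPHER_INIT;
 *	control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128 |
 *		   MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
 *
 * See mxs_dcp_run_aes() for the actual assembly of these words.
 */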

static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	int dma_err;
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	dma_err = dma_mapping_error(sdcp->dev, desc_phys);
	if (dma_err)
		return dma_err;

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));

	/* Unmap on all paths, so the error returns below do not leak it. */
	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		return -ETIMEDOUT;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		return -EINVAL;
	}

	return 0;
}

/*
 * Encryption (AES128)
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct skcipher_request *req, int init)
{
	dma_addr_t key_phys, src_phys, dst_phys;
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
				  2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, key_phys);
	if (ret)
		return ret;

	src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
				  DCP_BUF_SZ, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, src_phys);
	if (ret)
		goto err_src;

	dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
				  DCP_BUF_SZ, DMA_FROM_DEVICE);
	ret = dma_mapping_error(sdcp->dev, dst_phys);
	if (ret)
		goto err_dst;

	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

aes_done_run:
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
err_dst:
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_src:
	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);

	return ret;
}

static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct skcipher_request *req = skcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	int dst_nents = sg_nents(dst);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint32_t dst_off = 0;
	uint8_t *src_buf = NULL;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	unsigned int i, len, clen, tlen = 0;
	int init = 0;
	bool limit_hit = false;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, sg_nents(req->src), i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		limit_hit = tlen > req->cryptlen;

		if (limit_hit)
			len = req->cryptlen - (tlen - len);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				sg_pcopy_from_buffer(dst, dst_nents, out_buf,
						     actx->fill, dst_off);
				dst_off += actx->fill;
				last_out_len = actx->fill;
				actx->fill = 0;
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/* Copy the IV back for CBC chaining. */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->iv, out_buf + (last_out_len - AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
		else
			memcpy(req->iv, in_buf + (last_out_len - AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
	}

	return ret;
}

static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			crypto_request_complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			crypto_request_complete(arq, ret);
		}
	}

	return 0;
}

static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);

	if (enc)
		ret = crypto_skcipher_encrypt(&rctx->fallback_req);
	else
		ret = crypto_skcipher_decrypt(&rctx->fallback_req);

	return ret;
}

static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return ret;
}

static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	/*
	 * AES 128 is supported by the hardware, so store the key into the
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by the in-kernel software implementation, we use
	 * the software fallback.
	 */
	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(actx->fallback,
				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(actx->fallback, key, len);
}

static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *blk;

	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
					 crypto_skcipher_reqsize(blk));
	return 0;
}

static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(actx->fallback);
}

/*
 * Hashing (SHA1/SHA256)
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	ret = dma_mapping_error(sdcp->dev, buf_phys);
	if (ret)
		return ret;

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align driver with hardware behaviour when generating null hashes.
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		ret = dma_mapping_error(sdcp->dev, digest_phys);
		if (ret)
			goto done_run;

		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}

static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	struct scatterlist *src;

	unsigned int i, len, clen, oft = 0;
	int ret;

	int fin = rctx->fini;
	if (fin)
		rctx->fini = 0;

	src = req->src;
	len = req->nbytes;

	while (len) {
		if (actx->fill + len > DCP_BUF_SZ)
			clen = DCP_BUF_SZ - actx->fill;
		else
			clen = len;

		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
					 0);

		len -= clen;
		oft += clen;
		actx->fill += clen;

		/*
		 * If we filled the buffer and still have some
		 * more data, submit the buffer.
		 */
		if (len && actx->fill == DCP_BUF_SZ) {
			ret = mxs_dcp_run_sha(req);
			if (ret)
				return ret;
			actx->fill = 0;
			rctx->init = 0;
		}
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* The hardware returns the digest byte-reversed; flip it back. */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}

static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;
	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			crypto_request_complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = dcp_sha_req_to_buf(arq);
			crypto_request_complete(arq, ret);
		}
	}

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}

static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return ret;
}

static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

static int dcp_sha_import(struct ahash_request *req, const void *in)
{
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	const struct dcp_export_state *export = in;

	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
	memset(actx, 0, sizeof(struct dcp_async_ctx));
	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_export(struct ahash_request *req, void *out)
{
	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
	struct dcp_export_state *export = out;

	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

/* AES 128 ECB and AES 128 CBC */
static struct skcipher_alg dcp_aes_algs[] = {
	{
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_ecb_encrypt,
		.decrypt		= mxs_dcp_aes_ecb_decrypt,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	}, {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_cbc_encrypt,
		.decrypt		= mxs_dcp_aes_cbc_decrypt,
		.ivsize			= AES_BLOCK_SIZE,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	},
};
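
/*
 * Illustrative sketch (not part of the driver): a kernel consumer reaches
 * the "cbc-aes-dcp" implementation through the generic skcipher API,
 * roughly like this (error handling omitted; src_sg, dst_sg, key, iv and
 * len are the caller's):
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				      CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 * Whether this driver or another "cbc(aes)" provider is selected depends
 * on the registered priorities.
 */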

/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};
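
/*
 * Illustrative sketch (not part of the driver): hashing through the
 * "sha256-dcp" implementation goes through the generic ahash API, e.g.:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, src_sg, digest, len);
 *	crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 * Error handling is omitted for brevity; src_sg, digest and len are the
 * caller's.
 */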

static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}

static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0)
		return dcp_vmi_irq;

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0)
		return dcp_irq;

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* DCP clock is optional, only used on some SoCs. */
	sdcp->dcp_clk = devm_clk_get_optional_enabled(dev, "dcp");
	if (IS_ERR(sdcp->dcp_clk))
		return PTR_ERR(sdcp->dcp_clk);

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret) {
		dev_err(dev, "Failed reset\n");
		return ret;
	}

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
		return ret;
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting Crypto thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_skciphers(dcp_aes_algs,
						ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

	return ret;
}

static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;

	return 0;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");