// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 */

#define pr_fmt(fmt) "%20s: " fmt, __func__
#define prn(num) pr_debug(#num "=%d\n", num)
#define prx(num) pr_debug(#num "=%x\n", num)

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/aead.h>

#include "omap-crypto.h"
#include "omap-aes.h"

/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);

static int aes_fallback_sz = 200;

#ifdef DEBUG
#define omap_aes_read(dd, offset)				\
({								\
	int _read_ret;						\
	_read_ret = __raw_readl(dd->io_base + offset);		\
	pr_debug("omap_aes_read(" #offset "=%#x)= %#x\n",	\
		 offset, _read_ret);				\
	_read_ret;						\
})
#else
inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}
#endif

#ifdef DEBUG
#define omap_aes_write(dd, offset, value)				\
	do {								\
		pr_debug("omap_aes_write(" #offset "=%#x) value=%#x\n",	\
			 offset, value);				\
		__raw_writel(value, dd->io_base + offset);		\
	} while (0)
#else
inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
			   u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}
#endif

static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_aes_read(dd, offset);
	val &= ~mask;
	val |= value;
	omap_aes_write(dd, offset, val);
}

static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_aes_write(dd, offset, *value);
}
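
/*
 * Power the accelerator up via runtime PM before touching registers;
 * FLAGS_INIT is set on first use so per-request error state starts clean.
 */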
static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
	int err;

	if (!(dd->flags & FLAGS_INIT)) {
		dd->flags |= FLAGS_INIT;
		dd->err = 0;
	}

	err = pm_runtime_get_sync(dd->dev);
	if (err < 0) {
		dev_err(dd->dev, "failed to get sync: %d\n", err);
		return err;
	}

	return 0;
}

void omap_aes_clear_copy_flags(struct omap_aes_dev *dd)
{
	dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_IN_DATA_ST_SHIFT);
	dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_OUT_DATA_ST_SHIFT);
	dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_ASSOC_DATA_ST_SHIFT);
}
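
/*
 * Program the key, IV and control register for the current request.
 * For GCM the key registers are cleared first so stale key material
 * from a previous operation cannot affect this one.
 */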
int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
	struct omap_aes_reqctx *rctx;
	unsigned int key32;
	int i, err;
	u32 val;

	err = omap_aes_hw_init(dd);
	if (err)
		return err;

	key32 = dd->ctx->keylen / sizeof(u32);

	/* RESET the key as previous HASH keys should not get affected */
	if (dd->flags & FLAGS_GCM)
		for (i = 0; i < 0x40; i = i + 4)
			omap_aes_write(dd, i, 0x0);

	for (i = 0; i < key32; i++) {
		omap_aes_write(dd, AES_REG_KEY(dd, i),
			       __le32_to_cpu(dd->ctx->key[i]));
	}

	if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->iv)
		omap_aes_write_n(dd, AES_REG_IV(dd, 0), (void *)dd->req->iv, 4);

	if ((dd->flags & (FLAGS_GCM)) && dd->aead_req->iv) {
		rctx = aead_request_ctx(dd->aead_req);
		omap_aes_write_n(dd, AES_REG_IV(dd, 0), (u32 *)rctx->iv, 4);
	}

	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
	if (dd->flags & FLAGS_CBC)
		val |= AES_REG_CTRL_CBC;

	if (dd->flags & (FLAGS_CTR | FLAGS_GCM))
		val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;

	if (dd->flags & FLAGS_GCM)
		val |= AES_REG_CTRL_GCM;

	if (dd->flags & FLAGS_ENCRYPT)
		val |= AES_REG_CTRL_DIRECTION;

	omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);

	return 0;
}

static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length)
{
	u32 mask, val;

	val = dd->pdata->dma_start;

	if (dd->dma_lch_out != NULL)
		val |= dd->pdata->dma_enable_out;

	if (dd->dma_lch_in != NULL)
		val |= dd->pdata->dma_enable_in;

	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
	       dd->pdata->dma_start;

	omap_aes_write_mask(dd, AES_REG_MASK(dd), val, mask);
}
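
/*
 * The OMAP4-class IP additionally needs the payload (and, for GCM, the
 * AAD) length programmed before the common DMA start bits are set.
 */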
static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
{
	omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
	omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);
	if (dd->flags & FLAGS_GCM)
		omap_aes_write(dd, AES_REG_A_LEN, dd->assoc_len);

	omap_aes_dma_trigger_omap2(dd, length);
}

static void omap_aes_dma_stop(struct omap_aes_dev *dd)
{
	u32 mask;

	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
	       dd->pdata->dma_start;

	omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
}
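
/*
 * Pick the least recently used device and rotate it to the tail of
 * dev_list, so requests are spread round-robin over all instances.
 */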
struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_reqctx *rctx)
{
	struct omap_aes_dev *dd;

	spin_lock_bh(&list_lock);
	dd = list_first_entry(&dev_list, struct omap_aes_dev, list);
	list_move_tail(&dd->list, &dev_list);
	rctx->dd = dd;
	spin_unlock_bh(&list_lock);

	return dd;
}

static void omap_aes_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}

static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
	int err;

	dd->dma_lch_out = NULL;
	dd->dma_lch_in = NULL;

	dd->dma_lch_in = dma_request_chan(dd->dev, "rx");
	if (IS_ERR(dd->dma_lch_in)) {
		dev_err(dd->dev, "Unable to request in DMA channel\n");
		return PTR_ERR(dd->dma_lch_in);
	}

	dd->dma_lch_out = dma_request_chan(dd->dev, "tx");
	if (IS_ERR(dd->dma_lch_out)) {
		dev_err(dd->dev, "Unable to request out DMA channel\n");
		err = PTR_ERR(dd->dma_lch_out);
		goto err_dma_out;
	}

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in);

	return err;
}

static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
	if (dd->pio_only)
		return;

	dma_release_channel(dd->dma_lch_out);
	dma_release_channel(dd->dma_lch_in);
}
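
/*
 * Queue the request on the DMA channels, or just enable the DATAIN
 * interrupt in PIO mode. The completion callback goes on the last
 * descriptor: the OUT channel when there is output to collect, the
 * IN channel otherwise.
 */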
static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
			      struct scatterlist *in_sg,
			      struct scatterlist *out_sg,
			      int in_sg_len, int out_sg_len)
{
	struct dma_async_tx_descriptor *tx_in, *tx_out = NULL, *cb_desc;
	struct dma_slave_config cfg;
	int ret;

	if (dd->pio_only) {
		scatterwalk_start(&dd->in_walk, dd->in_sg);
		if (out_sg_len)
			scatterwalk_start(&dd->out_walk, dd->out_sg);

		/* Enable DATAIN interrupt and let it take
		   care of the rest */
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
		return 0;
	}

	dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);

	memset(&cfg, 0, sizeof(cfg));

	cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
	cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = DST_MAXBURST;
	cfg.dst_maxburst = DST_MAXBURST;

	/* IN */
	ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_in) {
		dev_err(dd->dev, "IN prep_slave_sg() failed\n");
		return -EINVAL;
	}

	/* No callback necessary */
	tx_in->callback_param = dd;
	tx_in->callback = NULL;

	/* OUT */
	if (out_sg_len) {
		ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
		if (ret) {
			dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
				ret);
			return ret;
		}

		tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg,
						 out_sg_len,
						 DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT |
						 DMA_CTRL_ACK);
		if (!tx_out) {
			dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
			return -EINVAL;
		}

		cb_desc = tx_out;
	} else {
		cb_desc = tx_in;
	}

	if (dd->flags & FLAGS_GCM)
		cb_desc->callback = omap_aes_gcm_dma_out_callback;
	else
		cb_desc->callback = omap_aes_dma_out_callback;
	cb_desc->callback_param = dd;

	dmaengine_submit(tx_in);
	if (tx_out)
		dmaengine_submit(tx_out);

	dma_async_issue_pending(dd->dma_lch_in);
	if (out_sg_len)
		dma_async_issue_pending(dd->dma_lch_out);

	/* start DMA */
	dd->pdata->trigger(dd, dd->total);

	return 0;
}
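
/*
 * Map the scatterlists for DMA (not needed in PIO mode), then hand them
 * to omap_aes_crypt_dma(); on failure the mappings are undone again.
 */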
int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
	int err;

	pr_debug("total: %d\n", dd->total);

	if (!dd->pio_only) {
		err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
				 DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		if (dd->out_sg_len) {
			err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
					 DMA_FROM_DEVICE);
			if (!err) {
				dev_err(dd->dev, "dma_map_sg() error\n");
				return -EINVAL;
			}
		}
	}

	err = omap_aes_crypt_dma(dd, dd->in_sg, dd->out_sg, dd->in_sg_len,
				 dd->out_sg_len);
	if (err && !dd->pio_only) {
		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
		if (dd->out_sg_len)
			dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
				     DMA_FROM_DEVICE);
	}

	return err;
}

static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
	struct skcipher_request *req = dd->req;

	pr_debug("err: %d\n", err);

	crypto_finalize_skcipher_request(dd->engine, req, err);

	pm_runtime_mark_last_busy(dd->dev);
	pm_runtime_put_autosuspend(dd->dev);
}

int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
	pr_debug("total: %d\n", dd->total);

	omap_aes_dma_stop(dd);

	return 0;
}

static int omap_aes_handle_queue(struct omap_aes_dev *dd,
				 struct skcipher_request *req)
{
	if (req)
		return crypto_transfer_skcipher_request_to_engine(dd->engine, req);

	return 0;
}
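
/*
 * crypto_engine ->prepare_request hook: stash the request in the
 * device, copy/realign the scatterlists to satisfy the accelerator's
 * alignment rules and program the hardware via omap_aes_write_ctrl().
 */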
static int omap_aes_prepare_req(struct crypto_engine *engine,
				void *areq)
{
	struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
	struct omap_aes_ctx *ctx = crypto_skcipher_ctx(
			crypto_skcipher_reqtfm(req));
	struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct omap_aes_dev *dd = rctx->dd;
	int ret;
	u16 flags;

	if (!dd)
		return -ENODEV;

	/* assign new request to device */
	dd->req = req;
	dd->total = req->cryptlen;
	dd->total_save = req->cryptlen;
	dd->in_sg = req->src;
	dd->out_sg = req->dst;
	dd->orig_out = req->dst;

	flags = OMAP_CRYPTO_COPY_DATA;
	if (req->src == req->dst)
		flags |= OMAP_CRYPTO_FORCE_COPY;

	ret = omap_crypto_align_sg(&dd->in_sg, dd->total, AES_BLOCK_SIZE,
				   dd->in_sgl, flags,
				   FLAGS_IN_DATA_ST_SHIFT, &dd->flags);
	if (ret)
		return ret;

	ret = omap_crypto_align_sg(&dd->out_sg, dd->total, AES_BLOCK_SIZE,
				   &dd->out_sgl, 0,
				   FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
	if (ret)
		return ret;

	dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total);
	if (dd->in_sg_len < 0)
		return dd->in_sg_len;

	dd->out_sg_len = sg_nents_for_len(dd->out_sg, dd->total);
	if (dd->out_sg_len < 0)
		return dd->out_sg_len;

	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	dd->ctx = ctx;
	rctx->dd = dd;

	return omap_aes_write_ctrl(dd);
}

static int omap_aes_crypt_req(struct crypto_engine *engine,
			      void *areq)
{
	struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
	struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct omap_aes_dev *dd = rctx->dd;

	if (!dd)
		return -ENODEV;

	return omap_aes_crypt_dma_start(dd);
}

static void omap_aes_copy_ivout(struct omap_aes_dev *dd, u8 *ivbuf)
{
	int i;

	for (i = 0; i < 4; i++)
		((u32 *)ivbuf)[i] = omap_aes_read(dd, AES_REG_IV(dd, i));
}
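
/*
 * Completion tasklet: unmap the DMA buffers, undo any alignment copies,
 * publish the updated IV back to the request and finalize it.
 */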
static void omap_aes_done_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;

	pr_debug("enter done_task\n");

	if (!dd->pio_only) {
		dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
				       DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
			     DMA_FROM_DEVICE);
		omap_aes_crypt_dma_stop(dd);
	}

	omap_crypto_cleanup(dd->in_sg, NULL, 0, dd->total_save,
			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);

	omap_crypto_cleanup(dd->out_sg, dd->orig_out, 0, dd->total_save,
			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);

	/* Update IV output */
	if (dd->flags & (FLAGS_CBC | FLAGS_CTR))
		omap_aes_copy_ivout(dd, dd->req->iv);

	omap_aes_finish_req(dd, 0);

	pr_debug("exit\n");
}
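
/*
 * Common entry point for all skcipher modes. Requests shorter than
 * aes_fallback_sz are run synchronously through the software fallback,
 * where the accelerator's setup and DMA overhead is not worth paying.
 */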
static int omap_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct omap_aes_ctx *ctx = crypto_skcipher_ctx(
			crypto_skcipher_reqtfm(req));
	struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct omap_aes_dev *dd;
	int ret;

	if ((req->cryptlen % AES_BLOCK_SIZE) && !(mode & FLAGS_CTR))
		return -EINVAL;

	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->cryptlen,
		 !!(mode & FLAGS_ENCRYPT),
		 !!(mode & FLAGS_CBC));

	if (req->cryptlen < aes_fallback_sz) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags, NULL,
					      NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->cryptlen, req->iv);

		if (mode & FLAGS_ENCRYPT)
			ret = crypto_skcipher_encrypt(subreq);
		else
			ret = crypto_skcipher_decrypt(subreq);

		skcipher_request_zero(subreq);
		return ret;
	}

	dd = omap_aes_find_dev(rctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return omap_aes_handle_queue(dd, req);
}

/* ********************** ALG API ************************************ */

static int omap_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	pr_debug("enter, keylen: %d\n", keylen);

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
						      CRYPTO_TFM_REQ_MASK);

	/* Keep the software fallback's key in sync with the hardware key */
	ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
	if (ret)
		return ret;

	return 0;
}

static int omap_aes_ecb_encrypt(struct skcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct skcipher_request *req)
{
	return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct skcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct skcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_ctr_encrypt(struct skcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
}

static int omap_aes_ctr_decrypt(struct skcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CTR);
}

static int omap_aes_prepare_req(struct crypto_engine *engine,
				void *req);
static int omap_aes_crypt_req(struct crypto_engine *engine,
			      void *req);

static int omap_aes_init_tfm(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *blk;

	blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	ctx->fallback = blk;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));

	ctx->enginectx.op.prepare_request = omap_aes_prepare_req;
	ctx->enginectx.op.unprepare_request = NULL;
	ctx->enginectx.op.do_one_request = omap_aes_crypt_req;

	return 0;
}

static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
{
	struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->fallback)
		crypto_free_sync_skcipher(ctx->fallback);

	ctx->fallback = NULL;
}

/* ********************** ALGS ************************************ */

static struct skcipher_alg algs_ecb_cbc[] = {
{
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-omap",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct omap_aes_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= omap_aes_setkey,
	.encrypt		= omap_aes_ecb_encrypt,
	.decrypt		= omap_aes_ecb_decrypt,
	.init			= omap_aes_init_tfm,
	.exit			= omap_aes_exit_tfm,
},
{
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-omap",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct omap_aes_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= omap_aes_setkey,
	.encrypt		= omap_aes_cbc_encrypt,
	.decrypt		= omap_aes_cbc_decrypt,
	.init			= omap_aes_init_tfm,
	.exit			= omap_aes_exit_tfm,
}
};

static struct skcipher_alg algs_ctr[] = {
{
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "ctr-aes-omap",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct omap_aes_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= omap_aes_setkey,
	.encrypt		= omap_aes_ctr_encrypt,
	.decrypt		= omap_aes_ctr_decrypt,
	.init			= omap_aes_init_tfm,
	.exit			= omap_aes_exit_tfm,
}
};

static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
	{
		.algs_list	= algs_ecb_cbc,
		.size		= ARRAY_SIZE(algs_ecb_cbc),
	},
};

static struct aead_alg algs_aead_gcm[] = {
{
	.base = {
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-omap",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct omap_aes_gcm_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
	.init		= omap_aes_gcm_cra_init,
	.ivsize		= GCM_AES_IV_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,
	.setkey		= omap_aes_gcm_setkey,
	.setauthsize	= omap_aes_gcm_setauthsize,
	.encrypt	= omap_aes_gcm_encrypt,
	.decrypt	= omap_aes_gcm_decrypt,
},
{
	.base = {
		.cra_name		= "rfc4106(gcm(aes))",
		.cra_driver_name	= "rfc4106-gcm-aes-omap",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct omap_aes_gcm_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
	.init		= omap_aes_gcm_cra_init,
	.maxauthsize	= AES_BLOCK_SIZE,
	.ivsize		= GCM_RFC4106_IV_SIZE,
	.setkey		= omap_aes_4106gcm_setkey,
	.setauthsize	= omap_aes_4106gcm_setauthsize,
	.encrypt	= omap_aes_4106gcm_encrypt,
	.decrypt	= omap_aes_4106gcm_decrypt,
},
};

static struct omap_aes_aead_algs omap_aes_aead_info = {
	.algs_list	= algs_aead_gcm,
	.size		= ARRAY_SIZE(algs_aead_gcm),
};
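
/*
 * Per-SoC data: register layout, DMA control bits and the algorithm
 * set supported by each generation of the AES IP.
 */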
static const struct omap_aes_pdata omap_aes_pdata_omap2 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc),
	.trigger	= omap_aes_dma_trigger_omap2,
	.key_ofs	= 0x1c,
	.iv_ofs		= 0x20,
	.ctrl_ofs	= 0x30,
	.data_ofs	= 0x34,
	.rev_ofs	= 0x44,
	.mask_ofs	= 0x48,
	.dma_enable_in	= BIT(2),
	.dma_enable_out	= BIT(3),
	.dma_start	= BIT(5),
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};

#ifdef CONFIG_OF
static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc_ctr[] = {
	{
		.algs_list	= algs_ecb_cbc,
		.size		= ARRAY_SIZE(algs_ecb_cbc),
	},
	{
		.algs_list	= algs_ctr,
		.size		= ARRAY_SIZE(algs_ctr),
	},
};

static const struct omap_aes_pdata omap_aes_pdata_omap3 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc_ctr,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
	.trigger	= omap_aes_dma_trigger_omap2,
	.key_ofs	= 0x1c,
	.iv_ofs		= 0x20,
	.ctrl_ofs	= 0x30,
	.data_ofs	= 0x34,
	.rev_ofs	= 0x44,
	.mask_ofs	= 0x48,
	.dma_enable_in	= BIT(2),
	.dma_enable_out	= BIT(3),
	.dma_start	= BIT(5),
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};

static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc_ctr,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
	.aead_algs_info	= &omap_aes_aead_info,
	.trigger	= omap_aes_dma_trigger_omap4,
	.key_ofs	= 0x3c,
	.iv_ofs		= 0x40,
	.ctrl_ofs	= 0x50,
	.data_ofs	= 0x60,
	.rev_ofs	= 0x80,
	.mask_ofs	= 0x84,
	.irq_status_ofs = 0x8c,
	.irq_enable_ofs = 0x90,
	.dma_enable_in	= BIT(5),
	.dma_enable_out	= BIT(6),
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};
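
/*
 * PIO-mode interrupt handler: on DATA_IN, feed the next 16-byte block
 * from the input scatterlist into the data registers and arm DATA_OUT;
 * on DATA_OUT, read the processed block back and re-arm DATA_IN until
 * dd->total bytes have been handled.
 */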
static irqreturn_t omap_aes_irq(int irq, void *dev_id)
{
	struct omap_aes_dev *dd = dev_id;
	u32 status, i;
	u32 *src, *dst;

	status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd));
	if (status & AES_REG_IRQ_DATA_IN) {
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);

		BUG_ON(!dd->in_sg);

		BUG_ON(_calc_walked(in) > dd->in_sg->length);

		src = sg_virt(dd->in_sg) + _calc_walked(in);

		for (i = 0; i < AES_BLOCK_WORDS; i++) {
			omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);

			scatterwalk_advance(&dd->in_walk, 4);
			if (dd->in_sg->length == _calc_walked(in)) {
				dd->in_sg = sg_next(dd->in_sg);
				if (dd->in_sg) {
					scatterwalk_start(&dd->in_walk,
							  dd->in_sg);
					src = sg_virt(dd->in_sg) +
					      _calc_walked(in);
				}
			} else {
				src++;
			}
		}

		/* Clear IRQ status */
		status &= ~AES_REG_IRQ_DATA_IN;
		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);

		/* Enable DATA_OUT interrupt */
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);

	} else if (status & AES_REG_IRQ_DATA_OUT) {
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);

		BUG_ON(!dd->out_sg);

		BUG_ON(_calc_walked(out) > dd->out_sg->length);

		dst = sg_virt(dd->out_sg) + _calc_walked(out);

		for (i = 0; i < AES_BLOCK_WORDS; i++) {
			*dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
			scatterwalk_advance(&dd->out_walk, 4);
			if (dd->out_sg->length == _calc_walked(out)) {
				dd->out_sg = sg_next(dd->out_sg);
				if (dd->out_sg) {
					scatterwalk_start(&dd->out_walk,
							  dd->out_sg);
					dst = sg_virt(dd->out_sg) +
					      _calc_walked(out);
				}
			} else {
				dst++;
			}
		}

		dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total);

		/* Clear IRQ status */
		status &= ~AES_REG_IRQ_DATA_OUT;
		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);

		if (!dd->total)
			/* All bytes read! */
			tasklet_schedule(&dd->done_task);
		else
			/* Enable DATA_IN interrupt for next block */
			omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
	}

	return IRQ_HANDLED;
}

static const struct of_device_id omap_aes_of_match[] = {
	{
		.compatible	= "ti,omap2-aes",
		.data		= &omap_aes_pdata_omap2,
	},
	{
		.compatible	= "ti,omap3-aes",
		.data		= &omap_aes_pdata_omap3,
	},
	{
		.compatible	= "ti,omap4-aes",
		.data		= &omap_aes_pdata_omap4,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_aes_of_match);

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
		struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	int err = 0;

	dd->pdata = of_device_get_match_data(dev);
	if (!dd->pdata) {
		dev_err(dev, "no compatible OF match\n");
		err = -EINVAL;
		goto err;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		err = -EINVAL;
		goto err;
	}

err:
	return err;
}
#else
static const struct of_device_id omap_aes_of_match[] = {
	{},
};

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
		struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif

static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
		struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	int err = 0;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err;
	}
	memcpy(res, r, sizeof(*res));

	/* Only OMAP2/3 can be non-DT */
	dd->pdata = &omap_aes_pdata_omap2;

err:
	return err;
}
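
/*
 * sysfs knobs: "fallback" is the request-size threshold below which the
 * software implementation is used, "queue_len" resizes the engine queues
 * of all registered devices at runtime. For example (the exact device
 * path depends on the platform):
 *
 *   echo 512 > /sys/bus/platform/devices/<aes-device>/fallback
 */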
static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%d\n", aes_fallback_sz);
}

static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t size)
{
	ssize_t status;
	long value;

	status = kstrtol(buf, 0, &value);
	if (status)
		return status;

	/* HW accelerator only works with buffers > 9 */
	if (value < 9) {
		dev_err(dev, "minimum fallback size 9\n");
		return -EINVAL;
	}

	aes_fallback_sz = value;

	return size;
}

static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct omap_aes_dev *dd = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dd->engine->queue.max_qlen);
}

static ssize_t queue_len_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t size)
{
	struct omap_aes_dev *dd;
	ssize_t status;
	long value;
	unsigned long flags;

	status = kstrtol(buf, 0, &value);
	if (status)
		return status;

	if (value < 1)
		return -EINVAL;

	/*
	 * Changing the queue size on the fly is safe: if the new size is
	 * smaller than the current one, the queue simply stops accepting
	 * new entries until it has shrunk enough.
	 */
	spin_lock_bh(&list_lock);
	list_for_each_entry(dd, &dev_list, list) {
		spin_lock_irqsave(&dd->lock, flags);
		dd->engine->queue.max_qlen = value;
		dd->aead_queue.base.max_qlen = value;
		spin_unlock_irqrestore(&dd->lock, flags);
	}
	spin_unlock_bh(&list_lock);

	return size;
}

static DEVICE_ATTR_RW(queue_len);
static DEVICE_ATTR_RW(fallback);

static struct attribute *omap_aes_attrs[] = {
	&dev_attr_queue_len.attr,
	&dev_attr_fallback.attr,
	NULL,
};

static struct attribute_group omap_aes_attr_group = {
	.attrs = omap_aes_attrs,
};
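
/*
 * Probe: map the MMIO resource, enable runtime PM, read the IP revision,
 * set up DMA (falling back to PIO with an IRQ when no channels are
 * available), start a crypto engine and register the algorithms this
 * SoC variant supports.
 */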
static int omap_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_aes_dev *dd;
	struct skcipher_alg *algp;
	struct aead_alg *aalg;
	struct resource res;
	int err = -ENOMEM, i, j, irq = -1;
	u32 reg;

	dd = devm_kzalloc(dev, sizeof(struct omap_aes_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		goto err_data;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	aead_init_queue(&dd->aead_queue, OMAP_AES_QUEUE_LENGTH);

	err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
			       omap_aes_get_res_pdev(dd, pdev, &res);
	if (err)
		goto err_res;

	dd->io_base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dd->io_base)) {
		err = PTR_ERR(dd->io_base);
		goto err_res;
	}
	dd->phys_base = res.start;

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);

	pm_runtime_enable(dev);
	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		dev_err(dev, "%s: failed to get_sync(%d)\n",
			__func__, err);
		goto err_res;
	}

	omap_aes_dma_stop(dd);

	reg = omap_aes_read(dd, AES_REG_REV(dd));

	pm_runtime_put_sync(dev);

	dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
		 (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
		 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

	tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);

	err = omap_aes_dma_init(dd);
	if (err == -EPROBE_DEFER) {
		goto err_irq;
	} else if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
		dd->pio_only = 1;

		irq = platform_get_irq(pdev, 0);
		if (irq < 0) {
			err = irq;
			goto err_irq;
		}

		err = devm_request_irq(dev, irq, omap_aes_irq, 0,
				dev_name(dev), dd);
		if (err) {
			dev_err(dev, "Unable to grab omap-aes IRQ\n");
			goto err_irq;
		}
	}

	spin_lock_init(&dd->lock);

	INIT_LIST_HEAD(&dd->list);
	spin_lock(&list_lock);
	list_add_tail(&dd->list, &dev_list);
	spin_unlock(&list_lock);

	/* Initialize crypto engine */
	dd->engine = crypto_engine_alloc_init(dev, 1);
	if (!dd->engine) {
		err = -ENOMEM;
		goto err_engine;
	}

	err = crypto_engine_start(dd->engine);
	if (err)
		goto err_engine;

	for (i = 0; i < dd->pdata->algs_info_size; i++) {
		if (!dd->pdata->algs_info[i].registered) {
			for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
				algp = &dd->pdata->algs_info[i].algs_list[j];

				pr_debug("reg alg: %s\n", algp->base.cra_name);

				err = crypto_register_skcipher(algp);
				if (err)
					goto err_algs;

				dd->pdata->algs_info[i].registered++;
			}
		}
	}

	if (dd->pdata->aead_algs_info &&
	    !dd->pdata->aead_algs_info->registered) {
		for (i = 0; i < dd->pdata->aead_algs_info->size; i++) {
			aalg = &dd->pdata->aead_algs_info->algs_list[i];

			pr_debug("reg alg: %s\n", aalg->base.cra_name);

			err = crypto_register_aead(aalg);
			if (err)
				goto err_aead_algs;

			dd->pdata->aead_algs_info->registered++;
		}
	}

	err = sysfs_create_group(&dev->kobj, &omap_aes_attr_group);
	if (err) {
		dev_err(dev, "could not create sysfs device attrs\n");
		goto err_aead_algs;
	}

	return 0;
err_aead_algs:
	for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
		aalg = &dd->pdata->aead_algs_info->algs_list[i];
		crypto_unregister_aead(aalg);
	}
err_algs:
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_skcipher(
					&dd->pdata->algs_info[i].algs_list[j]);

err_engine:
	if (dd->engine)
		crypto_engine_exit(dd->engine);

	omap_aes_dma_cleanup(dd);
err_irq:
	tasklet_kill(&dd->done_task);
	pm_runtime_disable(dev);
err_res:
	dd = NULL;
err_data:
	dev_err(dev, "initialization failed.\n");
	return err;
}

static int omap_aes_remove(struct platform_device *pdev)
{
	struct omap_aes_dev *dd = platform_get_drvdata(pdev);
	struct aead_alg *aalg;
	int i, j;

	if (!dd)
		return -ENODEV;

	spin_lock(&list_lock);
	list_del(&dd->list);
	spin_unlock(&list_lock);

	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_skcipher(
					&dd->pdata->algs_info[i].algs_list[j]);

	for (i = dd->pdata->aead_algs_info->size - 1; i >= 0; i--) {
		aalg = &dd->pdata->aead_algs_info->algs_list[i];
		crypto_unregister_aead(aalg);
	}

	crypto_engine_exit(dd->engine);

	tasklet_kill(&dd->done_task);
	omap_aes_dma_cleanup(dd);
	pm_runtime_disable(dd->dev);

	sysfs_remove_group(&dd->dev->kobj, &omap_aes_attr_group);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int omap_aes_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);
	return 0;
}

static int omap_aes_resume(struct device *dev)
{
	pm_runtime_get_sync(dev);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_aes_pm_ops, omap_aes_suspend, omap_aes_resume);

static struct platform_driver omap_aes_driver = {
	.probe	= omap_aes_probe,
	.remove	= omap_aes_remove,
	.driver	= {
		.name	= "omap-aes",
		.pm	= &omap_aes_pm_ops,
		.of_match_table	= omap_aes_of_match,
	},
};

module_platform_driver(omap_aes_driver);

MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");