// SPDX-License-Identifier: GPL-2.0+
/*
* caam - Freescale FSL CAAM support for hw_random
*
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * Based on caamalg.c crypto API driver.
*
*/

#include <linux/hw_random.h>
#include <linux/completion.h>
#include <linux/atomic.h>
#include <linux/kfifo.h>

# include "compat.h"
# include "regs.h"
# include "intern.h"
# include "desc_constr.h"
# include "jr.h"
# include "error.h"

#define CAAM_RNG_MAX_FIFO_STORE_SIZE	16

/*
 * Length of used descriptors, see caam_init_desc()
 */
#define CAAM_RNG_DESC_LEN (CAAM_CMD_SZ +	\
			   CAAM_CMD_SZ +	\
			   CAAM_CMD_SZ + CAAM_PTR_SZ_MAX)

/* rng per-device context */
struct caam_rng_ctx {
	struct hwrng rng;		/* hw_random interface registered with the core */
	struct device *jrdev;		/* job ring device used to run the RNG jobs */
	struct device *ctrldev;		/* CAAM controller device, owns devres allocations */
	void *desc_async;		/* descriptor buffer for the background refill path */
	void *desc_sync;		/* descriptor buffer for synchronous (wait) reads */
	struct work_struct worker;	/* refills the FIFO once it has been drained */
	struct kfifo fifo;		/* buffered random bytes handed out to callers */
};
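
/* Per-request state shared with the caam_rng_done() completion callback */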
struct caam_rng_job_ctx {
	struct completion *done;
	int *err;
};

static struct caam_rng_ctx *to_caam_rng_ctx(struct hwrng *r)
{
	return (struct caam_rng_ctx *)r->priv;
}
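
/*
 * Job ring completion callback: translate a non-zero CAAM status into an
 * errno for the submitter and signal the completion it is sleeping on.
 */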
static void caam_rng_done(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct caam_rng_job_ctx *jctx = context;

	if (err)
		*jctx->err = caam_jr_strstatus(jrdev, err);

	complete(jctx->done);
}
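
/*
 * Build the job descriptor: an RNG class-1 operation with prediction
 * resistance enabled, followed by a FIFO STORE of
 * CAAM_RNG_MAX_FIFO_STORE_SIZE bytes to dst_dma.
 */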
static u32 *caam_init_desc(u32 *desc, dma_addr_t dst_dma)
{
	init_job_desc(desc, 0);	/* + 1 cmd_sz */
	/* Generate random bytes: + 1 cmd_sz */
	append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG |
			 OP_ALG_PR_ON);
	/* Store bytes: + 1 cmd_sz + caam_ptr_sz */
	append_fifo_store(desc, dst_dma,
			  CAAM_RNG_MAX_FIFO_STORE_SIZE, FIFOST_TYPE_RNGSTORE);

	print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS,
			     16, 4, desc, desc_bytes(desc), 1);

	return desc;
}
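
/*
 * Run one RNG job synchronously: map the destination buffer for DMA,
 * enqueue the descriptor and sleep on the completion until caam_rng_done()
 * reports the result. Returns the number of bytes written or a negative
 * errno.
 */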
static int caam_rng_read_one(struct device *jrdev,
			     void *dst, int len,
			     void *desc,
			     struct completion *done)
{
	dma_addr_t dst_dma;
	int err, ret = 0;
	struct caam_rng_job_ctx jctx = {
		.done = done,
		.err  = &ret,
	};

	/* The descriptor always stores a fixed-size block of random bytes */
	len = CAAM_RNG_MAX_FIFO_STORE_SIZE;

	dst_dma = dma_map_single(jrdev, dst, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map destination memory\n");
		return -ENOMEM;
	}

	init_completion(done);
	err = caam_jr_enqueue(jrdev,
			      caam_init_desc(desc, dst_dma),
			      caam_rng_done, &jctx);
	if (err == -EINPROGRESS) {
		wait_for_completion(done);
		err = 0;
	}

	dma_unmap_single(jrdev, dst_dma, len, DMA_FROM_DEVICE);

	return err ?: (ret ?: len);
}
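
/*
 * Top up the kfifo with one fixed-size block of random bytes, writing
 * directly into the kfifo buffer via a single-entry scatterlist.
 */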
static void caam_rng_fill_async(struct caam_rng_ctx *ctx)
{
	struct scatterlist sg[1];
	struct completion done;
	int len, nents;

	sg_init_table(sg, ARRAY_SIZE(sg));
	nents = kfifo_dma_in_prepare(&ctx->fifo, sg, ARRAY_SIZE(sg),
				     CAAM_RNG_MAX_FIFO_STORE_SIZE);
	if (!nents)
		return;

	len = caam_rng_read_one(ctx->jrdev, sg_virt(&sg[0]),
				sg[0].length,
				ctx->desc_async,
				&done);
	if (len < 0)
		return;

	kfifo_dma_in_finish(&ctx->fifo, len);
}
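
/* Deferred-work wrapper so the FIFO can be refilled outside the read path */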
static void caam_rng_worker(struct work_struct *work)
{
	struct caam_rng_ctx *ctx = container_of(work, struct caam_rng_ctx,
						worker);
	caam_rng_fill_async(ctx);
}
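
/*
 * hwrng .read callback. With wait == true, run a blocking job straight into
 * the caller's buffer; otherwise hand out whatever the kfifo already holds
 * and kick the worker to refill it once it has drained.
 */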
static int caam_read(struct hwrng *rng, void *dst, size_t max, bool wait)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
	int out;

	if (wait) {
		struct completion done;

		return caam_rng_read_one(ctx->jrdev, dst, max,
					 ctx->desc_sync, &done);
	}

	out = kfifo_out(&ctx->fifo, dst, max);
	if (kfifo_is_empty(&ctx->fifo))
		schedule_work(&ctx->worker);

	return out;
}
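
/* hwrng .cleanup callback: stop the refill worker, release the job ring and FIFO */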
static void caam_cleanup(struct hwrng *rng)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);

	flush_work(&ctx->worker);
	caam_jr_free(ctx->jrdev);
	kfifo_free(&ctx->fifo);
}
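
/*
 * hwrng .init callback: allocate the two job descriptors and the kfifo,
 * grab a job ring and pre-fill the FIFO so early reads find data.
 */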
static int caam_init(struct hwrng *rng)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
	int err;

	ctx->desc_sync = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
				      GFP_DMA | GFP_KERNEL);
	if (!ctx->desc_sync)
		return -ENOMEM;

	ctx->desc_async = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
				       GFP_DMA | GFP_KERNEL);
	if (!ctx->desc_async)
		return -ENOMEM;

	if (kfifo_alloc(&ctx->fifo, CAAM_RNG_MAX_FIFO_STORE_SIZE,
			GFP_DMA | GFP_KERNEL))
		return -ENOMEM;

	INIT_WORK(&ctx->worker, caam_rng_worker);

	ctx->jrdev = caam_jr_alloc();
	err = PTR_ERR_OR_ZERO(ctx->jrdev);
	if (err) {
		kfifo_free(&ctx->fifo);
		pr_err("Job Ring Device allocation for transform failed\n");
		return err;
	}

	/*
	 * Fill async buffer to have early randomness data for
	 * hw_random
	 */
	caam_rng_fill_async(ctx);

	return 0;
}
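
/* Forward declaration: caam_rng_init() also serves as the devres group id below */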
int caam_rng_init(struct device *ctrldev);
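
/* Tear down everything caam_rng_init() allocated for this controller */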
void caam_rng_exit(struct device *ctrldev)
{
	devres_release_group(ctrldev, caam_rng_init);
}
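
/*
 * Register the CAAM hwrng with the hw_random core, provided the controller
 * has an instantiated RNG block. All allocations go into a devres group
 * keyed on this function so caam_rng_exit() can undo them in one call.
 */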
int caam_rng_init(struct device *ctrldev)
{
	struct caam_rng_ctx *ctx;
	u32 rng_inst;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	int ret;

	/* Check for an instantiated RNG before registration */
	if (priv->era < 10)
		rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			    CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
	else
		rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;

	if (!rng_inst)
		return 0;

	if (!devres_open_group(ctrldev, caam_rng_init, GFP_KERNEL))
		return -ENOMEM;

	ctx = devm_kzalloc(ctrldev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ctrldev = ctrldev;

	ctx->rng.name    = "rng-caam";
	ctx->rng.init    = caam_init;
	ctx->rng.cleanup = caam_cleanup;
	ctx->rng.read    = caam_read;
	ctx->rng.priv    = (unsigned long)ctx;
	ctx->rng.quality = 1024;	/* bits of entropy per 1024 bits of data: claim full entropy */

	dev_info(ctrldev, "registering rng-caam\n");

	ret = devm_hwrng_register(ctrldev, &ctx->rng);
	if (ret) {
		caam_rng_exit(ctrldev);
		return ret;
	}

	devres_close_group(ctrldev, caam_rng_init);
	return 0;
}