// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");

static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, htable_offset;
	int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;

	if (priv->version == EIP197D_MRVL) {
		cs_rc_max = EIP197D_CS_RC_MAX;
		cs_ht_wc = EIP197D_CS_HT_WC;
		cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC;
		cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC;
	} else {
		/* Default to minimum "safe" settings */
		cs_rc_max = EIP197B_CS_RC_MAX;
		cs_ht_wc = EIP197B_CS_HT_WC;
		cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC;
		cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC;
	}

	/* Enable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Make sure the cache memory is accessible by taking record cache into
	 * reset.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET;
	val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);
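	/*
	 * Link all cache records into one free list: record i points at
	 * records i - 1 and i + 1, the first and last entries are terminated
	 * with EIP197_RC_NULL, and the matching head/tail pointers are
	 * programmed into EIP197_TRC_FREECHAIN further down.
	 */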
	/* Clear all records */
	for (i = 0; i < cs_rc_max; i++) {
		u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

		val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == cs_rc_max - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + sizeof(u32));
	}

	/* Clear the hash table entries */
	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
	for (i = 0; i < cs_ht_wc; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));

	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(2);
	writel(val, priv->base + EIP197_TRC_PARAMS);
}

static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
				  const struct firmware *fw, int pe, u32 ctrl,
				  u32 prog_en)
{
	const u32 *data = (const u32 *)fw->data;
	u32 val;
	int i;

	/* Reset the engine to make its program memory accessible */
	writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
	       EIP197_PE(priv) + ctrl);

	/* Enable access to the program memory */
	writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	/* Write the firmware */
	for (i = 0; i < fw->size / sizeof(u32); i++)
		writel(be32_to_cpu(data[i]),
		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));

	/* Disable access to the program memory */
	writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	/* Release engine from reset */
	val = readl(EIP197_PE(priv) + ctrl);
	val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
	writel(val, EIP197_PE(priv) + ctrl);
}

static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
	const struct firmware *fw[FW_NB];
	char fw_path[31], *dir = NULL;
	int i, j, ret = 0, pe;
	u32 val;

	if (priv->version == EIP197D_MRVL)
		dir = "eip197d";
	else if (priv->version == EIP197B_MRVL ||
		 priv->version == EIP197_DEVBRD)
		dir = "eip197b";
	else
		return -ENODEV;

	for (i = 0; i < FW_NB; i++) {
		snprintf(fw_path, 31, "inside-secure/%s/%s", dir, fw_name[i]);
		ret = request_firmware(&fw[i], fw_path, priv->dev);
		if (ret) {
			if (priv->version != EIP197B_MRVL)
				goto release_fw;

			/* Fallback to the old firmware location for the
			 * EIP197b.
			 */
			ret = request_firmware(&fw[i], fw_name[i], priv->dev);
			if (ret) {
				dev_err(priv->dev,
					"Failed to request firmware %s (%d)\n",
					fw_name[i], ret);
				goto release_fw;
			}
		}
	}

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Clear the scratchpad memory */
		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));

		memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0,
			  EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));

		eip197_write_firmware(priv, fw[FW_IFPP], pe,
				      EIP197_PE_ICE_FPP_CTRL(pe),
				      EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);

		eip197_write_firmware(priv, fw[FW_IPUE], pe,
				      EIP197_PE_ICE_PUE_CTRL(pe),
				      EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
	}

release_fw:
	for (j = 0; j < i; j++)
		release_firmware(fw[j]);

	return ret;
}

static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, cd_size_rnd, val;
	int i;

	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;
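	/*
	 * Bits 27:25 of HIA_OPTIONS encode the host interface data width as a
	 * power-of-two number of 32-bit words; round the command descriptor
	 * size up to that width so the fetch size programmed below stays
	 * aligned.
	 */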
	cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
		       priv->config.cd_size,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.cd_offset),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(5, 0),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
	}

	return 0;
}

static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, rd_size_rnd, val;
	int i;

	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
		       priv->config.rd_size,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
		writel(val,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(7, 0),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* enable ring interrupt */
		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
		val |= EIP197_RDR_IRQ(i);
		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
	}

	return 0;
}

static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
	u32 version, val;
	int i, ret, pe;

	dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
		priv->config.pes, priv->config.rings);

	/* Determine endianness and configure byte swap */
	version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
	val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
		val |= EIP197_MST_CTRL_BYTE_SWAP;
	else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
		val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);

	/*
	 * For EIP197s only: set the maximum number of TX commands to
	 * 2^5 = 32. Skip for the EIP97 as it does not have this field.
	 */
	if (priv->version != EIP97IES_MRVL)
		val |= EIP197_MST_CTRL_TX_MAX_CMD(5);

	writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	/* Configure wr/rd cache values */
	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

	/* Interrupts reset */

	/* Disable all global interrupts */
	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

	/* Clear any pending interrupt */
	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	/* Processing Engine configuration */
	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Data Fetch Engine configuration */

		/* Reset all DFE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		if (priv->version != EIP97IES_MRVL)
			/* Reset HIA input interface arbiter (EIP197 only) */
			writel(EIP197_HIA_RA_PE_CTRL_RESET,
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

		/* DMA transfer size to use */
		val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
		val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
		val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));

		/* Take the DFE threads out of reset */
		writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(9),
		       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(7),
		       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));

		if (priv->version != EIP97IES_MRVL)
			/* enable HIA input interface arbiter and rings */
			writel(EIP197_HIA_RA_PE_CTRL_EN |
			       GENMASK(priv->config.rings - 1, 0),
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

		/* Data Store Engine configuration */

		/* Reset all DSE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Wait for all DSE threads to complete */
		while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
			GENMASK(15, 12)) != GENMASK(15, 12))
			;

		/* DMA transfer size to use */
		val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
		val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
		/* FIXME: instability issues can occur for EIP97 but disabling
		 * it impacts performance.
		 */
		if (priv->version != EIP97IES_MRVL)
			val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
		writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));

		/* Take the DSE threads out of reset */
		writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
		       EIP197_PE_OUT_DBUF_THRES_MAX(8),
		       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));

		/* Processing Engine configuration */

		/* Token & context configuration */
		val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
		      EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX |
		      EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX;
		writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));

		/* H/W capabilities selection: just enable everything */
		writel(EIP197_FUNCTION_ALL,
		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
	}

	/* Command Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Clear interrupts for this ring */
		writel(GENMASK(31, 0),
		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

		/* Disable external triggering */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Result Descriptor Ring prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Enable command descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Enable result descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
	}

	/* Clear any HIA interrupt */
	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	if (priv->version != EIP97IES_MRVL) {
		eip197_trc_cache_init(priv);

		ret = eip197_load_firmwares(priv);
		if (ret)
			return ret;
	}

	safexcel_hw_setup_cdesc_rings(priv);
	safexcel_hw_setup_rdesc_rings(priv);

	return 0;
}

/* Called with ring's lock taken */
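/*
 * Program the RDR interrupt threshold so the engine raises a single
 * interrupt for a batch of up to EIP197_MAX_BATCH_SZ queued requests
 * instead of one interrupt per request.
 */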
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
				       int ring)
{
	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

	if (!coal)
		return;

	/* Configure when we want an interrupt */
	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}

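/*
 * Drain the per-ring software queue: for each request ask its context to
 * emit the command/result descriptors (ctx->send()), then advertise the
 * number of prepared descriptors to the CDR/RDR prepare-count registers.
 */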
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
	struct crypto_async_request *req, *backlog;
	struct safexcel_context *ctx;
	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

	/* If a request wasn't properly dequeued because of a lack of resources,
	 * proceed with it first.
	 */
	req = priv->ring[ring].req;
	backlog = priv->ring[ring].backlog;
	if (req)
		goto handle_req;

	while (true) {
		spin_lock_bh(&priv->ring[ring].queue_lock);
		backlog = crypto_get_backlog(&priv->ring[ring].queue);
		req = crypto_dequeue_request(&priv->ring[ring].queue);
		spin_unlock_bh(&priv->ring[ring].queue_lock);

		if (!req) {
			priv->ring[ring].req = NULL;
			priv->ring[ring].backlog = NULL;
			goto finalize;
		}

handle_req:
		ctx = crypto_tfm_ctx(req->tfm);
		ret = ctx->send(req, ring, &commands, &results);
		if (ret)
			goto request_failed;

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		/* In case the send() helper did not issue any command to push
		 * to the engine because the input data was cached, continue to
		 * dequeue other requests as this is valid and not an error.
		 */
		if (!commands && !results)
			continue;

		cdesc += commands;
		rdesc += results;
		nreq++;
	}

request_failed:
	/* Not enough resources to handle all the requests. Bail out and save
	 * the request and the backlog for the next dequeue call (per-ring).
	 */
	priv->ring[ring].req = req;
	priv->ring[ring].backlog = backlog;

finalize:
	if (!nreq)
		return;

	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests += nreq;

	if (!priv->ring[ring].busy) {
		safexcel_try_push_requests(priv, ring);
		priv->ring[ring].busy = true;
	}

	spin_unlock_bh(&priv->ring[ring].lock);

	/* let the RDR know we have pending descriptors */
	writel((rdesc * priv->config.rd_offset) << 2,
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

	/* let the CDR know we have pending descriptors */
	writel((cdesc * priv->config.cd_offset) << 2,
	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}

inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
				       struct safexcel_result_desc *rdesc)
{
	if (likely((!rdesc->descriptor_overflow) &&
		   (!rdesc->buffer_overflow) &&
		   (!rdesc->result_data.error_code)))
		return 0;

	if (rdesc->descriptor_overflow)
		dev_err(priv->dev, "Descriptor overflow detected");

	if (rdesc->buffer_overflow)
		dev_err(priv->dev, "Buffer overflow detected");

	if (rdesc->result_data.error_code & 0x4066) {
		/* Fatal error (bits 1, 2, 5, 6 & 14) */
		dev_err(priv->dev,
			"result descriptor error (%x)",
			rdesc->result_data.error_code);
		return -EIO;
	} else if (rdesc->result_data.error_code &
		   (BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
		/*
		 * Give priority over authentication fails:
		 * blocksize, length & overflow errors,
		 * something wrong with the input!
		 */
		return -EINVAL;
	} else if (rdesc->result_data.error_code & BIT(9)) {
		/* Authentication failed */
		return -EBADMSG;
	}

	/* All other non-fatal errors */
	return -EINVAL;
}

inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
				 int ring,
				 struct safexcel_result_desc *rdesc,
				 struct crypto_async_request *req)
{
	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);

	priv->ring[ring].rdr_req[i] = req;
}

inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
{
	int i = safexcel_ring_first_rdr_index(priv, ring);

	return priv->ring[ring].rdr_req[i];
}

void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;

	/* Acknowledge the command descriptors */
	do {
		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
		if (IS_ERR(cdesc)) {
			dev_err(priv->dev,
				"Could not retrieve the command descriptor\n");
			return;
		}
	} while (!cdesc->last_seg);
}

void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_inv_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

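/*
 * Queue a special command/result descriptor pair asking the engine to
 * invalidate (drop) the context record it may have cached for this
 * transform, typically because the key changed or the transform is being
 * freed.
 */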
int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	int ret = 0;

	/* Prepare command descriptor */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
	cdesc->control_data.options = 0;
	cdesc->control_data.refresh = 0;
	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

	/* Prepare result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	safexcel_rdr_req_set(priv, ring, rdesc, async);

	return ret;

cdesc_rollback:
	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	return ret;
}

static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
						     int ring)
{
	struct crypto_async_request *req;
	struct safexcel_context *ctx;
	int ret, i, nreq, ndesc, tot_descs, handled = 0;
	bool should_complete;

handle_results:
	tot_descs = 0;

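	/*
	 * The RDR processed-count register reports how many packets (and
	 * descriptor words) the engine has completed since we last
	 * acknowledged them; the same register is written back below to
	 * acknowledge whatever was handled here.
	 */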
	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
	if (!nreq)
		goto requests_left;

	for (i = 0; i < nreq; i++) {
		req = safexcel_rdr_req_get(priv, ring);

		ctx = crypto_tfm_ctx(req->tfm);
		ndesc = ctx->handle_result(priv, ring, req,
					   &should_complete, &ret);
		if (ndesc < 0) {
			dev_err(priv->dev, "failed to handle result (%d)\n",
				ndesc);
			goto acknowledge;
		}

		if (should_complete) {
			local_bh_disable();
			req->complete(req, ret);
			local_bh_enable();
		}

		tot_descs += ndesc;
		handled++;
	}

acknowledge:
	if (i)
		writel(EIP197_xDR_PROC_xD_PKT(i) |
		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);

	/* If the number of requests overflowed the counter, try to process
	 * more requests.
	 */
	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
		goto handle_results;

requests_left:
	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests -= handled;
	safexcel_try_push_requests(priv, ring);

	if (!priv->ring[ring].requests)
		priv->ring[ring].busy = false;

	spin_unlock_bh(&priv->ring[ring].lock);
}

static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
			container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;
};

static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring, rc = IRQ_NONE;
	u32 status, stat;

	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
	if (!status)
		return rc;

	/* RDR interrupts */
	if (status & EIP197_RDR_IRQ(ring)) {
		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

		if (unlikely(stat & EIP197_xDR_ERR)) {
			/*
			 * Fatal error, the RDR is unusable and must be
			 * reinitialized. This should not happen under
			 * normal circumstances.
			 */
			dev_err(priv->dev, "RDR: fatal error.\n");
		} else if (likely(stat & EIP197_xDR_THRESH)) {
			rc = IRQ_WAKE_THREAD;
		}

		/* ACK the interrupts */
		writel(stat & 0xff,
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
	}

	/* ACK the interrupts */
	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

	return rc;
}

static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring;

	safexcel_handle_result_descriptor(priv, ring);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return IRQ_HANDLED;
}

static int safexcel_request_ring_irq(void *pdev, int irqid,
				     int is_pci_dev,
				     irq_handler_t handler,
				     irq_handler_t threaded_handler,
				     struct safexcel_ring_irq_data *ring_irq_priv)
{
	int ret, irq;
	struct device *dev;

	if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
		struct pci_dev *pci_pdev = pdev;

		dev = &pci_pdev->dev;
		irq = pci_irq_vector(pci_pdev, irqid);
		if (irq < 0) {
			dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n",
				irqid, irq);
			return irq;
		}
	} else if (IS_ENABLED(CONFIG_OF)) {
		struct platform_device *plf_pdev = pdev;
		char irq_name[6] = {0}; /* "ringX\0" */

		snprintf(irq_name, 6, "ring%d", irqid);
		dev = &plf_pdev->dev;
		irq = platform_get_irq_byname(plf_pdev, irq_name);
		if (irq < 0) {
			dev_err(dev, "unable to get IRQ '%s' (err %d)\n",
				irq_name, irq);
			return irq;
		}
	} else {
		/* Neither PCI nor OF support compiled in: should not happen,
		 * but avoid using dev/irq uninitialized.
		 */
		return -ENXIO;
	}

	ret = devm_request_threaded_irq(dev, irq, handler,
					threaded_handler, IRQF_ONESHOT,
					dev_name(dev), ring_irq_priv);
	if (ret) {
		dev_err(dev, "unable to request IRQ %d\n", irq);
		return ret;
	}

	return irq;
}

static struct safexcel_alg_template *safexcel_algs[] = {
	&safexcel_alg_ecb_des,
	&safexcel_alg_cbc_des,
	&safexcel_alg_ecb_des3_ede,
	&safexcel_alg_cbc_des3_ede,
	&safexcel_alg_ecb_aes,
	&safexcel_alg_cbc_aes,
	&safexcel_alg_ctr_aes,
	&safexcel_alg_md5,
	&safexcel_alg_sha1,
	&safexcel_alg_sha224,
	&safexcel_alg_sha256,
	&safexcel_alg_sha384,
	&safexcel_alg_sha512,
	&safexcel_alg_hmac_md5,
	&safexcel_alg_hmac_sha1,
	&safexcel_alg_hmac_sha224,
	&safexcel_alg_hmac_sha256,
	&safexcel_alg_hmac_sha384,
	&safexcel_alg_hmac_sha512,
	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
	&safexcel_alg_authenc_hmac_sha384_cbc_aes,
	&safexcel_alg_authenc_hmac_sha512_cbc_aes,
	&safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha1_ctr_aes,
	&safexcel_alg_authenc_hmac_sha224_ctr_aes,
	&safexcel_alg_authenc_hmac_sha256_ctr_aes,
	&safexcel_alg_authenc_hmac_sha384_ctr_aes,
	&safexcel_alg_authenc_hmac_sha512_ctr_aes,
};

static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
	int i, j, ret = 0;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		safexcel_algs[i]->priv = priv;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
	}

	return ret;
}

static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
	}
}

static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
	u32 val, mask = 0;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);

	/* Read number of PEs from the engine */
	if (priv->version == EIP97IES_MRVL)
		/* Narrow field width for EIP97 type engine */
		mask = EIP97_N_PES_MASK;
	else
		/* Wider field width for all EIP197 type engines */
		mask = EIP197_N_PES_MASK;
	priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;

	priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
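	/*
	 * Round the command/result descriptor sizes (in 32-bit words) up to
	 * the host data width encoded in bits 27:25 of HIA_OPTIONS, so that
	 * descriptors are laid out in the rings at an aligned offset. For
	 * instance, a 6-word descriptor with a 4-word host data width yields
	 * an 8-word offset.
	 */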
	val = (val & GENMASK(27, 25)) >> 25;
	mask = BIT(val) - 1;

	priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;

	priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}

static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
	struct safexcel_register_offsets *offsets = &priv->offsets;

	if (priv->version == EIP97IES_MRVL) {
		offsets->hia_aic	= EIP97_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP97_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP97_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP97_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP97_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP97_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP97_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP97_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP97_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP97_PE_BASE;
	} else {
		offsets->hia_aic	= EIP197_HIA_AIC_BASE;
		offsets->hia_aic_g	= EIP197_HIA_AIC_G_BASE;
		offsets->hia_aic_r	= EIP197_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr	= EIP197_HIA_AIC_xDR_BASE;
		offsets->hia_dfe	= EIP197_HIA_DFE_BASE;
		offsets->hia_dfe_thr	= EIP197_HIA_DFE_THR_BASE;
		offsets->hia_dse	= EIP197_HIA_DSE_BASE;
		offsets->hia_dse_thr	= EIP197_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg	= EIP197_HIA_GEN_CFG_BASE;
		offsets->pe		= EIP197_PE_BASE;
	}
}

/*
 * Generic part of probe routine, shared by platform and PCI driver
 *
 * Assumes IO resources have been mapped, private data mem has been allocated,
 * clocks have been enabled, device pointer has been assigned etc.
 */
static int safexcel_probe_generic(void *pdev,
				  struct safexcel_crypto_priv *priv,
				  int is_pci_dev)
{
	struct device *dev = priv->dev;
	int i, ret;

	priv->context_pool = dmam_pool_create("safexcel-context", dev,
					      sizeof(struct safexcel_context_record),
					      1, 0);
	if (!priv->context_pool)
		return -ENOMEM;

	safexcel_init_register_offsets(priv);

	if (priv->version != EIP97IES_MRVL)
		priv->flags |= EIP197_TRC_CACHE;

	safexcel_configure(priv);

	if (IS_ENABLED(CONFIG_PCI) && priv->version == EIP197_DEVBRD) {
		/*
		 * Request MSI vectors for global + 1 per ring -
		 * or just 1 for older dev images
		 */
		struct pci_dev *pci_pdev = pdev;

		ret = pci_alloc_irq_vectors(pci_pdev,
					    priv->config.rings + 1,
					    priv->config.rings + 1,
					    PCI_IRQ_MSI | PCI_IRQ_MSIX);
		if (ret < 0) {
			dev_err(dev, "Failed to allocate PCI MSI interrupts\n");
			return ret;
		}
	}

	/* Register the ring IRQ handlers and configure the rings */
	priv->ring = devm_kcalloc(dev, priv->config.rings,
				  sizeof(*priv->ring),
				  GFP_KERNEL);
	if (!priv->ring)
		return -ENOMEM;

	for (i = 0; i < priv->config.rings; i++) {
		char wq_name[9] = {0};
		int irq;
		struct safexcel_ring_irq_data *ring_irq;

		ret = safexcel_init_ring_descriptors(priv,
						     &priv->ring[i].cdr,
						     &priv->ring[i].rdr);
		if (ret) {
			dev_err(dev, "Failed to initialize rings\n");
			return ret;
		}
		priv->ring[i].rdr_req = devm_kcalloc(dev,
						     EIP197_DEFAULT_RING_SIZE,
						     sizeof(priv->ring[i].rdr_req),
						     GFP_KERNEL);
		if (!priv->ring[i].rdr_req)
			return -ENOMEM;

		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
		if (!ring_irq)
			return -ENOMEM;

		ring_irq->priv = priv;
		ring_irq->ring = i;

		irq = safexcel_request_ring_irq(pdev,
						EIP197_IRQ_NUMBER(i, is_pci_dev),
						is_pci_dev,
						safexcel_irq_ring,
						safexcel_irq_ring_thread,
						ring_irq);
		if (irq < 0) {
			dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
			return irq;
		}

		priv->ring[i].work_data.priv = priv;
		priv->ring[i].work_data.ring = i;
		INIT_WORK(&priv->ring[i].work_data.work,
			  safexcel_dequeue_work);

		snprintf(wq_name, 9, "wq_ring%d", i);
		priv->ring[i].workqueue =
			create_singlethread_workqueue(wq_name);
		if (!priv->ring[i].workqueue)
			return -ENOMEM;

		priv->ring[i].requests = 0;
		priv->ring[i].busy = false;

		crypto_init_queue(&priv->ring[i].queue,
				  EIP197_DEFAULT_RING_SIZE);

		spin_lock_init(&priv->ring[i].lock);
		spin_lock_init(&priv->ring[i].queue_lock);
	}

	atomic_set(&priv->ring_used, 0);

	ret = safexcel_hw_init(priv);
	if (ret) {
		dev_err(dev, "HW init failed (%d)\n", ret);
		return ret;
	}

	ret = safexcel_register_algorithms(priv);
	if (ret) {
		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
		return ret;
	}

	return 0;
}

static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < priv->config.rings; i++) {
		/* clear any pending interrupt */
		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* Reset the CDR base address */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		/* Reset the RDR base address */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
	}
}

#if IS_ENABLED(CONFIG_OF)
/* for Device Tree platform driver */

static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

	platform_set_drvdata(pdev, priv);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	ret = PTR_ERR_OR_ZERO(priv->clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	}

	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			goto err_core_clk;

		ret = clk_prepare_enable(priv->reg_clk);
		if (ret) {
			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
			goto err_core_clk;
		}
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_reg_clk;

	/* Generic EIP97/EIP197 device probing */
	ret = safexcel_probe_generic(pdev, priv, 0);
	if (ret)
		goto err_reg_clk;

	return 0;

err_reg_clk:
	clk_disable_unprepare(priv->reg_clk);
err_core_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	safexcel_hw_reset_rings(priv);

	clk_disable_unprepare(priv->clk);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	return 0;
}

static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97ies",
		.data = (void *)EIP97IES_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197b",
		.data = (void *)EIP197B_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197d",
		.data = (void *)EIP197D_MRVL,
	},
	/* For backward compatibility and intended for generic use */
	{
		.compatible = "inside-secure,safexcel-eip97",
		.data = (void *)EIP97IES_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197",
		.data = (void *)EIP197B_MRVL,
	},
	{},
};

static struct platform_driver crypto_safexcel = {
	.probe		= safexcel_probe,
	.remove		= safexcel_remove,
	.driver		= {
		.name	= "crypto-safexcel",
		.of_match_table = safexcel_of_match_table,
	},
};
#endif

#if IS_ENABLED(CONFIG_PCI)
/* PCIE devices - i.e. Inside Secure development boards */

static int safexcel_pci_probe(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	void __iomem *pciebase;
	int rc;
	u32 val;

	dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n",
		ent->vendor, ent->device, ent->subvendor,
		ent->subdevice, ent->driver_data);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)ent->driver_data;

	pci_set_drvdata(pdev, priv);

	/* enable the device */
	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(dev, "Failed to enable PCI device\n");
		return rc;
	}

	/* take ownership of PCI BAR0 */
	rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel");
	if (rc) {
		dev_err(dev, "Failed to map IO region for BAR0\n");
		return rc;
	}
	priv->base = pcim_iomap_table(pdev)[0];

	if (priv->version == EIP197_DEVBRD) {
		dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");

		rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel");
		if (rc) {
			dev_err(dev, "Failed to map IO region for BAR4\n");
			return rc;
		}

		pciebase = pcim_iomap_table(pdev)[2];
		val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR);
		if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) {
			dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n",
				(val & 0xff));

			/* Setup MSI identity map mapping */
			writel(EIP197_XLX_USER_VECT_LUT0_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT1_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT2_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT3_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR);

			/* Enable all device interrupts */
			writel(GENMASK(31, 0),
			       pciebase + EIP197_XLX_USER_INT_ENB_MSK);
		} else {
			dev_err(dev, "Unrecognised IRQ block identifier %x\n",
				val);
			return -ENODEV;
		}

		/* HW reset FPGA dev board */
		/* assert reset */
		writel(1, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
		/* deassert reset */
		writel(0, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
	}

	/* enable bus mastering */
	pci_set_master(pdev);

	/* Generic EIP97/EIP197 device probing */
	rc = safexcel_probe_generic(pdev, priv, 1);
	return rc;
}

static void safexcel_pci_remove(struct pci_dev *pdev)
{
	struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	safexcel_hw_reset_rings(priv);
}

static const struct pci_device_id safexcel_pci_ids[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038,
			       0x16ae, 0xc522),
		/* assume EIP197B for now */
		.driver_data = EIP197_DEVBRD,
	},
	{},
};

MODULE_DEVICE_TABLE(pci, safexcel_pci_ids);

static struct pci_driver safexcel_pci_driver = {
	.name		= "crypto-safexcel",
	.id_table	= safexcel_pci_ids,
	.probe		= safexcel_pci_probe,
	.remove		= safexcel_pci_remove,
};
#endif

static int __init safexcel_init(void)
{
	int ret = 0;

#if IS_ENABLED(CONFIG_OF)
	/* Register platform driver */
	ret = platform_driver_register(&crypto_safexcel);
	if (ret)
		return ret;
#endif

#if IS_ENABLED(CONFIG_PCI)
	/* Register PCI driver */
	ret = pci_register_driver(&safexcel_pci_driver);
#endif

	return ret;
}

static void __exit safexcel_exit(void)
{
#if IS_ENABLED(CONFIG_OF)
	/* Unregister platform driver */
	platform_driver_unregister(&crypto_safexcel);
#endif

#if IS_ENABLED(CONFIG_PCI)
	/* Unregister PCI driver if successfully registered before */
	pci_unregister_driver(&safexcel_pci_driver);
#endif
}

module_init(safexcel_init);
module_exit(safexcel_exit);

MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
MODULE_LICENSE("GPL v2");