/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* \file cc_driver.h
* ARM CryptoCell Linux Crypto Driver
*/
# ifndef __CC_DRIVER_H__
# define __CC_DRIVER_H__
# ifdef COMP_IN_WQ
# include <linux/workqueue.h>
# else
# include <linux/interrupt.h>
# endif
# include <linux/dma-mapping.h>
# include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
# include <crypto/aes.h>
# include <crypto/sha.h>
# include <crypto/aead.h>
# include <crypto/authenc.h>
# include <crypto/hash.h>
# include <crypto/skcipher.h>
# include <linux/version.h>
# include <linux/clk.h>
# include <linux/platform_device.h>
/* Registers definitions from shared/hw/ree_include */
# include "cc_host_regs.h"
# define CC_DEV_SHA_MAX 512
# include "cc_crypto_ctx.h"
# include "cc_hw_queue_defs.h"
# include "cc_sram_mgr.h"
/* Debug flags, defined elsewhere in the driver. cc_dump_bytes gates
 * dump_byte_array() below; cc_dump_desc presumably gates HW descriptor
 * dumping — confirm against the descriptor send path.
 */
extern bool cc_dump_desc;
extern bool cc_dump_bytes;

#define DRV_MODULE_VERSION "5.0"
/* Supported CryptoCell HW revisions; numeric values match the marketing
 * revision numbers so they can be compared with >= (see
 * set_queue_last_ind() below).
 */
enum cc_hw_rev {
	CC_HW_REV_630 = 630,
	CC_HW_REV_710 = 710,
	CC_HW_REV_712 = 712,
	CC_HW_REV_713 = 713
};
/* Cryptographic standards bodies whose algorithms may be enabled.
 * Values are bit flags (CC_STD_ALL == CC_STD_NIST | CC_STD_OSCCA) and are
 * OR-ed into the std_bodies bitmask of struct cc_drvdata.
 */
enum cc_std_body {
	CC_STD_NIST = 0x1,
	CC_STD_OSCCA = 0x2,
	CC_STD_ALL = 0x3
};
#define CC_COHERENT_CACHE_PARAMS 0xEEE

/* Maximum DMA mask supported by IP */
#define DMA_BIT_MASK_LEN 48

/* Mask of AXI master response/completion interrupt bits */
#define CC_AXI_IRQ_MASK ((1 << CC_AXIM_CFG_BRESPMASK_BIT_SHIFT) | \
			 (1 << CC_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \
			 (1 << CC_AXIM_CFG_INFLTMASK_BIT_SHIFT) | \
			 (1 << CC_AXIM_CFG_COMPMASK_BIT_SHIFT))

#define CC_AXI_ERR_IRQ_MASK BIT(CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)

#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)

#define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT)

#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
				    CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
				    CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)

/* All eight per-slot "REE AES operation aborted" interrupt bits */
#define CC_CPP_AES_ABORT_MASK ( \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT))

/* All eight per-slot "REE SM4 operation aborted" interrupt bits */
#define CC_CPP_SM4_ABORT_MASK ( \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT))

/* Register name mangling macro */
#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET

/* TEE FIPS status interrupt */
#define CC_GPR0_IRQ_MASK BIT(CC_HOST_IRR_GPR0_BIT_SHIFT)

#define CC_CRA_PRIO 400

#define MIN_HW_QUEUE_SIZE 50 /* Minimum size required for proper function */

#define MAX_REQUEST_QUEUE_SIZE 4096
#define MAX_MLLI_BUFF_SIZE 2080
#define MAX_ICV_NENTS_SUPPORTED 2

/* Definitions for HW descriptors DIN/DOUT fields */
#define NS_BIT 1
#define AXI_ID 0
/* AXI_ID is not actually the AXI ID of the transaction but the value of
 * the AXI_ID field in the HW descriptor; the DMA engine adds 8 to that
 * value.
 */
/*
 * CPP operation context carried inside a crypto request.
 * NOTE(review): "CPP" expansion assumed from naming — confirm against the
 * request manager code.
 */
struct cc_cpp_req {
	bool is_cpp;		/* true when this request is a CPP operation */
	enum cc_cpp_alg alg;	/* which engine the operation targets */
	u8 slot;		/* CPP slot index (see the per-slot abort masks) */
};
2018-01-22 12:27:00 +03:00
# define CC_MAX_IVGEN_DMA_ADDRESSES 3
struct cc_crypto_req {
void ( * user_cb ) ( struct device * dev , void * req , int err ) ;
void * user_arg ;
dma_addr_t ivgen_dma_addr [ CC_MAX_IVGEN_DMA_ADDRESSES ] ;
/* For the first 'ivgen_dma_addr_len' addresses of this array,
* generated IV would be placed in it by send_request ( ) .
* Same generated IV for all addresses !
*/
/* Amount of 'ivgen_dma_addr' elements to be filled. */
unsigned int ivgen_dma_addr_len ;
/* The generated IV size required, 8/16 B allowed. */
unsigned int ivgen_size ;
struct completion seq_compl ; /* request completion */
2019-04-18 16:38:40 +03:00
struct cc_cpp_req cpp ;
2018-01-22 12:27:00 +03:00
} ;
/**
 * struct cc_drvdata - driver private data context
 * @cc_base:	virt address of the CC registers
 * @irq:	device IRQ number
 * @irq_mask:	Interrupt mask shadow (1 for masked interrupts)
 */
struct cc_drvdata {
	void __iomem *cc_base;
	int irq;
	u32 irq_mask;
	struct completion hw_queue_avail; /* wait for HW queue availability */
	struct platform_device *plat_dev;
	cc_sram_addr_t mlli_sram_addr;	/* SRAM address of the MLLI buffer */
	/* Opaque per-subsystem states, owned by the respective managers */
	void *buff_mgr_handle;
	void *cipher_handle;
	void *hash_handle;
	void *aead_handle;
	void *request_mgr_handle;
	void *fips_handle;
	void *ivgen_handle;
	void *sram_mgr_handle;
	void *debugfs;
	struct clk *clk;
	bool coherent;	/* presumably: DMA is cache-coherent — confirm at probe */
	char *hw_rev_name;	/* printable name of the detected HW revision */
	enum cc_hw_rev hw_rev;
	/* HW-revision-dependent register offsets */
	u32 axim_mon_offset;
	u32 sig_offset;
	u32 ver_offset;
	int std_bodies;	/* bitmask of enum cc_std_body values */
	bool sec_disabled;	/* security disabled in HW (see CC_SECURITY_DISABLED_MASK) */
	u32 comp_mask;	/* completion interrupt mask */
};
struct cc_crypto_alg {
struct list_head entry ;
int cipher_mode ;
int flow_mode ; /* Note: currently, refers to the cipher mode only. */
int auth_mode ;
2018-01-22 12:27:01 +03:00
unsigned int data_unit ;
2018-01-22 12:27:00 +03:00
struct cc_drvdata * drvdata ;
2018-01-22 12:27:01 +03:00
struct skcipher_alg skcipher_alg ;
2018-01-22 12:27:03 +03:00
struct aead_alg aead_alg ;
2018-01-22 12:27:00 +03:00
} ;
struct cc_alg_template {
char name [ CRYPTO_MAX_ALG_NAME ] ;
char driver_name [ CRYPTO_MAX_ALG_NAME ] ;
unsigned int blocksize ;
union {
struct skcipher_alg skcipher ;
struct aead_alg aead ;
} template_u ;
int cipher_mode ;
int flow_mode ; /* Note: currently, refers to the cipher mode only. */
int auth_mode ;
2018-02-19 17:51:23 +03:00
u32 min_hw_rev ;
2018-11-13 12:40:35 +03:00
enum cc_std_body std_body ;
2019-04-18 16:38:39 +03:00
bool sec_func ;
2018-01-22 12:27:01 +03:00
unsigned int data_unit ;
2018-01-22 12:27:00 +03:00
struct cc_drvdata * drvdata ;
} ;
/* Common async request context: DMA-mapped IV address plus the direction
 * (encrypt/decrypt) of the operation.
 */
struct async_gen_req_ctx {
	dma_addr_t iv_dma_addr;
	enum drv_crypto_direction op_type;
};
static inline struct device * drvdata_to_dev ( struct cc_drvdata * drvdata )
{
return & drvdata - > plat_dev - > dev ;
}
void __dump_byte_array(const char *name, const u8 *buf, size_t len);

/* Dump @size bytes of @the_array to the log, labelled @name, but only
 * when the cc_dump_bytes debug flag is set.
 */
static inline void dump_byte_array(const char *name, const u8 *the_array,
				   size_t size)
{
	if (cc_dump_bytes)
		__dump_byte_array(name, the_array, size);
}
/* Register init/teardown and clock control, implemented in the driver core */
int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
void fini_cc_regs(struct cc_drvdata *drvdata);
int cc_clk_on(struct cc_drvdata *drvdata);
void cc_clk_off(struct cc_drvdata *drvdata);
unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata);
/* Write @val to the CC register at byte offset @reg. */
static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
{
	iowrite32(val, drvdata->cc_base + reg);
}
/* Read the CC register at byte offset @reg. */
static inline u32 cc_ioread(struct cc_drvdata *drvdata, u32 reg)
{
	return ioread32(drvdata->cc_base + reg);
}
static inline gfp_t cc_gfp_flags ( struct crypto_async_request * req )
{
return ( req - > flags & CRYPTO_TFM_REQ_MAY_SLEEP ) ?
GFP_KERNEL : GFP_ATOMIC ;
}
2018-02-19 17:51:23 +03:00
static inline void set_queue_last_ind ( struct cc_drvdata * drvdata ,
struct cc_hw_desc * pdesc )
{
if ( drvdata - > hw_rev > = CC_HW_REV_712 )
set_queue_last_ind_bit ( pdesc ) ;
}
# endif /*__CC_DRIVER_H__*/