/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 HiSilicon Limited. */
#ifndef __HISI_SEC_V2_H
#define __HISI_SEC_V2_H

#include <linux/hisi_acc_qm.h>

#include "sec_crypto.h"
2020-01-11 10:41:50 +08:00
/* Algorithm resource per hardware SEC queue */
struct sec_alg_res {
2020-03-05 10:06:25 +08:00
u8 * pbuf ;
dma_addr_t pbuf_dma ;
2019-11-13 19:11:04 +08:00
u8 * c_ivin ;
dma_addr_t c_ivin_dma ;
2021-06-04 09:31:26 +08:00
u8 * a_ivin ;
dma_addr_t a_ivin_dma ;
2020-01-11 10:41:56 +08:00
u8 * out_mac ;
dma_addr_t out_mac_dma ;
2019-11-13 19:11:04 +08:00
} ;
/* Cipher request of SEC private */
struct sec_cipher_req {
struct hisi_acc_hw_sgl * c_out ;
dma_addr_t c_out_dma ;
2020-03-05 10:06:24 +08:00
u8 * c_ivin ;
dma_addr_t c_ivin_dma ;
2019-11-13 19:11:04 +08:00
struct skcipher_request * sk_req ;
u32 c_len ;
bool encrypt ;
} ;
2020-01-11 10:41:56 +08:00
struct sec_aead_req {
u8 * out_mac ;
dma_addr_t out_mac_dma ;
2021-06-04 09:31:26 +08:00
u8 * a_ivin ;
dma_addr_t a_ivin_dma ;
2020-01-11 10:41:56 +08:00
struct aead_request * aead_req ;
} ;
2019-11-13 19:11:04 +08:00
/* SEC request of Crypto */
struct sec_req {
2021-05-28 18:26:13 +08:00
union {
struct sec_sqe sec_sqe ;
struct sec_sqe3 sec_sqe3 ;
} ;
2019-11-13 19:11:04 +08:00
struct sec_ctx * ctx ;
struct sec_qp_ctx * qp_ctx ;
2021-06-04 09:31:29 +08:00
/**
* Common parameter of the SEC request .
*/
struct hisi_acc_hw_sgl * in ;
dma_addr_t in_dma ;
2019-11-13 19:11:04 +08:00
struct sec_cipher_req c_req ;
2020-01-11 10:41:56 +08:00
struct sec_aead_req aead_req ;
2020-07-07 09:15:38 +08:00
struct list_head backlog_head ;
2020-01-11 10:41:56 +08:00
2019-11-13 19:11:04 +08:00
int err_type ;
int req_id ;
2021-03-13 15:28:24 +08:00
u32 flag ;
2019-11-13 19:11:04 +08:00
/* Status of the SEC request */
2020-01-11 10:41:48 +08:00
bool fake_busy ;
2020-03-05 10:06:25 +08:00
bool use_pbuf ;
2019-11-13 19:11:04 +08:00
} ;
/**
 * struct sec_req_op - Operations for SEC request
 * @buf_map: DMA map the SGL buffers of the request
 * @buf_unmap: DMA unmap the SGL buffers of the request
 * @do_transfer: Transfer request data (e.g. the IV) before the BD is built
 * @bd_fill: Fill the SEC queue BD
 * @bd_send: Send the SEC BD into the hardware queue
 * @callback: Call back for the request
 * @process: Main processing logic of Skcipher
 */
struct sec_req_op {
	int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
	void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
	void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
	int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
	int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
	void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
	int (*process)(struct sec_ctx *ctx, struct sec_req *req);
};
2020-01-11 10:41:56 +08:00
/* SEC auth context */
struct sec_auth_ctx {
dma_addr_t a_key_dma ;
u8 * a_key ;
u8 a_key_len ;
u8 mac_len ;
u8 a_alg ;
2021-06-04 09:31:27 +08:00
bool fallback ;
2020-01-11 10:41:56 +08:00
struct crypto_shash * hash_tfm ;
2021-06-04 09:31:27 +08:00
struct crypto_aead * fallback_aead_tfm ;
2020-01-11 10:41:56 +08:00
} ;
2019-11-13 19:11:04 +08:00
/* SEC cipher context which cipher's relatives */
struct sec_cipher_ctx {
u8 * c_key ;
dma_addr_t c_key_dma ;
sector_t iv_offset ;
u32 c_gran_size ;
u32 ivsize ;
u8 c_mode ;
u8 c_alg ;
u8 c_key_len ;
2021-05-28 19:42:05 +08:00
/* add software support */
bool fallback ;
struct crypto_sync_skcipher * fbtfm ;
2019-11-13 19:11:04 +08:00
} ;
/* SEC queue context which defines queue's relatives */
struct sec_qp_ctx {
struct hisi_qp * qp ;
2020-01-11 10:41:51 +08:00
struct sec_req * req_list [ QM_Q_DEPTH ] ;
2019-11-13 19:11:04 +08:00
struct idr req_idr ;
2020-01-11 10:41:51 +08:00
struct sec_alg_res res [ QM_Q_DEPTH ] ;
2019-11-13 19:11:04 +08:00
struct sec_ctx * ctx ;
struct mutex req_lock ;
2020-07-07 09:15:38 +08:00
struct list_head backlog ;
2019-11-13 19:11:04 +08:00
struct hisi_acc_sgl_pool * c_in_pool ;
struct hisi_acc_sgl_pool * c_out_pool ;
} ;
/* Algorithm family handled by a TFM context */
enum sec_alg_type {
	SEC_SKCIPHER,
	SEC_AEAD
};
2019-11-13 19:11:04 +08:00
/* SEC Crypto TFM context which defines queue and cipher .etc relatives */
struct sec_ctx {
struct sec_qp_ctx * qp_ctx ;
struct sec_dev * sec ;
const struct sec_req_op * req_op ;
2020-03-10 16:42:52 +08:00
struct hisi_qp * * qps ;
2019-11-13 19:11:04 +08:00
/* Half queues for encipher, and half for decipher */
u32 hlf_q_num ;
/* Threshold for fake busy, trigger to return -EBUSY to user */
u32 fake_req_limit ;
2022-05-21 13:10:45 +02:00
/* Current cyclic index to select a queue for encipher */
2019-11-13 19:11:04 +08:00
atomic_t enc_qcyclic ;
2022-05-21 13:10:45 +02:00
/* Current cyclic index to select a queue for decipher */
2019-11-13 19:11:04 +08:00
atomic_t dec_qcyclic ;
2020-01-11 10:41:56 +08:00
enum sec_alg_type alg_type ;
2020-03-05 10:06:25 +08:00
bool pbuf_supported ;
2019-11-13 19:11:04 +08:00
struct sec_cipher_ctx c_ctx ;
2020-01-11 10:41:56 +08:00
struct sec_auth_ctx a_ctx ;
2021-05-28 18:26:14 +08:00
u8 type_supported ;
2021-03-13 15:28:24 +08:00
struct device * dev ;
2019-11-13 19:11:04 +08:00
} ;
/* Index of each file exposed under the driver's debugfs directory */
enum sec_debug_file_index {
	SEC_CLEAR_ENABLE,
	SEC_DEBUG_FILE_NUM,
};
/* One debugfs file instance */
struct sec_debug_file {
	enum sec_debug_file_index index;
	/* Protects accesses made through this file */
	spinlock_t lock;
	struct hisi_qm *qm;
};
struct sec_dfx {
2020-01-07 21:08:58 +01:00
atomic64_t send_cnt ;
atomic64_t recv_cnt ;
2020-05-15 17:13:56 +08:00
atomic64_t send_busy_cnt ;
2020-07-07 09:15:38 +08:00
atomic64_t recv_busy_cnt ;
2020-05-15 17:13:56 +08:00
atomic64_t err_bd_cnt ;
atomic64_t invalid_req_cnt ;
atomic64_t done_flag_cnt ;
2019-11-13 19:11:07 +08:00
} ;
/* Aggregated debug state: statistics plus the debugfs files */
struct sec_debug {
	struct sec_dfx dfx;
	struct sec_debug_file files[SEC_DEBUG_FILE_NUM];
};
2019-11-13 19:11:04 +08:00
struct sec_dev {
struct hisi_qm qm ;
2019-11-13 19:11:07 +08:00
struct sec_debug debug ;
2019-11-13 19:11:04 +08:00
u32 ctx_q_num ;
2020-03-05 10:06:23 +08:00
bool iommu_used ;
2019-11-13 19:11:04 +08:00
} ;
/* Release the queue pairs previously obtained from sec_create_qps() */
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
/* Allocate the queue pairs used by the crypto layer; NULL on failure */
struct hisi_qp **sec_create_qps(void);
/* Register/unregister the SEC algorithms with the kernel crypto API */
int sec_register_to_crypto(struct hisi_qm *qm);
void sec_unregister_from_crypto(struct hisi_qm *qm);

#endif