02884a4f12
When kunpeng920 encryption driver is used to encrypt and decrypt packets during the softirq, it is not allowed to use mutex lock. The kernel will report the following error: BUG: scheduling while atomic: swapper/57/0/0x00000300 Call trace: dump_backtrace+0x0/0x1e4 show_stack+0x20/0x2c dump_stack+0xd8/0x140 __schedule_bug+0x68/0x80 __schedule+0x728/0x840 schedule+0x50/0xe0 schedule_preempt_disabled+0x18/0x24 __mutex_lock.constprop.0+0x594/0x5dc __mutex_lock_slowpath+0x1c/0x30 mutex_lock+0x50/0x60 sec_request_init+0x8c/0x1a0 [hisi_sec2] sec_process+0x28/0x1ac [hisi_sec2] sec_skcipher_crypto+0xf4/0x1d4 [hisi_sec2] sec_skcipher_encrypt+0x1c/0x30 [hisi_sec2] crypto_skcipher_encrypt+0x2c/0x40 crypto_authenc_encrypt+0xc8/0xfc [authenc] crypto_aead_encrypt+0x2c/0x40 echainiv_encrypt+0x144/0x1a0 [echainiv] crypto_aead_encrypt+0x2c/0x40 esp_output_tail+0x348/0x5c0 [esp4] esp_output+0x120/0x19c [esp4] xfrm_output_one+0x25c/0x4d4 xfrm_output_resume+0x6c/0x1fc xfrm_output+0xac/0x3c0 xfrm4_output+0x64/0x130 ip_build_and_send_pkt+0x158/0x20c tcp_v4_send_synack+0xdc/0x1f0 tcp_conn_request+0x7d0/0x994 tcp_v4_conn_request+0x58/0x6c tcp_v6_conn_request+0xf0/0x100 tcp_rcv_state_process+0x1cc/0xd60 tcp_v4_do_rcv+0x10c/0x250 tcp_v4_rcv+0xfc4/0x10a4 ip_protocol_deliver_rcu+0xf4/0x200 ip_local_deliver_finish+0x58/0x70 ip_local_deliver+0x68/0x120 ip_sublist_rcv_finish+0x70/0x94 ip_list_rcv_finish.constprop.0+0x17c/0x1d0 ip_sublist_rcv+0x40/0xb0 ip_list_rcv+0x140/0x1dc __netif_receive_skb_list_core+0x154/0x28c __netif_receive_skb_list+0x120/0x1a0 netif_receive_skb_list_internal+0xe4/0x1f0 napi_complete_done+0x70/0x1f0 gro_cell_poll+0x9c/0xb0 napi_poll+0xcc/0x264 net_rx_action+0xd4/0x21c __do_softirq+0x130/0x358 irq_exit+0x11c/0x13c __handle_domain_irq+0x88/0xf0 gic_handle_irq+0x78/0x2c0 el1_irq+0xb8/0x140 arch_cpu_idle+0x18/0x40 default_idle_call+0x5c/0x1c0 cpuidle_idle_call+0x174/0x1b0 do_idle+0xc8/0x160 cpu_startup_entry+0x30/0x11c secondary_start_kernel+0x158/0x1e4 softirq: huh, 
entered softirq 3 NET_RX 0000000093774ee4 with preempt_count 00000100, exited with fffffe00? Fixes: 416d82204df4 ("crypto: hisilicon - add HiSilicon SEC V2 driver") Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
199 lines
4.3 KiB
C
199 lines
4.3 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
/* Copyright (c) 2019 HiSilicon Limited. */
|
|
|
|
#ifndef __HISI_SEC_V2_H
|
|
#define __HISI_SEC_V2_H
|
|
|
|
#include <linux/hisi_acc_qm.h>
|
|
#include "sec_crypto.h"
|
|
|
|
/*
 * Algorithm resource per hardware SEC queue.
 *
 * Each member comes as a CPU virtual pointer paired with the dma_addr_t
 * the hardware uses to reach the same buffer.
 */
struct sec_alg_res {
	u8 *pbuf;		/* small-packet bounce buffer */
	dma_addr_t pbuf_dma;	/* DMA address of @pbuf */
	u8 *c_ivin;		/* cipher IV input buffer */
	dma_addr_t c_ivin_dma;	/* DMA address of @c_ivin */
	u8 *a_ivin;		/* auth IV input buffer */
	dma_addr_t a_ivin_dma;	/* DMA address of @a_ivin */
	u8 *out_mac;		/* output MAC (digest) buffer */
	dma_addr_t out_mac_dma;	/* DMA address of @out_mac */
};
|
|
|
|
/* Cipher request of SEC private (per-request symmetric-cipher state) */
struct sec_cipher_req {
	struct hisi_acc_hw_sgl *c_out;	/* hardware SGL for output data */
	dma_addr_t c_out_dma;		/* DMA address of @c_out */
	u8 *c_ivin;			/* cipher IV input for this request */
	dma_addr_t c_ivin_dma;		/* DMA address of @c_ivin */
	struct skcipher_request *sk_req;	/* originating crypto API request */
	u32 c_len;			/* cipher data length in bytes */
	bool encrypt;			/* true = encrypt, false = decrypt */
};
|
|
|
|
/* AEAD request of SEC private (per-request authenticated-encryption state) */
struct sec_aead_req {
	u8 *out_mac;			/* output MAC (digest) for this request */
	dma_addr_t out_mac_dma;		/* DMA address of @out_mac */
	u8 *a_ivin;			/* auth IV input for this request */
	dma_addr_t a_ivin_dma;		/* DMA address of @a_ivin */
	struct aead_request *aead_req;	/* originating crypto API request */
};
|
|
|
|
/* SEC request of Crypto (one in-flight operation on a hardware queue) */
struct sec_req {
	/*
	 * Hardware descriptor for this request: v2 or v3 BD format,
	 * selected by the queue-pair version in use.
	 */
	union {
		struct sec_sqe sec_sqe;
		struct sec_sqe3 sec_sqe3;
	};
	struct sec_ctx *ctx;		/* owning TFM context */
	struct sec_qp_ctx *qp_ctx;	/* queue-pair context servicing this request */

	/**
	 * Common parameter of the SEC request.
	 */
	struct hisi_acc_hw_sgl *in;	/* hardware SGL for input data */
	dma_addr_t in_dma;		/* DMA address of @in */
	struct sec_cipher_req c_req;	/* cipher-specific per-request state */
	struct sec_aead_req aead_req;	/* AEAD-specific per-request state */
	struct list_head backlog_head;	/* link on qp_ctx->backlog when queue is busy */

	int err_type;			/* hardware-reported error type for completion */
	int req_id;			/* id allocated from qp_ctx->req_idr */
	u32 flag;			/* request flags from the crypto API caller */

	/* Status of the SEC request */
	bool fake_busy;			/* -EBUSY was returned but request was queued */
	bool use_pbuf;			/* small payload copied through res->pbuf */
};
|
|
|
|
/**
 * struct sec_req_op - Operations for SEC request
 * @buf_map: DMA map the SGL buffers of the request
 * @buf_unmap: DMA unmap the SGL buffers of the request
 * @do_transfer: Prepare request data (e.g. the IV) before BD fill
 * @bd_fill: Fill the SEC queue BD
 * @bd_send: Send the SEC BD into the hardware queue
 * @callback: Call back for the request
 * @process: Main processing logic of Skcipher
 */
struct sec_req_op {
	int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
	void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
	void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
	int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
	int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
	void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
	int (*process)(struct sec_ctx *ctx, struct sec_req *req);
};
|
|
|
|
/* SEC auth context (per-TFM authentication/hash state) */
struct sec_auth_ctx {
	dma_addr_t a_key_dma;		/* DMA address of @a_key */
	u8 *a_key;			/* authentication key buffer */
	u8 a_key_len;			/* authentication key length in bytes */
	u8 mac_len;			/* MAC (digest) length in bytes */
	u8 a_alg;			/* hardware auth algorithm selector */
	bool fallback;			/* true: use software fallback TFM */
	struct crypto_shash *hash_tfm;		/* shash TFM (e.g. for key hashing) */
	struct crypto_aead *fallback_aead_tfm;	/* software AEAD fallback */
};
|
|
|
|
/* SEC cipher context holding the cipher's related per-TFM state */
struct sec_cipher_ctx {
	u8 *c_key;			/* cipher key buffer */
	dma_addr_t c_key_dma;		/* DMA address of @c_key */
	sector_t iv_offset;		/* IV offset; sector_t suggests disk-style IV — confirm with users */
	u32 c_gran_size;		/* data granularity size — TODO confirm semantics with BD fill code */
	u32 ivsize;			/* IV length in bytes */
	u8 c_mode;			/* hardware cipher mode selector */
	u8 c_alg;			/* hardware cipher algorithm selector */
	u8 c_key_len;			/* cipher key length selector */

	/* add software support */
	bool fallback;			/* true: use software fallback TFM */
	struct crypto_sync_skcipher *fbtfm;	/* software skcipher fallback */
};
|
|
|
|
/* SEC queue context which defines the queue's related state */
struct sec_qp_ctx {
	struct hisi_qp *qp;			/* underlying QM queue pair */
	struct sec_req *req_list[QM_Q_DEPTH];	/* in-flight requests indexed by req_id */
	struct idr req_idr;			/* allocator for request ids */
	struct sec_alg_res res[QM_Q_DEPTH];	/* per-slot DMA resources */
	struct sec_ctx *ctx;			/* owning TFM context */
	/*
	 * Must be a spinlock, not a mutex: requests are initialized and
	 * queued from softirq (NET_RX) context, where sleeping locks
	 * trigger "scheduling while atomic".
	 */
	spinlock_t req_lock;
	struct list_head backlog;		/* requests deferred while queue is full */
	struct hisi_acc_sgl_pool *c_in_pool;	/* hardware SGL pool for input */
	struct hisi_acc_sgl_pool *c_out_pool;	/* hardware SGL pool for output */
};
|
|
|
|
/* Class of crypto algorithm a TFM context serves */
enum sec_alg_type {
	SEC_SKCIPHER,	/* symmetric cipher (skcipher) */
	SEC_AEAD	/* authenticated encryption (aead) */
};
|
|
|
|
/* SEC Crypto TFM context which defines queue, cipher, etc. related state */
struct sec_ctx {
	struct sec_qp_ctx *qp_ctx;	/* array of queue-pair contexts */
	struct sec_dev *sec;		/* owning device */
	const struct sec_req_op *req_op;	/* request operations (v2/v3 specific) */
	struct hisi_qp **qps;		/* queue pairs assigned to this context */

	/* Half of queues for encipher, and half for decipher */
	u32 hlf_q_num;

	/* Threshold for fake busy: trigger to return -EBUSY to user */
	u32 fake_req_limit;

	/* Current cyclic index to select a queue for encipher */
	atomic_t enc_qcyclic;

	/* Current cyclic index to select a queue for decipher */
	atomic_t dec_qcyclic;

	enum sec_alg_type alg_type;	/* SEC_SKCIPHER or SEC_AEAD */
	bool pbuf_supported;		/* hardware supports the pbuf fast path */
	struct sec_cipher_ctx c_ctx;	/* cipher-specific TFM state */
	struct sec_auth_ctx a_ctx;	/* auth-specific TFM state */
	u8 type_supported;		/* supported queue/BD type — TODO confirm against qp setup */
	struct device *dev;		/* device used for DMA mapping */
};
|
|
|
|
|
|
/* Indexes of the per-device debugfs files */
enum sec_debug_file_index {
	SEC_CLEAR_ENABLE,	/* "clear_enable" debugfs control */
	SEC_DEBUG_FILE_NUM,	/* number of debugfs files (array size) */
};
|
|
|
|
/* One debugfs file of a SEC device */
struct sec_debug_file {
	enum sec_debug_file_index index;	/* which file this is */
	spinlock_t lock;			/* serializes file read/write accesses */
	struct hisi_qm *qm;			/* owning queue-management device */
};
|
|
|
|
/* DFX (debug/statistics) counters of a SEC device */
struct sec_dfx {
	atomic64_t send_cnt;		/* BDs sent to hardware */
	atomic64_t recv_cnt;		/* completions received from hardware */
	atomic64_t send_busy_cnt;	/* sends that hit a busy queue */
	atomic64_t recv_busy_cnt;	/* completions while in the busy state */
	atomic64_t err_bd_cnt;		/* BDs completed with a hardware error */
	atomic64_t invalid_req_cnt;	/* completions with no matching request */
	atomic64_t done_flag_cnt;	/* completions with an unexpected done flag */
};
|
|
|
|
/* Aggregated debug state of a SEC device */
struct sec_debug {
	struct sec_dfx dfx;				/* statistics counters */
	struct sec_debug_file files[SEC_DEBUG_FILE_NUM];	/* debugfs files */
};
|
|
|
|
/* One SEC accelerator device */
struct sec_dev {
	struct hisi_qm qm;	/* embedded queue-management device; must stay first for container_of use — confirm */
	struct sec_debug debug;	/* debugfs/statistics state */
	u32 ctx_q_num;		/* number of queues per TFM context */
	bool iommu_used;	/* true if the device sits behind an IOMMU */
};
|
|
|
|
/* Release @qp_num queue pairs previously obtained from sec_create_qps() */
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
/* Allocate the queue pairs used by the crypto layer; NULL on failure — confirm error convention */
struct hisi_qp **sec_create_qps(void);
/* Register the device's algorithms with the kernel crypto API; 0 on success */
int sec_register_to_crypto(struct hisi_qm *qm);
/* Unregister the device's algorithms from the kernel crypto API */
void sec_unregister_from_crypto(struct hisi_qm *qm);
#endif
|