// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <asm/page.h>
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/log2.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uacce.h>
#include <linux/uaccess.h>
#include <uapi/misc/uacce/hisi_qm.h>
#include "qm.h"

/* eq/aeq irq enable */
#define QM_VF_AEQ_INT_SOURCE            0x0
#define QM_VF_AEQ_INT_MASK              0x4
#define QM_VF_EQ_INT_SOURCE             0x8
#define QM_VF_EQ_INT_MASK               0xc
#define QM_IRQ_NUM_V1                   1
#define QM_IRQ_NUM_PF_V2                4
#define QM_IRQ_NUM_VF_V2                2

#define QM_EQ_EVENT_IRQ_VECTOR          0
#define QM_AEQ_EVENT_IRQ_VECTOR         1
#define QM_ABNORMAL_EVENT_IRQ_VECTOR    3

/* mailbox */
#define QM_MB_CMD_SQC                   0x0
#define QM_MB_CMD_CQC                   0x1
#define QM_MB_CMD_EQC                   0x2
#define QM_MB_CMD_AEQC                  0x3
#define QM_MB_CMD_SQC_BT                0x4
#define QM_MB_CMD_CQC_BT                0x5
#define QM_MB_CMD_SQC_VFT_V2            0x6

#define QM_MB_CMD_SEND_BASE             0x300
#define QM_MB_EVENT_SHIFT               8
#define QM_MB_BUSY_SHIFT                13
#define QM_MB_OP_SHIFT                  14
#define QM_MB_CMD_DATA_ADDR_L           0x304
#define QM_MB_CMD_DATA_ADDR_H           0x308

/* sqc shift */
#define QM_SQ_HOP_NUM_SHIFT             0
#define QM_SQ_PAGE_SIZE_SHIFT           4
#define QM_SQ_BUF_SIZE_SHIFT            8
#define QM_SQ_SQE_SIZE_SHIFT            12
#define QM_SQ_PRIORITY_SHIFT            0
#define QM_SQ_ORDERS_SHIFT              4
#define QM_SQ_TYPE_SHIFT                8

#define QM_SQ_TYPE_MASK                 GENMASK(3, 0)
#define QM_SQ_TAIL_IDX(sqc)             ((le16_to_cpu((sqc)->w11) >> 6) & 0x1)

/* cqc shift */
#define QM_CQ_HOP_NUM_SHIFT             0
#define QM_CQ_PAGE_SIZE_SHIFT           4
#define QM_CQ_BUF_SIZE_SHIFT            8
#define QM_CQ_CQE_SIZE_SHIFT            12
#define QM_CQ_PHASE_SHIFT               0
#define QM_CQ_FLAG_SHIFT                1

#define QM_CQE_PHASE(cqe)               (le16_to_cpu((cqe)->w7) & 0x1)
#define QM_QC_CQE_SIZE                  4
#define QM_CQ_TAIL_IDX(cqc)             ((le16_to_cpu((cqc)->w11) >> 6) & 0x1)

/* eqc shift */
#define QM_EQE_AEQE_SIZE                (2UL << 12)
#define QM_EQC_PHASE_SHIFT              16

#define QM_EQE_PHASE(eqe)               ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
#define QM_EQE_CQN_MASK                 GENMASK(15, 0)

#define QM_AEQE_PHASE(aeqe)             ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
#define QM_AEQE_TYPE_SHIFT              17

#define QM_DOORBELL_CMD_SQ              0
#define QM_DOORBELL_CMD_CQ              1
#define QM_DOORBELL_CMD_EQ              2
#define QM_DOORBELL_CMD_AEQ             3

#define QM_DOORBELL_BASE_V1             0x340
#define QM_DB_CMD_SHIFT_V1              16
#define QM_DB_INDEX_SHIFT_V1            32
#define QM_DB_PRIORITY_SHIFT_V1         48

#define QM_DOORBELL_SQ_CQ_BASE_V2       0x1000
#define QM_DOORBELL_EQ_AEQ_BASE_V2      0x2000
#define QM_DB_CMD_SHIFT_V2              12
#define QM_DB_RAND_SHIFT_V2             16
#define QM_DB_INDEX_SHIFT_V2            32
#define QM_DB_PRIORITY_SHIFT_V2         48

#define QM_MEM_START_INIT               0x100040
#define QM_MEM_INIT_DONE                0x100044
#define QM_VFT_CFG_RDY                  0x10006c
#define QM_VFT_CFG_OP_WR                0x100058
#define QM_VFT_CFG_TYPE                 0x10005c
#define QM_SQC_VFT                      0x0
#define QM_CQC_VFT                      0x1
#define QM_VFT_CFG                      0x100060
#define QM_VFT_CFG_OP_ENABLE            0x100054

#define QM_VFT_CFG_DATA_L               0x100064
#define QM_VFT_CFG_DATA_H               0x100068
#define QM_SQC_VFT_BUF_SIZE             (7ULL << 8)
#define QM_SQC_VFT_SQC_SIZE             (5ULL << 12)
#define QM_SQC_VFT_INDEX_NUMBER         (1ULL << 16)
#define QM_SQC_VFT_START_SQN_SHIFT      28
#define QM_SQC_VFT_VALID                (1ULL << 44)
#define QM_SQC_VFT_SQN_SHIFT            45
#define QM_CQC_VFT_BUF_SIZE             (7ULL << 8)
#define QM_CQC_VFT_SQC_SIZE             (5ULL << 12)
#define QM_CQC_VFT_INDEX_NUMBER         (1ULL << 16)
#define QM_CQC_VFT_VALID                (1ULL << 28)

#define QM_SQC_VFT_BASE_SHIFT_V2        28
#define QM_SQC_VFT_BASE_MASK_V2         GENMASK(5, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2         45
#define QM_SQC_VFT_NUM_MASK_v2          GENMASK(9, 0)

#define QM_DFX_CNT_CLR_CE               0x100118

#define QM_ABNORMAL_INT_SOURCE          0x100000
#define QM_ABNORMAL_INT_SOURCE_CLR      GENMASK(12, 0)
#define QM_ABNORMAL_INT_MASK            0x100004
#define QM_ABNORMAL_INT_MASK_VALUE      0x1fff
#define QM_ABNORMAL_INT_STATUS          0x100008
#define QM_ABNORMAL_INT_SET             0x10000c
#define QM_ABNORMAL_INF00               0x100010
#define QM_FIFO_OVERFLOW_TYPE           0xc0
#define QM_FIFO_OVERFLOW_TYPE_SHIFT     6
#define QM_FIFO_OVERFLOW_VF             0x3f
#define QM_ABNORMAL_INF01               0x100014
#define QM_DB_TIMEOUT_TYPE              0xc0
#define QM_DB_TIMEOUT_TYPE_SHIFT        6
#define QM_DB_TIMEOUT_VF                0x3f
#define QM_RAS_CE_ENABLE                0x1000ec
#define QM_RAS_FE_ENABLE                0x1000f0
#define QM_RAS_NFE_ENABLE               0x1000f4
#define QM_RAS_CE_THRESHOLD             0x1000f8
#define QM_RAS_CE_TIMES_PER_IRQ         1
#define QM_RAS_MSI_INT_SEL              0x1040f4

#define QM_DEV_RESET_FLAG               0
#define QM_RESET_WAIT_TIMEOUT           400
#define QM_PEH_VENDOR_ID                0x1000d8
#define ACC_VENDOR_ID_VALUE             0x5a5a
#define QM_PEH_DFX_INFO0                0x1000fc
#define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT 3
#define ACC_PEH_MSI_DISABLE             GENMASK(31, 0)
#define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
#define ACC_MASTER_TRANS_RETURN_RW      3
#define ACC_MASTER_TRANS_RETURN         0x300150
#define ACC_MASTER_GLOBAL_CTRL          0x300000
#define ACC_AM_CFG_PORT_WR_EN           0x30001c
#define QM_RAS_NFE_MBIT_DISABLE         ~QM_ECC_MBIT
#define ACC_AM_ROB_ECC_INT_STS          0x300104
#define ACC_ROB_ECC_ERR_MULTPL          BIT(1)

#define POLL_PERIOD                     10
#define POLL_TIMEOUT                    1000
#define WAIT_PERIOD_US_MAX              200
#define WAIT_PERIOD_US_MIN              100
#define MAX_WAIT_COUNTS                 1000
#define QM_CACHE_WB_START               0x204
#define QM_CACHE_WB_DONE                0x208

#define PCI_BAR_2                       2
#define QM_SQE_DATA_ALIGN_MASK          GENMASK(6, 0)
#define QMC_ALIGN(sz)                   ALIGN(sz, 32)

#define QM_DBG_READ_LEN                 256
#define QM_DBG_TMP_BUF_LEN              22
#define QM_PCI_COMMAND_INVALID          ~0

#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
        (((hop_num) << QM_CQ_HOP_NUM_SHIFT)     | \
        ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT)      | \
        ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT)      | \
        ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))

#define QM_MK_CQC_DW3_V2(cqe_sz) \
        ((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))

#define QM_MK_SQC_W13(priority, orders, alg_type) \
        (((priority) << QM_SQ_PRIORITY_SHIFT)   | \
        ((orders) << QM_SQ_ORDERS_SHIFT)        | \
        (((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))

#define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
        (((hop_num) << QM_SQ_HOP_NUM_SHIFT)     | \
        ((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT)      | \
        ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT)      | \
        ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))

#define QM_MK_SQC_DW3_V2(sqe_sz) \
        ((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))

#define INIT_QC_COMMON(qc, base, pasid) do {                    \
        (qc)->head = 0;                                         \
        (qc)->tail = 0;                                         \
        (qc)->base_l = cpu_to_le32(lower_32_bits(base));        \
        (qc)->base_h = cpu_to_le32(upper_32_bits(base));        \
        (qc)->dw3 = 0;                                          \
        (qc)->w8 = 0;                                           \
        (qc)->rsvd0 = 0;                                        \
        (qc)->pasid = cpu_to_le16(pasid);                       \
        (qc)->w11 = 0;                                          \
        (qc)->rsvd1 = 0;                                        \
} while (0)

enum vft_type {
        SQC_VFT = 0,
        CQC_VFT,
};

enum acc_err_result {
        ACC_ERR_NONE,
        ACC_ERR_NEED_RESET,
        ACC_ERR_RECOVERED,
};

struct qm_cqe {
        __le32 rsvd0;
        __le16 cmd_id;
        __le16 rsvd1;
        __le16 sq_head;
        __le16 sq_num;
        __le16 rsvd2;
        __le16 w7;
};

struct qm_eqe {
        __le32 dw0;
};

struct qm_aeqe {
        __le32 dw0;
};

struct qm_sqc {
        __le16 head;
        __le16 tail;
        __le32 base_l;
        __le32 base_h;
        __le32 dw3;
        __le16 w8;
        __le16 rsvd0;
        __le16 pasid;
        __le16 w11;
        __le16 cq_num;
        __le16 w13;
        __le32 rsvd1;
};

struct qm_cqc {
        __le16 head;
        __le16 tail;
        __le32 base_l;
        __le32 base_h;
        __le32 dw3;
        __le16 w8;
        __le16 rsvd0;
        __le16 pasid;
        __le16 w11;
        __le32 dw6;
        __le32 rsvd1;
};

struct qm_eqc {
        __le16 head;
        __le16 tail;
        __le32 base_l;
        __le32 base_h;
        __le32 dw3;
        __le32 rsvd[2];
        __le32 dw6;
};

struct qm_aeqc {
        __le16 head;
        __le16 tail;
        __le32 base_l;
        __le32 base_h;
        __le32 dw3;
        __le32 rsvd[2];
        __le32 dw6;
};

struct qm_mailbox {
        __le16 w0;
        __le16 queue_num;
        __le32 base_l;
        __le32 base_h;
        __le32 rsvd;
};

struct qm_doorbell {
        __le16 queue_num;
        __le16 cmd;
        __le16 index;
        __le16 priority;
};

struct hisi_qm_resource {
        struct hisi_qm *qm;
        int distance;
        struct list_head list;
};

struct hisi_qm_hw_ops {
        int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
        void (*qm_db)(struct hisi_qm *qm, u16 qn,
                      u8 cmd, u16 index, u8 priority);
        u32 (*get_irq_num)(struct hisi_qm *qm);
        int (*debug_init)(struct hisi_qm *qm);
        void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
        void (*hw_error_uninit)(struct hisi_qm *qm);
        enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
};

struct qm_dfx_item {
        const char *name;
        u32 offset;
};

static struct qm_dfx_item qm_dfx_files[] = {
        {"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
        {"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
        {"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
        {"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
        {"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
};

static const char * const qm_debug_file_name[] = {
        [CURRENT_Q]    = "current_q",
        [CLEAR_ENABLE] = "clear_enable",
};

struct hisi_qm_hw_error {
        u32 int_msk;
        const char *msg;
};

static const struct hisi_qm_hw_error qm_hw_error[] = {
        { .int_msk = BIT(0), .msg = "qm_axi_rresp" },
        { .int_msk = BIT(1), .msg = "qm_axi_bresp" },
        { .int_msk = BIT(2), .msg = "qm_ecc_mbit" },
        { .int_msk = BIT(3), .msg = "qm_ecc_1bit" },
        { .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" },
        { .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" },
        { .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" },
        { .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" },
        { .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" },
        { .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" },
        { .int_msk = BIT(10), .msg = "qm_db_timeout" },
        { .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
        { .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
        { /* sentinel */ }
};

static const char * const qm_db_timeout[] = {
        "sq", "cq", "eq", "aeq",
};

static const char * const qm_fifo_overflow[] = {
        "cq", "eq", "aeq",
};

static const char * const qm_s[] = {
        "init", "start", "close", "stop",
};

static const char * const qp_s[] = {
        "none", "init", "start", "stop", "close",
};

static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
{
        enum qm_state curr = atomic_read(&qm->status.flags);
        bool avail = false;

        switch (curr) {
        case QM_INIT:
                if (new == QM_START || new == QM_CLOSE)
                        avail = true;
                break;
        case QM_START:
                if (new == QM_STOP)
                        avail = true;
                break;
        case QM_STOP:
                if (new == QM_CLOSE || new == QM_START)
                        avail = true;
                break;
        default:
                break;
        }

        dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
                qm_s[curr], qm_s[new]);

        if (!avail)
                dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n",
                         qm_s[curr], qm_s[new]);

        return avail;
}

static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
                              enum qp_state new)
{
        enum qm_state qm_curr = atomic_read(&qm->status.flags);
        enum qp_state qp_curr = 0;
        bool avail = false;

        if (qp)
                qp_curr = atomic_read(&qp->qp_status.flags);

        switch (new) {
        case QP_INIT:
                if (qm_curr == QM_START || qm_curr == QM_INIT)
                        avail = true;
                break;
        case QP_START:
                if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
                    (qm_curr == QM_START && qp_curr == QP_STOP))
                        avail = true;
                break;
        case QP_STOP:
                if ((qm_curr == QM_START && qp_curr == QP_START) ||
                    (qp_curr == QP_INIT))
                        avail = true;
                break;
        case QP_CLOSE:
                if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
                    (qm_curr == QM_START && qp_curr == QP_STOP) ||
                    (qm_curr == QM_STOP && qp_curr == QP_STOP)  ||
                    (qm_curr == QM_STOP && qp_curr == QP_INIT))
                        avail = true;
                break;
        default:
                break;
        }

        dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
                qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);

        if (!avail)
                dev_warn(&qm->pdev->dev,
                         "Can not change qp state from %s to %s in QM %s\n",
                         qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);

        return avail;
}

/* return 0 mailbox ready, -ETIMEDOUT hardware timeout */
static int qm_wait_mb_ready(struct hisi_qm *qm)
{
        u32 val;

        return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
                                          val, !((val >> QM_MB_BUSY_SHIFT) &
                                          0x1), 10, 1000);
}

/* 128 bit should be written to hardware at one time to trigger a mailbox */
static void qm_mb_write(struct hisi_qm *qm, const void *src)
{
        void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
        unsigned long tmp0 = 0, tmp1 = 0;

        if (!IS_ENABLED(CONFIG_ARM64)) {
                memcpy_toio(fun_base, src, 16);
                wmb();
                return;
        }

        asm volatile("ldp %0, %1, %3\n"
                     "stp %0, %1, %2\n"
                     "dsb sy\n"
                     : "=&r" (tmp0),
                       "=&r" (tmp1),
                       "+Q" (*((char __iomem *)fun_base))
                     : "Q" (*((char *)src))
                     : "memory");
}

static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
                 bool op)
{
        struct qm_mailbox mailbox;
        int ret = 0;

        dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
                queue, cmd, (unsigned long long)dma_addr);

        mailbox.w0 = cpu_to_le16(cmd |
                     (op ? 0x1 << QM_MB_OP_SHIFT : 0) |
                     (0x1 << QM_MB_BUSY_SHIFT));
        mailbox.queue_num = cpu_to_le16(queue);
        mailbox.base_l = cpu_to_le32(lower_32_bits(dma_addr));
        mailbox.base_h = cpu_to_le32(upper_32_bits(dma_addr));
        mailbox.rsvd = 0;

        mutex_lock(&qm->mailbox_lock);

        if (unlikely(qm_wait_mb_ready(qm))) {
                ret = -EBUSY;
                dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
                goto busy_unlock;
        }

        qm_mb_write(qm, &mailbox);

        if (unlikely(qm_wait_mb_ready(qm))) {
                ret = -EBUSY;
                dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
                goto busy_unlock;
        }

busy_unlock:
        mutex_unlock(&qm->mailbox_lock);

        if (ret)
                atomic64_inc(&qm->debug.dfx.mb_err_cnt);

        return ret;
}

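/*
 * The doorbell helpers below pack the queue number, command, index and
 * priority into a single 64-bit write; v1 and v2 hardware use different bit
 * layouts and register bases.
 */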
static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
        u64 doorbell;

        doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
                   ((u64)index << QM_DB_INDEX_SHIFT_V1)  |
                   ((u64)priority << QM_DB_PRIORITY_SHIFT_V1);

        writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
}

static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
        u64 doorbell;
        u64 dbase;
        u16 randata = 0;

        if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
                dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
        else
                dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;

        doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
                   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
                   ((u64)index << QM_DB_INDEX_SHIFT_V2)  |
                   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);

        writeq(doorbell, qm->io_base + dbase);
}

static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
        dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
                qn, cmd, index);

        qm->ops->qm_db(qm, qn, cmd, index, priority);
}

static int qm_dev_mem_reset(struct hisi_qm *qm)
{
        u32 val;

        writel(0x1, qm->io_base + QM_MEM_START_INIT);
        return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
                                          val & BIT(0), 10, 1000);
}

static u32 qm_get_irq_num_v1(struct hisi_qm *qm)
{
        return QM_IRQ_NUM_V1;
}

static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
{
        if (qm->fun_type == QM_HW_PF)
                return QM_IRQ_NUM_PF_V2;
        else
                return QM_IRQ_NUM_VF_V2;
}

static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
{
        u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;

        return &qm->qp_array[cqn];
}

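/*
 * Advance the completion queue head; on wrap-around the expected cqc phase
 * bit is flipped so that newly written CQEs can be told apart from stale
 * entries of the previous pass.
 */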
static void qm_cq_head_update(struct hisi_qp *qp)
{
        if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
                qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
                qp->qp_status.cq_head = 0;
        } else {
                qp->qp_status.cq_head++;
        }
}

static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
{
        if (qp->event_cb) {
                qp->event_cb(qp);
                return;
        }

        if (qp->req_cb) {
                struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;

                while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
                        dma_rmb();
                        qp->req_cb(qp, qp->sqe + qm->sqe_size *
                                   le16_to_cpu(cqe->sq_head));
                        qm_cq_head_update(qp);
                        cqe = qp->cqe + qp->qp_status.cq_head;
                        qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
                              qp->qp_status.cq_head, 0);
                        atomic_dec(&qp->qp_status.used);
                }

                /* set c_flag */
                qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
                      qp->qp_status.cq_head, 1);
        }
}

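/*
 * Bottom half of the event queue interrupt: walk the EQ, dispatch each event
 * to its qp via qm_poll_qp(), and ring the EQ doorbell periodically so the
 * hardware can reuse consumed entries.
 */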
static void qm_work_process(struct work_struct *work)
{
        struct hisi_qm *qm = container_of(work, struct hisi_qm, work);
        struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
        struct hisi_qp *qp;
        int eqe_num = 0;

        while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
                eqe_num++;
                qp = qm_to_hisi_qp(qm, eqe);
                qm_poll_qp(qp, qm);

                if (qm->status.eq_head == QM_Q_DEPTH - 1) {
                        qm->status.eqc_phase = !qm->status.eqc_phase;
                        eqe = qm->eqe;
                        qm->status.eq_head = 0;
                } else {
                        eqe++;
                        qm->status.eq_head++;
                }

                if (eqe_num == QM_Q_DEPTH / 2 - 1) {
                        eqe_num = 0;
                        qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
                }
        }

        qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
}

static irqreturn_t do_qm_irq(int irq, void *data)
{
        struct hisi_qm *qm = (struct hisi_qm *)data;

        /* the workqueue created by device driver of QM */
        if (qm->wq)
                queue_work(qm->wq, &qm->work);
        else
                schedule_work(&qm->work);

        return IRQ_HANDLED;
}

static irqreturn_t qm_irq(int irq, void *data)
{
        struct hisi_qm *qm = data;

        if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
                return do_qm_irq(irq, data);

        atomic64_inc(&qm->debug.dfx.err_irq_cnt);
        dev_err(&qm->pdev->dev, "invalid int source\n");
        qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);

        return IRQ_NONE;
}

static irqreturn_t qm_aeq_irq(int irq, void *data)
{
        struct hisi_qm *qm = data;
        struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
        u32 type;

        atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
        if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
                return IRQ_NONE;

        while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
                type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
                if (type < ARRAY_SIZE(qm_fifo_overflow))
                        dev_err(&qm->pdev->dev, "%s overflow\n",
                                qm_fifo_overflow[type]);
                else
                        dev_err(&qm->pdev->dev, "unknown error type %d\n",
                                type);

                if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
                        qm->status.aeqc_phase = !qm->status.aeqc_phase;
                        aeqe = qm->aeqe;
                        qm->status.aeq_head = 0;
                } else {
                        aeqe++;
                        qm->status.aeq_head++;
                }

                qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
        }

        return IRQ_HANDLED;
}

static void qm_irq_unregister(struct hisi_qm *qm)
{
        struct pci_dev *pdev = qm->pdev;

        free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);

        if (qm->ver == QM_HW_V2) {
                free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);

                if (qm->fun_type == QM_HW_PF)
                        free_irq(pci_irq_vector(pdev,
                                 QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
        }
}

static void qm_init_qp_status(struct hisi_qp *qp)
{
        struct hisi_qp_status *qp_status = &qp->qp_status;

        qp_status->sq_tail = 0;
        qp_status->cq_head = 0;
        qp_status->cqc_phase = true;
        atomic_set(&qp_status->flags, 0);
}

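/*
 * Build the 64-bit SQC/CQC virtual function table entry for the given base
 * queue and queue count, then stage it in the VFT_CFG_DATA_L/H registers;
 * qm_set_vft_common() triggers the actual write.
 */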
static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
                            u32 number)
{
        u64 tmp = 0;

        if (number > 0) {
                switch (type) {
                case SQC_VFT:
                        switch (qm->ver) {
                        case QM_HW_V1:
                                tmp = QM_SQC_VFT_BUF_SIZE       |
                                      QM_SQC_VFT_SQC_SIZE       |
                                      QM_SQC_VFT_INDEX_NUMBER   |
                                      QM_SQC_VFT_VALID          |
                                      (u64)base << QM_SQC_VFT_START_SQN_SHIFT;
                                break;
                        case QM_HW_V2:
                                tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
                                      QM_SQC_VFT_VALID |
                                      (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
                                break;
                        case QM_HW_UNKNOWN:
                                break;
                        }
                        break;
                case CQC_VFT:
                        switch (qm->ver) {
                        case QM_HW_V1:
                                tmp = QM_CQC_VFT_BUF_SIZE       |
                                      QM_CQC_VFT_SQC_SIZE       |
                                      QM_CQC_VFT_INDEX_NUMBER   |
                                      QM_CQC_VFT_VALID;
                                break;
                        case QM_HW_V2:
                                tmp = QM_CQC_VFT_VALID;
                                break;
                        case QM_HW_UNKNOWN:
                                break;
                        }
                        break;
                }
        }

        writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
        writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
}

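/*
 * Program one VFT entry for a function: wait for the config engine to be
 * ready, select the table type and function number, stage the data, then
 * trigger the operation and poll until it completes.
 */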
static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
                             u32 fun_num, u32 base, u32 number)
{
        unsigned int val;
        int ret;

        ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
                                         val & BIT(0), 10, 1000);
        if (ret)
                return ret;

        writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
        writel(type, qm->io_base + QM_VFT_CFG_TYPE);
        writel(fun_num, qm->io_base + QM_VFT_CFG);

        qm_vft_data_cfg(qm, type, base, number);

        writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
        writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);

        return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
                                          val & BIT(0), 10, 1000);
}

/* The config should be conducted after qm_dev_mem_reset() */
static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
                              u32 number)
{
        int ret, i;

        for (i = SQC_VFT; i <= CQC_VFT; i++) {
                ret = qm_set_vft_common(qm, i, fun_num, base, number);
                if (ret)
                        return ret;
        }

        return 0;
}

static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
{
        u64 sqc_vft;
        int ret;

        ret = qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
        if (ret)
                return ret;

        sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
                  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
        *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
        *number = (QM_SQC_VFT_NUM_MASK_v2 &
                   (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

        return 0;
}

static struct hisi_qm *file_to_qm(struct debugfs_file *file)
{
        struct qm_debug *debug = file->debug;

        return container_of(debug, struct hisi_qm, debug);
}

static u32 current_q_read(struct debugfs_file *file)
{
        struct hisi_qm *qm = file_to_qm(file);

        return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
}

static int current_q_write(struct debugfs_file *file, u32 val)
{
        struct hisi_qm *qm = file_to_qm(file);
        u32 tmp;

        if (val >= qm->debug.curr_qm_qp_num)
                return -EINVAL;

        tmp = val << QM_DFX_QN_SHIFT |
              (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
        writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

        tmp = val << QM_DFX_QN_SHIFT |
              (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
        writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

        return 0;
}

static u32 clear_enable_read(struct debugfs_file *file)
{
        struct hisi_qm *qm = file_to_qm(file);

        return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
}

/* rd_clr_ctrl 1 enables read-clear mode, 0 disables it */
static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl)
{
        struct hisi_qm *qm = file_to_qm(file);

        if (rd_clr_ctrl > 1)
                return -EINVAL;

        writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);

        return 0;
}

static ssize_t qm_debug_read(struct file *filp, char __user *buf,
                             size_t count, loff_t *pos)
{
        struct debugfs_file *file = filp->private_data;
        enum qm_debug_file index = file->index;
        char tbuf[QM_DBG_TMP_BUF_LEN];
        u32 val;
        int ret;

        mutex_lock(&file->lock);
        switch (index) {
        case CURRENT_Q:
                val = current_q_read(file);
                break;
        case CLEAR_ENABLE:
                val = clear_enable_read(file);
                break;
        default:
                mutex_unlock(&file->lock);
                return -EINVAL;
        }
        mutex_unlock(&file->lock);

        ret = sprintf(tbuf, "%u\n", val);
        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
                              size_t count, loff_t *pos)
{
        struct debugfs_file *file = filp->private_data;
        enum qm_debug_file index = file->index;
        unsigned long val;
        char tbuf[QM_DBG_TMP_BUF_LEN];
        int len, ret;

        if (*pos != 0)
                return 0;

        if (count >= QM_DBG_TMP_BUF_LEN)
                return -ENOSPC;

        len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf,
                                     count);
        if (len < 0)
                return len;

        tbuf[len] = '\0';
        if (kstrtoul(tbuf, 0, &val))
                return -EFAULT;

        mutex_lock(&file->lock);
        switch (index) {
        case CURRENT_Q:
                ret = current_q_write(file, val);
                if (ret)
                        goto err_input;
                break;
        case CLEAR_ENABLE:
                ret = clear_enable_write(file, val);
                if (ret)
                        goto err_input;
                break;
        default:
                ret = -EINVAL;
                goto err_input;
        }
        mutex_unlock(&file->lock);

        return count;

err_input:
        mutex_unlock(&file->lock);
        return ret;
}

static const struct file_operations qm_debug_fops = {
        .owner = THIS_MODULE,
        .open = simple_open,
        .read = qm_debug_read,
        .write = qm_debug_write,
};

struct qm_dfx_registers {
        char *reg_name;
        u64 reg_offset;
};

#define CNT_CYC_REGS_NUM                10
static struct qm_dfx_registers qm_dfx_regs[] = {
        /* XXX_CNT are read-clear registers */
        {"QM_ECC_1BIT_CNT", 0x104000ull},
        {"QM_ECC_MBIT_CNT", 0x104008ull},
        {"QM_DFX_MB_CNT", 0x104018ull},
        {"QM_DFX_DB_CNT", 0x104028ull},
        {"QM_DFX_SQE_CNT", 0x104038ull},
        {"QM_DFX_CQE_CNT", 0x104048ull},
        {"QM_DFX_SEND_SQE_TO_ACC_CNT", 0x104050ull},
        {"QM_DFX_WB_SQE_FROM_ACC_CNT", 0x104058ull},
        {"QM_DFX_ACC_FINISH_CNT", 0x104060ull},
        {"QM_DFX_CQE_ERR_CNT", 0x1040b4ull},
        {"QM_DFX_FUNS_ACTIVE_ST", 0x200ull},
        {"QM_ECC_1BIT_INF", 0x104004ull},
        {"QM_ECC_MBIT_INF", 0x10400cull},
        {"QM_DFX_ACC_RDY_VLD0", 0x1040a0ull},
        {"QM_DFX_ACC_RDY_VLD1", 0x1040a4ull},
        {"QM_DFX_AXI_RDY_VLD", 0x1040a8ull},
        {"QM_DFX_FF_ST0", 0x1040c8ull},
        {"QM_DFX_FF_ST1", 0x1040ccull},
        {"QM_DFX_FF_ST2", 0x1040d0ull},
        {"QM_DFX_FF_ST3", 0x1040d4ull},
        {"QM_DFX_FF_ST4", 0x1040d8ull},
        {"QM_DFX_FF_ST5", 0x1040dcull},
        {"QM_DFX_FF_ST6", 0x1040e0ull},
        {"QM_IN_IDLE_ST", 0x1040e4ull},
        { NULL, 0}
};

static struct qm_dfx_registers qm_vf_dfx_regs[] = {
        {"QM_DFX_FUNS_ACTIVE_ST", 0x200ull},
        { NULL, 0}
};

static int qm_regs_show(struct seq_file *s, void *unused)
{
        struct hisi_qm *qm = s->private;
        struct qm_dfx_registers *regs;
        u32 val;

        if (qm->fun_type == QM_HW_PF)
                regs = qm_dfx_regs;
        else
                regs = qm_vf_dfx_regs;

        while (regs->reg_name) {
                val = readl(qm->io_base + regs->reg_offset);
                seq_printf(s, "%s= 0x%08x\n", regs->reg_name, val);
                regs++;
        }

        return 0;
}

static int qm_regs_open(struct inode *inode, struct file *file)
{
        return single_open(file, qm_regs_show, inode->i_private);
}

static const struct file_operations qm_regs_fops = {
        .owner = THIS_MODULE,
        .open = qm_regs_open,
        .read = seq_read,
        .release = single_release,
};

static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
{
        struct dentry *qm_d = qm->debug.qm_d;
        struct debugfs_file *file = qm->debug.files + index;

        debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file,
                            &qm_debug_fops);

        file->index = index;
        mutex_init(&file->lock);
        file->debug = &qm->debug;

        return 0;
}

static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
{
        writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}

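/*
 * v2 error setup: clear any residual abnormal interrupt sources, program the
 * CE/NFE/FE error-type enables and the CE threshold, then unmask the
 * corresponding bits in the abnormal interrupt mask register.
 */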
static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
{
        u32 irq_enable = ce | nfe | fe;
        u32 irq_unmask = ~irq_enable;

        qm->error_mask = ce | nfe | fe;

        /* clear QM hw residual error source */
        writel(QM_ABNORMAL_INT_SOURCE_CLR,
               qm->io_base + QM_ABNORMAL_INT_SOURCE);

        /* configure error type */
        writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
        writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
        writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
        writel(fe, qm->io_base + QM_RAS_FE_ENABLE);

        irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
        writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
{
        writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
{
        const struct hisi_qm_hw_error *err;
        struct device *dev = &qm->pdev->dev;
        u32 reg_val, type, vf_num;
        int i;

        for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
                err = &qm_hw_error[i];
                if (!(err->int_msk & error_status))
                        continue;

                dev_err(dev, "%s [error status=0x%x] found\n",
                        err->msg, err->int_msk);

                if (err->int_msk & QM_DB_TIMEOUT) {
                        reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
                        type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
                               QM_DB_TIMEOUT_TYPE_SHIFT;
                        vf_num = reg_val & QM_DB_TIMEOUT_VF;
                        dev_err(dev, "qm %s doorbell timeout in function %u\n",
                                qm_db_timeout[type], vf_num);
                } else if (err->int_msk & QM_OF_FIFO_OF) {
                        reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
                        type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
                               QM_FIFO_OVERFLOW_TYPE_SHIFT;
                        vf_num = reg_val & QM_FIFO_OVERFLOW_VF;

                        if (type < ARRAY_SIZE(qm_fifo_overflow))
                                dev_err(dev, "qm %s fifo overflow in function %u\n",
                                        qm_fifo_overflow[type], vf_num);
                        else
                                dev_err(dev, "unknown error type\n");
                }
        }
}

static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
        u32 error_status, tmp;

        /* read err sts */
        tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
        error_status = qm->error_mask & tmp;

        if (error_status) {
                if (error_status & QM_ECC_MBIT)
                        qm->err_status.is_qm_ecc_mbit = true;

                qm_log_hw_error(qm, error_status);
                if (error_status == QM_DB_RANDOM_INVALID) {
                        writel(error_status, qm->io_base +
                               QM_ABNORMAL_INT_SOURCE);
                        return ACC_ERR_RECOVERED;
                }

                return ACC_ERR_NEED_RESET;
        }

        return ACC_ERR_RECOVERED;
}

static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
        .qm_db = qm_db_v1,
        .get_irq_num = qm_get_irq_num_v1,
        .hw_error_init = qm_hw_error_init_v1,
};

static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
        .get_vft = qm_get_vft_v2,
        .qm_db = qm_db_v2,
        .get_irq_num = qm_get_irq_num_v2,
        .hw_error_init = qm_hw_error_init_v2,
        .hw_error_uninit = qm_hw_error_uninit_v2,
        .hw_error_handle = qm_hw_error_handle_v2,
};

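/*
 * Return the next free SQE slot in the qp's send queue, or NULL when the
 * number of in-flight requests has already reached QM_Q_DEPTH.
 */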
static void *qm_get_avail_sqe(struct hisi_qp *qp)
{
        struct hisi_qp_status *qp_status = &qp->qp_status;
        u16 sq_tail = qp_status->sq_tail;

        if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH))
                return NULL;

        return qp->sqe + sq_tail * qp->qm->sqe_size;
}

static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
{
        struct device *dev = &qm->pdev->dev;
        struct hisi_qp *qp;
        int qp_id;

        if (!qm_qp_avail_state(qm, NULL, QP_INIT))
                return ERR_PTR(-EPERM);

        if (qm->qp_in_used == qm->qp_num) {
                dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
                                     qm->qp_num);
                atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
                return ERR_PTR(-EBUSY);
        }

        qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
        if (qp_id < 0) {
                dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
                                     qm->qp_num);
                atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
                return ERR_PTR(-EBUSY);
        }

        qp = &qm->qp_array[qp_id];

        memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);

        qp->event_cb = NULL;
        qp->req_cb = NULL;
        qp->qp_id = qp_id;
        qp->alg_type = alg_type;
        qm->qp_in_used++;
        atomic_set(&qp->qp_status.flags, QP_INIT);

        return qp;
}

/**
 * hisi_qm_create_qp() - Create a queue pair from qm.
 * @qm: The qm we create a qp from.
 * @alg_type: Accelerator specific algorithm type in sqc.
 *
 * Return the created qp, -EBUSY if all qps in qm are allocated, -ENOMEM if
 * allocating qp memory fails.
 */
struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
{
        struct hisi_qp *qp;

        down_write(&qm->qps_lock);
        qp = qm_create_qp_nolock(qm, alg_type);
        up_write(&qm->qps_lock);

        return qp;
}
EXPORT_SYMBOL_GPL(hisi_qm_create_qp);

/**
 * hisi_qm_release_qp() - Release a qp back to its qm.
 * @qp: The qp we want to release.
 *
 * This function releases the resource of a qp.
 */
void hisi_qm_release_qp(struct hisi_qp *qp)
{
        struct hisi_qm *qm = qp->qm;

        down_write(&qm->qps_lock);

        if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
                up_write(&qm->qps_lock);
                return;
        }

        qm->qp_in_used--;
        idr_remove(&qm->qp_idr, qp->qp_id);

        up_write(&qm->qps_lock);
}
EXPORT_SYMBOL_GPL(hisi_qm_release_qp);

static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
{
        struct hisi_qm *qm = qp->qm;
        struct device *dev = &qm->pdev->dev;
        enum qm_hw_ver ver = qm->ver;
        struct qm_sqc *sqc;
        struct qm_cqc *cqc;
        dma_addr_t sqc_dma;
        dma_addr_t cqc_dma;
        int ret;

        qm_init_qp_status(qp);

        sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
        if (!sqc)
                return -ENOMEM;
        sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
                                 DMA_TO_DEVICE);
        if (dma_mapping_error(dev, sqc_dma)) {
                kfree(sqc);
                return -ENOMEM;
        }

        INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
        if (ver == QM_HW_V1) {
                sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
                sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
        } else if (ver == QM_HW_V2) {
                sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
                sqc->w8 = 0; /* rand_qc */
        }
        sqc->cq_num = cpu_to_le16(qp_id);
        sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));

        ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
        dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
        kfree(sqc);
        if (ret)
                return ret;

        cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
        if (!cqc)
                return -ENOMEM;
        cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
                                 DMA_TO_DEVICE);
        if (dma_mapping_error(dev, cqc_dma)) {
                kfree(cqc);
                return -ENOMEM;
        }

        INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
        if (ver == QM_HW_V1) {
                cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, 4));
                cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
        } else if (ver == QM_HW_V2) {
                cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(4));
                cqc->w8 = 0;
        }
        cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);

        ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
        dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
        kfree(cqc);

        return ret;
}

static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
{
        struct hisi_qm *qm = qp->qm;
        struct device *dev = &qm->pdev->dev;
        int qp_id = qp->qp_id;
        int pasid = arg;
        int ret;

        if (!qm_qp_avail_state(qm, qp, QP_START))
                return -EPERM;

        ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
        if (ret)
                return ret;

        atomic_set(&qp->qp_status.flags, QP_START);
        dev_dbg(dev, "queue %d started\n", qp_id);

        return 0;
}

/**
* hisi_qm_start_qp ( ) - Start a qp into running .
* @ qp : The qp we want to start to run .
* @ arg : Accelerator specific argument .
*
* After this function , qp can receive request from user . Return 0 if
* successful , Return - EBUSY if failed .
*/
int hisi_qm_start_qp ( struct hisi_qp * qp , unsigned long arg )
{
struct hisi_qm * qm = qp - > qm ;
int ret ;
down_write ( & qm - > qps_lock ) ;
ret = qm_start_qp_nolock ( qp , arg ) ;
up_write ( & qm - > qps_lock ) ;
return ret ;
}
2019-08-02 15:57:50 +08:00
EXPORT_SYMBOL_GPL ( hisi_qm_start_qp ) ;
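/*
 * qm_ctx_alloc()/qm_ctx_free() manage a small kernel buffer that the hardware
 * writes a context dump (SQC/CQC) into; the buffer is DMA-mapped for device
 * writes and must be released with qm_ctx_free() after use.
 */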
static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
                          dma_addr_t *dma_addr)
{
        struct device *dev = &qm->pdev->dev;
        void *ctx_addr;

        ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
        if (!ctx_addr)
                return ERR_PTR(-ENOMEM);

        *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *dma_addr)) {
                dev_err(dev, "DMA mapping error!\n");
                kfree(ctx_addr);
                return ERR_PTR(-ENOMEM);
        }

        return ctx_addr;
}

static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
                        const void *ctx_addr, dma_addr_t *dma_addr)
{
        struct device *dev = &qm->pdev->dev;

        dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
        kfree(ctx_addr);
}

static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
{
        return qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
}

static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
{
        return qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
}

/*
 * Determine whether the queue has been drained by comparing the tail
 * pointers of the sq and cq.
 */
static int qm_drain_qp(struct hisi_qp *qp)
{
        size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc);
        struct hisi_qm *qm = qp->qm;
        struct device *dev = &qm->pdev->dev;
        struct qm_sqc *sqc;
        struct qm_cqc *cqc;
        dma_addr_t dma_addr;
        int ret = 0, i = 0;
        void *addr;

        /*
         * No need to judge if ECC multi-bit error occurs because the
         * master OOO will be blocked.
         */
        if (qm->err_status.is_qm_ecc_mbit || qm->err_status.is_dev_ecc_mbit)
                return 0;

        addr = qm_ctx_alloc(qm, size, &dma_addr);
        if (IS_ERR(addr)) {
                dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
                return -ENOMEM;
        }

        while (++i) {
                ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id);
                if (ret) {
                        dev_err_ratelimited(dev, "Failed to dump sqc!\n");
                        break;
                }
                sqc = addr;

                ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)),
                                      qp->qp_id);
                if (ret) {
                        dev_err_ratelimited(dev, "Failed to dump cqc!\n");
                        break;
                }
                cqc = addr + sizeof(struct qm_sqc);

                if ((sqc->tail == cqc->tail) &&
                    (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc)))
                        break;

                if (i == MAX_WAIT_COUNTS) {
                        dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id);
                        ret = -EBUSY;
                        break;
                }

                usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
        }

        qm_ctx_free(qm, size, addr, &dma_addr);

        return ret;
}

static int qm_stop_qp_nolock(struct hisi_qp *qp)
{
        struct device *dev = &qp->qm->pdev->dev;
        int ret;

        /*
         * It is allowed to stop and release a qp during reset. If a qp is
         * stopped by the reset but still needs to be released afterwards,
         * clear the is_resetting flag here so that this qp will not be
         * restarted after the reset completes.
         */
        if (atomic_read(&qp->qp_status.flags) == QP_STOP) {
                qp->is_resetting = false;
                return 0;
        }

        if (!qm_qp_avail_state(qp->qm, qp, QP_STOP))
                return -EPERM;

        atomic_set(&qp->qp_status.flags, QP_STOP);

        ret = qm_drain_qp(qp);
        if (ret)
                dev_err(dev, "Failed to drain out data for stopping!\n");

        if (qp->qm->wq)
                flush_workqueue(qp->qm->wq);
        else
                flush_work(&qp->qm->work);

        dev_dbg(dev, "stop queue %u!", qp->qp_id);

        return 0;
}

/**
 * hisi_qm_stop_qp() - Stop a qp in qm.
 * @qp: The qp we want to stop.
 *
 * This function is the reverse of hisi_qm_start_qp(). Return 0 if successful.
 */
int hisi_qm_stop_qp(struct hisi_qp *qp)
{
        int ret;

        down_write(&qp->qm->qps_lock);
        ret = qm_stop_qp_nolock(qp);
        up_write(&qp->qm->qps_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);

/**
 * hisi_qp_send() - Queue up a task in the hardware queue.
 * @qp: The qp in which to put the message.
 * @msg: The message.
 *
 * This function will return -EBUSY if qp is currently full, and -EAGAIN
 * if qp related qm is resetting.
 *
 * Note: This function may run with qm_irq_thread and ACC reset at the same
 * time. It has no race with qm_irq_thread. However, an ACC reset may happen
 * during hisi_qp_send; we hold no lock here for performance reasons. In that
 * case the current qm_db write may fail or the sent sqe may never be
 * received. The QM sync/async receive functions should handle the error sqe,
 * and the ACC reset done path should clear the used sqes to 0.
 */
int hisi_qp_send(struct hisi_qp *qp, const void *msg)
{
        struct hisi_qp_status *qp_status = &qp->qp_status;
        u16 sq_tail = qp_status->sq_tail;
        u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
        void *sqe = qm_get_avail_sqe(qp);

        if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
                     atomic_read(&qp->qm->status.flags) == QM_STOP ||
                     qp->is_resetting)) {
                dev_info(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
                return -EAGAIN;
        }

        if (!sqe)
                return -EBUSY;

        memcpy(sqe, msg, qp->qm->sqe_size);

        qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
        atomic_inc(&qp->qp_status.used);
        qp_status->sq_tail = sq_tail_next;

        return 0;
}
EXPORT_SYMBOL_GPL(hisi_qp_send);

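/*
 * Flush the QM's internal SQC/CQC cache back to memory (hw v2 only), polling
 * QM_CACHE_WB_DONE until the write-back completes.
 */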
static void hisi_qm_cache_wb(struct hisi_qm *qm)
{
        unsigned int val;

        if (qm->ver == QM_HW_V2) {
                writel(0x1, qm->io_base + QM_CACHE_WB_START);
                if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
                                               val, val & BIT(0), 10, 1000))
                        dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
        }
}

static void qm_qp_event_notifier(struct hisi_qp *qp)
{
        wake_up_interruptible(&qp->uacce_q->wait);
}

static int hisi_qm_get_available_instances(struct uacce_device *uacce)
{
        return hisi_qm_get_free_qp_num(uacce->priv);
}

static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
                                   unsigned long arg,
                                   struct uacce_queue *q)
{
        struct hisi_qm *qm = uacce->priv;
        struct hisi_qp *qp;
        u8 alg_type = 0;

        qp = hisi_qm_create_qp(qm, alg_type);
        if (IS_ERR(qp))
                return PTR_ERR(qp);

        q->priv = qp;
        q->uacce = uacce;
        qp->uacce_q = q;
        qp->event_cb = qm_qp_event_notifier;
        qp->pasid = arg;

        return 0;
}

static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
{
        struct hisi_qp *qp = q->priv;

        hisi_qm_cache_wb(qp->qm);
        hisi_qm_release_qp(qp);
}

/* map sq/cq/doorbell to user space */
static int hisi_qm_uacce_mmap(struct uacce_queue *q,
                              struct vm_area_struct *vma,
                              struct uacce_qfile_region *qfr)
{
        struct hisi_qp *qp = q->priv;
        struct hisi_qm *qm = qp->qm;
        size_t sz = vma->vm_end - vma->vm_start;
        struct pci_dev *pdev = qm->pdev;
        struct device *dev = &pdev->dev;
        unsigned long vm_pgoff;
        int ret;

        switch (qfr->type) {
        case UACCE_QFRT_MMIO:
                if (qm->ver == QM_HW_V2) {
                        if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
                            QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
                                return -EINVAL;
                } else {
                        if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
                                return -EINVAL;
                }

                vma->vm_flags |= VM_IO;

                return remap_pfn_range(vma, vma->vm_start,
                                       qm->phys_base >> PAGE_SHIFT,
                                       sz, pgprot_noncached(vma->vm_page_prot));
        case UACCE_QFRT_DUS:
                if (sz != qp->qdma.size)
                        return -EINVAL;

                /*
                 * dma_mmap_coherent() requires vm_pgoff as 0;
                 * restore vm_pgoff to its initial value for mmap()
                 */
                vm_pgoff = vma->vm_pgoff;
                vma->vm_pgoff = 0;
                ret = dma_mmap_coherent(dev, vma, qp->qdma.va,
                                        qp->qdma.dma, sz);
                vma->vm_pgoff = vm_pgoff;
                return ret;

        default:
                return -EINVAL;
        }
}

static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
{
        struct hisi_qp *qp = q->priv;

        return hisi_qm_start_qp(qp, qp->pasid);
}

static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
{
        hisi_qm_stop_qp(q->priv);
}

static int qm_set_sqctype(struct uacce_queue *q, u16 type)
{
        struct hisi_qm *qm = q->uacce->priv;
        struct hisi_qp *qp = q->priv;

        down_write(&qm->qps_lock);
        qp->alg_type = type;
        up_write(&qm->qps_lock);

        return 0;
}

static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
                                unsigned long arg)
{
        struct hisi_qp *qp = q->priv;
        struct hisi_qp_ctx qp_ctx;

        if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
                if (copy_from_user(&qp_ctx, (void __user *)arg,
                                   sizeof(struct hisi_qp_ctx)))
                        return -EFAULT;

                if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
                        return -EINVAL;

                qm_set_sqctype(q, qp_ctx.qc_type);
                qp_ctx.id = qp->qp_id;

                if (copy_to_user((void __user *)arg, &qp_ctx,
                                 sizeof(struct hisi_qp_ctx)))
                        return -EFAULT;
        } else {
                return -EINVAL;
        }

        return 0;
}

static const struct uacce_ops uacce_qm_ops = {
        .get_available_instances = hisi_qm_get_available_instances,
        .get_queue = hisi_qm_uacce_get_queue,
        .put_queue = hisi_qm_uacce_put_queue,
        .start_queue = hisi_qm_uacce_start_queue,
        .stop_queue = hisi_qm_uacce_stop_queue,
        .mmap = hisi_qm_uacce_mmap,
        .ioctl = hisi_qm_uacce_ioctl,
};

static int qm_alloc_uacce(struct hisi_qm *qm)
{
        struct pci_dev *pdev = qm->pdev;
        struct uacce_device *uacce;
        unsigned long mmio_page_nr;
        unsigned long dus_page_nr;
        struct uacce_interface interface = {
                .flags = UACCE_DEV_SVA,
                .ops = &uacce_qm_ops,
        };

        strncpy(interface.name, pdev->driver->name, sizeof(interface.name));

        uacce = uacce_alloc(&pdev->dev, &interface);
        if (IS_ERR(uacce))
                return PTR_ERR(uacce);

        if (uacce->flags & UACCE_DEV_SVA) {
                qm->use_sva = true;
        } else {
                /* only consider sva case */
                uacce_remove(uacce);
                qm->uacce = NULL;
                return -EINVAL;
        }

        uacce->is_vf = pdev->is_virtfn;
        uacce->priv = qm;
        uacce->algs = qm->algs;

        if (qm->ver == QM_HW_V1) {
                mmio_page_nr = QM_DOORBELL_PAGE_NR;
                uacce->api_ver = HISI_QM_API_VER_BASE;
        } else {
                mmio_page_nr = QM_DOORBELL_PAGE_NR +
                        QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
                uacce->api_ver = HISI_QM_API_VER2_BASE;
        }

        dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
                       sizeof(struct qm_cqe) * QM_Q_DEPTH) >> PAGE_SHIFT;

        uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
        uacce->qf_pg_num[UACCE_QFRT_DUS]  = dus_page_nr;

        qm->uacce = uacce;

        return 0;
}

/**
 * hisi_qm_get_free_qp_num() - Get the number of free qps in qm.
 * @qm: The qm to query.
 *
 * This function returns the number of free qps in qm.
 */
int hisi_qm_get_free_qp_num(struct hisi_qm *qm)
{
        int ret;

        down_read(&qm->qps_lock);
        ret = qm->qp_num - qm->qp_in_used;
        up_read(&qm->qps_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num);

static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
{
        struct device *dev = &qm->pdev->dev;
        struct qm_dma *qdma;
        int i;

        for (i = num - 1; i >= 0; i--) {
                qdma = &qm->qp_array[i].qdma;
                dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
        }

        kfree(qm->qp_array);
}

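/*
 * Each qp gets one coherent DMA allocation that holds its SQEs followed by
 * its CQEs; the CQE region starts at offset sqe_size * QM_Q_DEPTH.
 */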
static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
{
        struct device *dev = &qm->pdev->dev;
        size_t off = qm->sqe_size * QM_Q_DEPTH;
        struct hisi_qp *qp;

        qp = &qm->qp_array[id];
        qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
                                         GFP_KERNEL);
        if (!qp->qdma.va)
                return -ENOMEM;

        qp->sqe = qp->qdma.va;
        qp->sqe_dma = qp->qdma.dma;
        qp->cqe = qp->qdma.va + off;
        qp->cqe_dma = qp->qdma.dma + off;
        qp->qdma.size = dma_size;
        qp->qm = qm;
        qp->qp_id = id;

        return 0;
}

static int hisi_qm_memory_init(struct hisi_qm *qm)
{
        struct device *dev = &qm->pdev->dev;
        size_t qp_dma_size, off = 0;
        int i, ret = 0;

#define QM_INIT_BUF(qm, type, num) do { \
        (qm)->type = ((qm)->qdma.va + (off)); \
        (qm)->type##_dma = (qm)->qdma.dma + (off); \
        off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
} while (0)

        idr_init(&qm->qp_idr);
        qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_Q_DEPTH) +
                        QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
                        QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
                        QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
        qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
                                         GFP_ATOMIC);
        dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size);
        if (!qm->qdma.va)
                return -ENOMEM;

        QM_INIT_BUF(qm, eqe, QM_Q_DEPTH);
        QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
        QM_INIT_BUF(qm, sqc, qm->qp_num);
        QM_INIT_BUF(qm, cqc, qm->qp_num);

        qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
        if (!qm->qp_array) {
                ret = -ENOMEM;
                goto err_alloc_qp_array;
        }

        /* one more page for device or qp statuses */
        qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
                      sizeof(struct qm_cqe) * QM_Q_DEPTH;
        qp_dma_size = PAGE_ALIGN(qp_dma_size);
        for (i = 0; i < qm->qp_num; i++) {
                ret = hisi_qp_memory_init(qm, qp_dma_size, i);
                if (ret)
                        goto err_init_qp_mem;

                dev_dbg(dev, "allocate qp dma buf size=%zx)\n", qp_dma_size);
        }

        return ret;

err_init_qp_mem:
        hisi_qp_memory_uninit(qm, i);
err_alloc_qp_array:
        dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);

        return ret;
}

static void hisi_qm_pre_init(struct hisi_qm *qm)
{
        struct pci_dev *pdev = qm->pdev;

        switch (qm->ver) {
        case QM_HW_V1:
                qm->ops = &qm_hw_ops_v1;
                break;
        case QM_HW_V2:
                qm->ops = &qm_hw_ops_v2;
                break;
        default:
                return;
        }

        pci_set_drvdata(pdev, qm);
        mutex_init(&qm->mailbox_lock);
        init_rwsem(&qm->qps_lock);
        qm->qp_in_used = 0;
}

/**
 * hisi_qm_uninit() - Uninitialize qm.
 * @qm: The qm to be uninitialized.
 *
 * This function uninits qm related device resources.
 */
void hisi_qm_uninit(struct hisi_qm *qm)
{
        struct pci_dev *pdev = qm->pdev;
        struct device *dev = &pdev->dev;

        down_write(&qm->qps_lock);

        if (!qm_avail_state(qm, QM_CLOSE)) {
                up_write(&qm->qps_lock);
                return;
        }

        uacce_remove(qm->uacce);
        qm->uacce = NULL;

        hisi_qp_memory_uninit(qm, qm->qp_num);
        idr_destroy(&qm->qp_idr);

        if (qm->qdma.va) {
                hisi_qm_cache_wb(qm);
                dma_free_coherent(dev, qm->qdma.size,
                                  qm->qdma.va, qm->qdma.dma);
                memset(&qm->qdma, 0, sizeof(qm->qdma));
        }

        qm_irq_unregister(qm);
        pci_free_irq_vectors(pdev);
        iounmap(qm->io_base);
        pci_release_mem_regions(pdev);
        pci_disable_device(pdev);

        up_write(&qm->qps_lock);
}
EXPORT_SYMBOL_GPL(hisi_qm_uninit);

2019-08-02 15:57:53 +08:00

/**
 * hisi_qm_get_vft() - Get vft from a qm.
 * @qm: The qm we want to get its vft.
 * @base: The base number of queues in the vft.
 * @number: The number of queues in the vft.
 *
 * We can allocate multiple queues to a qm by configuring the virtual function
 * table. This function reads that configuration back. Normally it is called
 * in a VF driver to get the VF's queue information.
 *
 * qm hw v1 does not support this interface.
 */
int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
{
	if (!base || !number)
		return -EINVAL;

	if (!qm->ops->get_vft) {
		dev_err(&qm->pdev->dev, "Don't support vft read!\n");
		return -EINVAL;
	}

	return qm->ops->get_vft(qm, base, number);
}
EXPORT_SYMBOL_GPL(hisi_qm_get_vft);
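
/*
 * Illustrative sketch (not part of this driver): the call pattern for reading
 * the queue range a PF assigned to a VF. hisi_qm_init() already does this
 * internally for v2 VFs; "example_vf_show_queue_range" is a hypothetical
 * helper that only demonstrates the API.
 */
static int example_vf_show_queue_range(struct hisi_qm *qm)
{
	u32 base, number;
	int ret;

	/* Only supported on QM hw v2; v1 VFs cannot read the vft. */
	ret = hisi_qm_get_vft(qm, &base, &number);
	if (ret)
		return ret;

	dev_info(&qm->pdev->dev, "vft: base %u, %u queues\n", base, number);

	return 0;
}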

/**
 * This function is always called in the PF driver, it is used to assign
 * queues among the PF and VFs.
 *
 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
 * (VF function number 0x2)
 */
static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
			   u32 number)
{
	u32 max_q_num = qm->ctrl_qp_num;

	if (base >= max_q_num || number > max_q_num ||
	    (base + number) > max_q_num)
		return -EINVAL;

	return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
}

static void qm_init_eq_aeq_status(struct hisi_qm *qm)
{
	struct hisi_qm_status *status = &qm->status;

	status->eq_head = 0;
	status->aeq_head = 0;
	status->eqc_phase = true;
	status->aeqc_phase = true;
}

static int qm_eq_ctx_cfg(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_eqc *eqc;
	struct qm_aeqc *aeqc;
	dma_addr_t eqc_dma;
	dma_addr_t aeqc_dma;
	int ret;

	qm_init_eq_aeq_status(qm);

	eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL);
	if (!eqc)
		return -ENOMEM;
	eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
				 DMA_TO_DEVICE);
	if (dma_mapping_error(dev, eqc_dma)) {
		kfree(eqc);
		return -ENOMEM;
	}

	eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
	eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
	if (qm->ver == QM_HW_V1)
		eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
	eqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
	ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
	dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
	kfree(eqc);
	if (ret)
		return ret;

	aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL);
	if (!aeqc)
		return -ENOMEM;
	aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, aeqc_dma)) {
		kfree(aeqc);
		return -ENOMEM;
	}

	aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
	aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
	aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));

	ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
	dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
	kfree(aeqc);

	return ret;
}

static int __hisi_qm_start(struct hisi_qm *qm)
{
	int ret;

	WARN_ON(!qm->qdma.dma);

	if (qm->fun_type == QM_HW_PF) {
		ret = qm_dev_mem_reset(qm);
		if (ret)
			return ret;

		ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
		if (ret)
			return ret;
	}

	ret = qm_eq_ctx_cfg(qm);
	if (ret)
		return ret;

	ret = qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
	if (ret)
		return ret;

	ret = qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
	if (ret)
		return ret;

	writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
	writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);

	return 0;
}

/**
 * hisi_qm_start() - start qm
 * @qm: The qm to be started.
 *
 * This function starts a qm, then we can allocate qp from this qm.
 */
int hisi_qm_start(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret = 0;

	down_write(&qm->qps_lock);

	if (!qm_avail_state(qm, QM_START)) {
		up_write(&qm->qps_lock);
		return -EPERM;
	}

	dev_dbg(dev, "qm start with %d queue pairs\n", qm->qp_num);

	if (!qm->qp_num) {
		dev_err(dev, "qp_num should not be 0\n");
		ret = -EINVAL;
		goto err_unlock;
	}

	ret = __hisi_qm_start(qm);
	if (!ret)
		atomic_set(&qm->status.flags, QM_START);

err_unlock:
	up_write(&qm->qps_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_start);

static int qm_restart(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	struct hisi_qp *qp;
	int ret, i;

	ret = hisi_qm_start(qm);
	if (ret < 0)
		return ret;

	down_write(&qm->qps_lock);
	for (i = 0; i < qm->qp_num; i++) {
		qp = &qm->qp_array[i];
		if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
		    qp->is_resetting == true) {
			ret = qm_start_qp_nolock(qp, 0);
			if (ret < 0) {
				dev_err(dev, "Failed to start qp%d!\n", i);

				up_write(&qm->qps_lock);
				return ret;
			}
			qp->is_resetting = false;
		}
	}
	up_write(&qm->qps_lock);

	return 0;
}

/* Stop started qps in reset flow */
static int qm_stop_started_qp(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	struct hisi_qp *qp;
	int i, ret;

	for (i = 0; i < qm->qp_num; i++) {
		qp = &qm->qp_array[i];
		if (qp && atomic_read(&qp->qp_status.flags) == QP_START) {
			qp->is_resetting = true;
			ret = qm_stop_qp_nolock(qp);
			if (ret < 0) {
				dev_err(dev, "Failed to stop qp%d!\n", i);
				return ret;
			}
		}
	}

	return 0;
}

/**
 * This function clears the memory of all queues in a qm. The accelerator
 * reset flow can use it to clear the queues.
 */
static void qm_clear_queues(struct hisi_qm *qm)
{
	struct hisi_qp *qp;
	int i;

	for (i = 0; i < qm->qp_num; i++) {
		qp = &qm->qp_array[i];
		if (qp->is_resetting)
			memset(qp->qdma.va, 0, qp->qdma.size);
	}

	memset(qm->qdma.va, 0, qm->qdma.size);
}

/**
 * hisi_qm_stop() - Stop a qm.
 * @qm: The qm which will be stopped.
 *
 * This function stops qm and its qps, after which the qm can not accept
 * requests. Related resources are not released in this state; hisi_qm_start
 * can be used to start the qm again.
 */
int hisi_qm_stop(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret = 0;

	down_write(&qm->qps_lock);

	if (!qm_avail_state(qm, QM_STOP)) {
		ret = -EPERM;
		goto err_unlock;
	}

	if (qm->status.stop_reason == QM_SOFT_RESET ||
	    qm->status.stop_reason == QM_FLR) {
		ret = qm_stop_started_qp(qm);
		if (ret < 0) {
			dev_err(dev, "Failed to stop started qp!\n");
			goto err_unlock;
		}
	}

	/* Mask eq and aeq irq */
	writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
	writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);

	if (qm->fun_type == QM_HW_PF) {
		ret = hisi_qm_set_vft(qm, 0, 0, 0);
		if (ret < 0) {
			dev_err(dev, "Failed to set vft!\n");
			ret = -EBUSY;
			goto err_unlock;
		}
	}

	qm_clear_queues(qm);
	atomic_set(&qm->status.flags, QM_STOP);

err_unlock:
	up_write(&qm->qps_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_stop);
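
/*
 * Illustrative sketch (not part of this driver): hisi_qm_stop() leaves the
 * qm resources allocated, so a stopped qm can simply be started again.
 * "example_qm_restart" is a hypothetical helper showing the stop/start
 * pairing only; it does not handle queue pairs the way the reset flow does.
 */
static int example_qm_restart(struct hisi_qm *qm)
{
	int ret;

	ret = hisi_qm_stop(qm);
	if (ret)
		return ret;

	return hisi_qm_start(qm);
}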

static ssize_t qm_status_read(struct file *filp, char __user *buffer,
			      size_t count, loff_t *pos)
{
	struct hisi_qm *qm = filp->private_data;
	char buf[QM_DBG_READ_LEN];
	int val, cp_len, len;

	if (*pos)
		return 0;

	if (count < QM_DBG_READ_LEN)
		return -ENOSPC;

	val = atomic_read(&qm->status.flags);
	len = snprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
	if (!len)
		return -EFAULT;

	cp_len = copy_to_user(buffer, buf, len);
	if (cp_len)
		return -EFAULT;

	return (*pos = len);
}

static const struct file_operations qm_status_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = qm_status_read,
};

static int qm_debugfs_atomic64_set(void *data, u64 val)
{
	if (val)
		return -EINVAL;

	atomic64_set((atomic64_t *)data, 0);

	return 0;
}

static int qm_debugfs_atomic64_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
			 qm_debugfs_atomic64_set, "%llu\n");

/**
 * hisi_qm_debug_init() - Initialize qm related debugfs files.
 * @qm: The qm for which we want to add debugfs files.
 *
 * Create qm related debugfs files.
 */
int hisi_qm_debug_init(struct hisi_qm *qm)
{
	struct qm_dfx *dfx = &qm->debug.dfx;
	struct dentry *qm_d;
	void *data;
	int i, ret;

	qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
	qm->debug.qm_d = qm_d;

	/* only show this in PF */
	if (qm->fun_type == QM_HW_PF)
		for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
			if (qm_create_debugfs_file(qm, i)) {
				ret = -ENOENT;
				goto failed_to_create;
			}

	debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);

	debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
			    &qm_status_fops);
	for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
		data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
		debugfs_create_file(qm_dfx_files[i].name,
				    0644,
				    qm_d,
				    data,
				    &qm_atomic64_ops);
	}

	return 0;

failed_to_create:
	debugfs_remove_recursive(qm_d);
	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
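
/*
 * Illustrative sketch (not part of this driver): how an accelerator driver
 * might hook the qm debugfs entries under its own debugfs root. The
 * "example_debugfs_root" variable and "example_debug_init" function are
 * hypothetical; debugfs_create_dir() and hisi_qm_debug_init() are the real
 * calls.
 */
static struct dentry *example_debugfs_root;

static int example_debug_init(struct hisi_qm *qm)
{
	qm->debug.debug_root = debugfs_create_dir(dev_name(&qm->pdev->dev),
						  example_debugfs_root);

	/* creates the qm/ dir, qm_regs, status and the dfx counters */
	return hisi_qm_debug_init(qm);
}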

/**
 * hisi_qm_debug_regs_clear() - clear qm debug related registers.
 * @qm: The qm for which we want to clear its debug registers.
 */
void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
{
	struct qm_dfx_registers *regs;
	int i;

	/* clear current_q */
	writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
	writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	/*
	 * these registers are read-and-clear, so read them once here to
	 * clear them.
	 */
	writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
	regs = qm_dfx_regs;
	for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
		readl(qm->io_base + regs->reg_offset);
		regs++;
	}

	writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
}
EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);

static void qm_hw_error_init(struct hisi_qm *qm)
{
	const struct hisi_qm_err_info *err_info = &qm->err_ini->err_info;

	if (!qm->ops->hw_error_init) {
		dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
		return;
	}

	qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe);
}

static void qm_hw_error_uninit(struct hisi_qm *qm)
{
	if (!qm->ops->hw_error_uninit) {
		dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n");
		return;
	}

	qm->ops->hw_error_uninit(qm);
}

static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm)
{
	if (!qm->ops->hw_error_handle) {
		dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
		return ACC_ERR_NONE;
	}

	return qm->ops->hw_error_handle(qm);
}

/**
 * hisi_qm_get_hw_version() - Get hardware version of a qm.
 * @pdev: The device which hardware version we want to get.
 *
 * This function gets the hardware version of a qm. Return QM_HW_UNKNOWN
 * if the hardware version is not supported.
 */
enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev)
{
	switch (pdev->revision) {
	case QM_HW_V1:
	case QM_HW_V2:
		return pdev->revision;
	default:
		return QM_HW_UNKNOWN;
	}
}
EXPORT_SYMBOL_GPL(hisi_qm_get_hw_version);
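
/*
 * Illustrative sketch (not part of this driver): rejecting unsupported
 * silicon early in probe. "example_probe_check" is a hypothetical helper;
 * hisi_qm_get_hw_version() is the real call.
 */
static int example_probe_check(struct hisi_qm *qm, struct pci_dev *pdev)
{
	qm->ver = hisi_qm_get_hw_version(pdev);
	if (qm->ver == QM_HW_UNKNOWN) {
		pci_err(pdev, "unsupported hardware revision 0x%x\n",
			pdev->revision);
		return -ENODEV;
	}

	return 0;
}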

/**
 * hisi_qm_dev_err_init() - Initialize device error configuration.
 * @qm: The qm for which we want to do error initialization.
 *
 * Initialize QM and device error related configuration.
 */
void hisi_qm_dev_err_init(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_VF)
		return;

	qm_hw_error_init(qm);

	if (!qm->err_ini->hw_err_enable) {
		dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n");
		return;
	}
	qm->err_ini->hw_err_enable(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init);

/**
 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
 * @qm: The qm for which we want to do error uninitialization.
 *
 * Uninitialize QM and device error related configuration.
 */
void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_VF)
		return;

	qm_hw_error_uninit(qm);

	if (!qm->err_ini->hw_err_disable) {
		dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n");
		return;
	}
	qm->err_ini->hw_err_disable(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);

/**
 * hisi_qm_free_qps() - free multiple queue pairs.
 * @qps: The queue pairs to be freed.
 * @qp_num: The number of queue pairs.
 */
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
{
	int i;

	if (!qps || qp_num <= 0)
		return;

	for (i = qp_num - 1; i >= 0; i--)
		hisi_qm_release_qp(qps[i]);
}
EXPORT_SYMBOL_GPL(hisi_qm_free_qps);

static void free_list(struct list_head *head)
{
	struct hisi_qm_resource *res, *tmp;

	list_for_each_entry_safe(res, tmp, head, list) {
		list_del(&res->list);
		kfree(res);
	}
}

static int hisi_qm_sort_devices(int node, struct list_head *head,
				struct hisi_qm_list *qm_list)
{
	struct hisi_qm_resource *res, *tmp;
	struct hisi_qm *qm;
	struct list_head *n;
	struct device *dev;
	int dev_node = 0;

	list_for_each_entry(qm, &qm_list->list, list) {
		dev = &qm->pdev->dev;

		if (IS_ENABLED(CONFIG_NUMA)) {
			dev_node = dev_to_node(dev);
			if (dev_node < 0)
				dev_node = 0;
		}

		res = kzalloc(sizeof(*res), GFP_KERNEL);
		if (!res)
			return -ENOMEM;

		res->qm = qm;
		res->distance = node_distance(dev_node, node);
		n = head;
		list_for_each_entry(tmp, head, list) {
			if (res->distance < tmp->distance) {
				n = &tmp->list;
				break;
			}
		}
		list_add_tail(&res->list, n);
	}

	return 0;
}

/**
 * hisi_qm_alloc_qps_node() - Create multiple queue pairs.
 * @qm_list: The list of all available devices.
 * @qp_num: The number of queue pairs to create.
 * @alg_type: The algorithm type.
 * @node: The numa node.
 * @qps: The created queue pairs.
 *
 * This function sorts all available devices by NUMA distance, then tries to
 * create all queue pairs from a single device. If no device can satisfy the
 * request, an error is returned.
 */
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
			   u8 alg_type, int node, struct hisi_qp **qps)
{
	struct hisi_qm_resource *tmp;
	int ret = -ENODEV;
	LIST_HEAD(head);
	int i;

	if (!qps || !qm_list || qp_num <= 0)
		return -EINVAL;

	mutex_lock(&qm_list->lock);
	if (hisi_qm_sort_devices(node, &head, qm_list)) {
		mutex_unlock(&qm_list->lock);
		goto err;
	}

	list_for_each_entry(tmp, &head, list) {
		for (i = 0; i < qp_num; i++) {
			qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
			if (IS_ERR(qps[i])) {
				hisi_qm_free_qps(qps, i);
				break;
			}
		}

		if (i == qp_num) {
			ret = 0;
			break;
		}
	}

	mutex_unlock(&qm_list->lock);
	if (ret)
		pr_info("Failed to create qps, node[%d], alg[%d], qp[%d]!\n",
			node, alg_type, qp_num);

err:
	free_list(&head);
	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
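
/*
 * Illustrative sketch (not part of this driver): allocating two queue pairs
 * close to a given NUMA node and releasing them again. "example_get_qps" and
 * the zero alg_type are hypothetical; hisi_qm_alloc_qps_node() and
 * hisi_qm_free_qps() are the real API.
 */
static int example_get_qps(struct hisi_qm_list *qm_list, int node,
			   struct hisi_qp **qps)
{
	u8 example_alg_type = 0;	/* engine-specific value in practice */
	int ret;

	ret = hisi_qm_alloc_qps_node(qm_list, 2, example_alg_type, node, qps);
	if (ret)
		return ret;

	/* ... submit work on qps[0] and qps[1] here ... */

	hisi_qm_free_qps(qps, 2);

	return 0;
}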

static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
{
	u32 remain_q_num, q_num, i, j;
	u32 q_base = qm->qp_num;
	int ret;

	if (!num_vfs)
		return -EINVAL;

	remain_q_num = qm->ctrl_qp_num - qm->qp_num;

	/* If the remaining queues are not enough, return an error. */
	if (qm->ctrl_qp_num < qm->qp_num || remain_q_num < num_vfs)
		return -EINVAL;

	q_num = remain_q_num / num_vfs;
	for (i = 1; i <= num_vfs; i++) {
		if (i == num_vfs)
			q_num += remain_q_num % num_vfs;
		ret = hisi_qm_set_vft(qm, i, q_base, q_num);
		if (ret) {
			for (j = i; j > 0; j--)
				hisi_qm_set_vft(qm, j, 0, 0);
			return ret;
		}
		q_base += q_num;
	}

	return 0;
}

static int qm_clear_vft_config(struct hisi_qm *qm)
{
	int ret;
	u32 i;

	for (i = 1; i <= qm->vfs_num; i++) {
		ret = hisi_qm_set_vft(qm, i, 0, 0);
		if (ret)
			return ret;
	}
	qm->vfs_num = 0;

	return 0;
}

/**
 * hisi_qm_sriov_enable() - enable virtual functions
 * @pdev: the PCIe device
 * @max_vfs: the number of virtual functions to enable
 *
 * Returns the number of enabled VFs. If VFs are already enabled or max_vfs
 * is more than the total number the device can enable, returns failure.
 */
int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int pre_existing_vfs, num_vfs, total_vfs, ret;

	total_vfs = pci_sriov_get_totalvfs(pdev);
	pre_existing_vfs = pci_num_vf(pdev);
	if (pre_existing_vfs) {
		pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n",
			pre_existing_vfs);
		return 0;
	}

	num_vfs = min_t(int, max_vfs, total_vfs);
	ret = qm_vf_q_assign(qm, num_vfs);
	if (ret) {
		pci_err(pdev, "Can't assign queues for VF!\n");
		return ret;
	}

	qm->vfs_num = num_vfs;

	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret) {
		pci_err(pdev, "Can't enable VF!\n");
		qm_clear_vft_config(qm);
		return ret;
	}

	pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);

	return num_vfs;
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);

/**
 * hisi_qm_sriov_disable - disable virtual functions
 * @pdev: the PCI device
 *
 * Return failure if there are VFs assigned already.
 */
int hisi_qm_sriov_disable(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	if (pci_vfs_assigned(pdev)) {
		pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
		return -EPERM;
	}

	/* remove in hpre_pci_driver will be called to free VF resources */
	pci_disable_sriov(pdev);
	return qm_clear_vft_config(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);

/**
 * hisi_qm_sriov_configure - configure the number of VFs
 * @pdev: The PCI device
 * @num_vfs: The number of VFs to enable
 *
 * Enable SR-IOV according to num_vfs, 0 means disable.
 */
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs == 0)
		return hisi_qm_sriov_disable(pdev);
	else
		return hisi_qm_sriov_enable(pdev, num_vfs);
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
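
/*
 * Illustrative sketch (not part of this driver): wiring SR-IOV support into
 * an accelerator's pci_driver. "example_pci_driver" and its name are
 * hypothetical placeholders, and probe/remove/id_table are omitted; the
 * .sriov_configure hook is the real integration point, which lets
 * "echo N > /sys/bus/pci/devices/.../sriov_numvfs" reach
 * hisi_qm_sriov_configure().
 */
static struct pci_driver example_pci_driver = {
	.name		 = "example_acc",
	.sriov_configure = hisi_qm_sriov_configure,
};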

static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
{
	u32 err_sts;

	if (!qm->err_ini->get_dev_hw_err_status) {
		dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n");
		return ACC_ERR_NONE;
	}

	/* get device hardware error status */
	err_sts = qm->err_ini->get_dev_hw_err_status(qm);
	if (err_sts) {
		if (err_sts & qm->err_ini->err_info.ecc_2bits_mask)
			qm->err_status.is_dev_ecc_mbit = true;

		if (!qm->err_ini->log_dev_hw_err) {
			dev_err(&qm->pdev->dev, "Device doesn't support log hw error!\n");
			return ACC_ERR_NEED_RESET;
		}

		qm->err_ini->log_dev_hw_err(qm, err_sts);
		return ACC_ERR_NEED_RESET;
	}

	return ACC_ERR_RECOVERED;
}

static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
{
	enum acc_err_result qm_ret, dev_ret;

	/* log qm error */
	qm_ret = qm_hw_error_handle(qm);

	/* log device error */
	dev_ret = qm_dev_err_handle(qm);

	return (qm_ret == ACC_ERR_NEED_RESET ||
		dev_ret == ACC_ERR_NEED_RESET) ?
		ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
}

/**
 * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
 * @pdev: The PCI device which needs to report the error.
 * @state: The connectivity between CPU and device.
 *
 * We register this function into the PCIe AER handlers. It reports device or
 * qm hardware error status when an error occurs.
 */
pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
					  pci_channel_state_t state)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	enum acc_err_result ret;

	if (pdev->is_virtfn)
		return PCI_ERS_RESULT_NONE;

	pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	ret = qm_process_dev_error(qm);
	if (ret == ACC_ERR_NEED_RESET)
		return PCI_ERS_RESULT_NEED_RESET;

	return PCI_ERS_RESULT_RECOVERED;
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);

static int qm_get_hw_error_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
}

static int qm_check_req_recv(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;
	u32 val;

	writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID);
	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
					 (val == ACC_VENDOR_ID_VALUE),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		dev_err(&pdev->dev, "Fails to read QM reg!\n");
		return ret;
	}

	writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID);
	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
					 (val == PCI_VENDOR_ID_HUAWEI),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret)
		dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n");

	return ret;
}

static int qm_set_pf_mse(struct hisi_qm *qm, bool set)
{
	struct pci_dev *pdev = qm->pdev;
	u16 cmd;
	int i;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (set)
		cmd |= PCI_COMMAND_MEMORY;
	else
		cmd &= ~PCI_COMMAND_MEMORY;

	pci_write_config_word(pdev, PCI_COMMAND, cmd);
	for (i = 0; i < MAX_WAIT_COUNTS; i++) {
		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
		if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1))
			return 0;

		udelay(1);
	}

	return -ETIMEDOUT;
}

static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
{
	struct pci_dev *pdev = qm->pdev;
	u16 sriov_ctrl;
	int pos;
	int i;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
	if (set)
		sriov_ctrl |= PCI_SRIOV_CTRL_MSE;
	else
		sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE;
	pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl);

	for (i = 0; i < MAX_WAIT_COUNTS; i++) {
		pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
		if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
		    ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT)
			return 0;

		udelay(1);
	}

	return -ETIMEDOUT;
}

static int qm_set_msi(struct hisi_qm *qm, bool set)
{
	struct pci_dev *pdev = qm->pdev;

	if (set) {
		pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
				       0);
	} else {
		pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
				       ACC_PEH_MSI_DISABLE);
		if (qm->err_status.is_qm_ecc_mbit ||
		    qm->err_status.is_dev_ecc_mbit)
			return 0;

		mdelay(1);
		if (readl(qm->io_base + QM_PEH_DFX_INFO0))
			return -EFAULT;
	}

	return 0;
}

static int qm_vf_reset_prepare(struct hisi_qm *qm)
{
	struct hisi_qm_list *qm_list = qm->qm_list;
	int stop_reason = qm->status.stop_reason;
	struct pci_dev *pdev = qm->pdev;
	struct pci_dev *virtfn;
	struct hisi_qm *vf_qm;
	int ret = 0;

	mutex_lock(&qm_list->lock);
	list_for_each_entry(vf_qm, &qm_list->list, list) {
		virtfn = vf_qm->pdev;
		if (virtfn == pdev)
			continue;

		if (pci_physfn(virtfn) == pdev) {
			vf_qm->status.stop_reason = stop_reason;
			ret = hisi_qm_stop(vf_qm);
			if (ret)
				goto stop_fail;
		}
	}

stop_fail:
	mutex_unlock(&qm_list->lock);
	return ret;
}

static int qm_reset_prepare_ready(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
	int delay = 0;

	/* All reset requests need to be queued for processing */
	while (test_and_set_bit(QM_DEV_RESET_FLAG, &pf_qm->reset_flag)) {
		msleep(++delay);
		if (delay > QM_RESET_WAIT_TIMEOUT)
			return -EBUSY;
	}

	return 0;
}

static int qm_controller_reset_prepare(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		pci_err(pdev, "Controller reset not ready!\n");
		return ret;
	}

	if (qm->vfs_num) {
		ret = qm_vf_reset_prepare(qm);
		if (ret) {
			pci_err(pdev, "Fails to stop VFs!\n");
			return ret;
		}
	}

	qm->status.stop_reason = QM_SOFT_RESET;
	ret = hisi_qm_stop(qm);
	if (ret) {
		pci_err(pdev, "Fails to stop QM!\n");
		return ret;
	}

	return 0;
}

static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
{
	u32 nfe_enb = 0;

	if (!qm->err_status.is_dev_ecc_mbit &&
	    qm->err_status.is_qm_ecc_mbit &&
	    qm->err_ini->close_axi_master_ooo) {
		qm->err_ini->close_axi_master_ooo(qm);
	} else if (qm->err_status.is_dev_ecc_mbit &&
		   !qm->err_status.is_qm_ecc_mbit &&
		   !qm->err_ini->close_axi_master_ooo) {
		nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
		writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
		       qm->io_base + QM_RAS_NFE_ENABLE);
		writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
	}
}

static int qm_soft_reset(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;
	u32 val;

	/* Ensure all doorbells and mailboxes received by QM */
	ret = qm_check_req_recv(qm);
	if (ret)
		return ret;

	if (qm->vfs_num) {
		ret = qm_set_vf_mse(qm, false);
		if (ret) {
			pci_err(pdev, "Fails to disable vf MSE bit.\n");
			return ret;
		}
	}

	ret = qm_set_msi(qm, false);
	if (ret) {
		pci_err(pdev, "Fails to disable PEH MSI bit.\n");
		return ret;
	}

	qm_dev_ecc_mbit_handle(qm);

	/* OOO register set and check */
	writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
	       qm->io_base + ACC_MASTER_GLOBAL_CTRL);

	/* If bus lock, reset chip */
	ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
					 val,
					 (val == ACC_MASTER_TRANS_RETURN_RW),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		pci_emerg(pdev, "Bus lock! Please reset system.\n");
		return ret;
	}

	ret = qm_set_pf_mse(qm, false);
	if (ret) {
		pci_err(pdev, "Fails to disable pf MSE bit.\n");
		return ret;
	}

	/* The reset related sub-control registers are not in PCI BAR */
	if (ACPI_HANDLE(&pdev->dev)) {
		unsigned long long value = 0;
		acpi_status s;

		s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
					  qm->err_ini->err_info.acpi_rst,
					  NULL, &value);
		if (ACPI_FAILURE(s)) {
			pci_err(pdev, "NO controller reset method!\n");
			return -EIO;
		}

		if (value) {
			pci_err(pdev, "Reset step %llu failed!\n", value);
			return -EIO;
		}
	} else {
		pci_err(pdev, "No reset method!\n");
		return -EINVAL;
	}

	return 0;
}

static int qm_vf_reset_done(struct hisi_qm *qm)
{
	struct hisi_qm_list *qm_list = qm->qm_list;
	struct pci_dev *pdev = qm->pdev;
	struct pci_dev *virtfn;
	struct hisi_qm *vf_qm;
	int ret = 0;

	mutex_lock(&qm_list->lock);
	list_for_each_entry(vf_qm, &qm_list->list, list) {
		virtfn = vf_qm->pdev;
		if (virtfn == pdev)
			continue;

		if (pci_physfn(virtfn) == pdev) {
			ret = qm_restart(vf_qm);
			if (ret)
				goto restart_fail;
		}
	}

restart_fail:
	mutex_unlock(&qm_list->lock);
	return ret;
}

static int qm_get_dev_err_status(struct hisi_qm *qm)
{
	return qm->err_ini->get_dev_hw_err_status(qm);
}

static int qm_dev_hw_init(struct hisi_qm *qm)
{
	return qm->err_ini->hw_init(qm);
}

static void qm_restart_prepare(struct hisi_qm *qm)
{
	u32 value;

	if (!qm->err_status.is_qm_ecc_mbit &&
	    !qm->err_status.is_dev_ecc_mbit)
		return;

	/* temporarily close the OOO port used for PEH to write out MSI */
	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
	writel(value & ~qm->err_ini->err_info.msi_wr_port,
	       qm->io_base + ACC_AM_CFG_PORT_WR_EN);

	/* clear dev ecc 2bit error source if present */
	value = qm_get_dev_err_status(qm) &
		qm->err_ini->err_info.ecc_2bits_mask;
	if (value && qm->err_ini->clear_dev_hw_err_status)
		qm->err_ini->clear_dev_hw_err_status(qm, value);

	/* clear QM ecc mbit error source */
	writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);

	/* clear AM Reorder Buffer ecc mbit source */
	writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);

	if (qm->err_ini->open_axi_master_ooo)
		qm->err_ini->open_axi_master_ooo(qm);
}

static void qm_restart_done(struct hisi_qm *qm)
{
	u32 value;

	if (!qm->err_status.is_qm_ecc_mbit &&
	    !qm->err_status.is_dev_ecc_mbit)
		return;

	/* open the OOO port for PEH to write out MSI */
	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
	value |= qm->err_ini->err_info.msi_wr_port;
	writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);

	qm->err_status.is_qm_ecc_mbit = false;
	qm->err_status.is_dev_ecc_mbit = false;
}

static int qm_controller_reset_done(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm_set_msi(qm, true);
	if (ret) {
		pci_err(pdev, "Fails to enable PEH MSI bit!\n");
		return ret;
	}

	ret = qm_set_pf_mse(qm, true);
	if (ret) {
		pci_err(pdev, "Fails to enable pf MSE bit!\n");
		return ret;
	}

	if (qm->vfs_num) {
		ret = qm_set_vf_mse(qm, true);
		if (ret) {
			pci_err(pdev, "Fails to enable vf MSE bit!\n");
			return ret;
		}
	}

	ret = qm_dev_hw_init(qm);
	if (ret) {
		pci_err(pdev, "Failed to init device\n");
		return ret;
	}

	qm_restart_prepare(qm);

	ret = qm_restart(qm);
	if (ret) {
		pci_err(pdev, "Failed to start QM!\n");
		return ret;
	}

	if (qm->vfs_num) {
		ret = qm_vf_q_assign(qm, qm->vfs_num);
		if (ret) {
			pci_err(pdev, "Failed to assign queue!\n");
			return ret;
		}
	}

	ret = qm_vf_reset_done(qm);
	if (ret) {
		pci_err(pdev, "Failed to start VFs!\n");
		return -EPERM;
	}

	hisi_qm_dev_err_init(qm);
	qm_restart_done(qm);

	clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag);

	return 0;
}

static int qm_controller_reset(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	pci_info(pdev, "Controller resetting...\n");

	ret = qm_controller_reset_prepare(qm);
	if (ret)
		return ret;

	ret = qm_soft_reset(qm);
	if (ret) {
		pci_err(pdev, "Controller reset failed (%d)\n", ret);
		return ret;
	}

	ret = qm_controller_reset_done(qm);
	if (ret)
		return ret;

	pci_info(pdev, "Controller reset complete\n");

	return 0;
}

/**
 * hisi_qm_dev_slot_reset() - slot reset
 * @pdev: the PCIe device
 *
 * This function offers the QM related PCIe device reset interface. Drivers
 * which use QM can use this function as slot_reset in their struct
 * pci_error_handlers.
 */
pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	if (pdev->is_virtfn)
		return PCI_ERS_RESULT_RECOVERED;

	pci_aer_clear_nonfatal_status(pdev);

	/* reset pcie device controller */
	ret = qm_controller_reset(qm);
	if (ret) {
		pci_err(pdev, "Controller reset failed (%d)\n", ret);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);

/* check whether the interrupt is an ecc-mbit error or not */
static int qm_check_dev_error(struct hisi_qm *qm)
{
	int ret;

	if (qm->fun_type == QM_HW_VF)
		return 0;

	ret = qm_get_hw_error_status(qm) & QM_ECC_MBIT;
	if (ret)
		return ret;

	return (qm_get_dev_err_status(qm) &
		qm->err_ini->err_info.ecc_2bits_mask);
}

void hisi_qm_reset_prepare(struct pci_dev *pdev)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	u32 delay = 0;
	int ret;

	hisi_qm_dev_err_uninit(pf_qm);

	/*
	 * Check whether there is an ECC mbit error. If there is, we need to
	 * wait for the soft reset to fix it.
	 */
	while (qm_check_dev_error(pf_qm)) {
		msleep(++delay);
		if (delay > QM_RESET_WAIT_TIMEOUT)
			return;
	}

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		pci_err(pdev, "FLR not ready!\n");
		return;
	}

	if (qm->vfs_num) {
		ret = qm_vf_reset_prepare(qm);
		if (ret) {
			pci_err(pdev, "Failed to prepare reset, ret = %d.\n",
				ret);
			return;
		}
	}

	ret = hisi_qm_stop(qm);
	if (ret) {
		pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
		return;
	}

	pci_info(pdev, "FLR resetting...\n");
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);

static bool qm_flr_reset_complete(struct pci_dev *pdev)
{
	struct pci_dev *pf_pdev = pci_physfn(pdev);
	struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
	u32 id;

	pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
	if (id == QM_PCI_COMMAND_INVALID) {
		pci_err(pdev, "Device can not be used!\n");
		return false;
	}

	clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag);

	return true;
}

void hisi_qm_reset_done(struct pci_dev *pdev)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	hisi_qm_dev_err_init(pf_qm);

	ret = qm_restart(qm);
	if (ret) {
		pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
		goto flr_done;
	}

	if (qm->fun_type == QM_HW_PF) {
		ret = qm_dev_hw_init(qm);
		if (ret) {
			pci_err(pdev, "Failed to init PF, ret = %d.\n", ret);
			goto flr_done;
		}

		if (!qm->vfs_num)
			goto flr_done;

		ret = qm_vf_q_assign(qm, qm->vfs_num);
		if (ret) {
			pci_err(pdev, "Failed to assign VFs, ret = %d.\n", ret);
			goto flr_done;
		}

		ret = qm_vf_reset_done(qm);
		if (ret) {
			pci_err(pdev, "Failed to start VFs, ret = %d.\n", ret);
			goto flr_done;
		}
	}

flr_done:
	if (qm_flr_reset_complete(pdev))
		pci_info(pdev, "FLR reset complete\n");
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
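
/*
 * Illustrative sketch (not part of this driver): the AER/FLR hooks an
 * accelerator driver is expected to plug into its pci_driver.err_handler.
 * "example_err_handler" is a hypothetical name; the four callbacks are the
 * exported helpers defined above.
 */
static const struct pci_error_handlers example_err_handler = {
	.error_detected	= hisi_qm_dev_err_detected,
	.slot_reset	= hisi_qm_dev_slot_reset,
	.reset_prepare	= hisi_qm_reset_prepare,
	.reset_done	= hisi_qm_reset_done,
};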

static irqreturn_t qm_abnormal_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	enum acc_err_result ret;

	atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
	ret = qm_process_dev_error(qm);
	if (ret == ACC_ERR_NEED_RESET)
		schedule_work(&qm->rst_work);

	return IRQ_HANDLED;
}

static int qm_irq_register(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
			  qm_irq, IRQF_SHARED, qm->dev_name, qm);
	if (ret)
		return ret;

	if (qm->ver == QM_HW_V2) {
		ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR),
				  qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm);
		if (ret)
			goto err_aeq_irq;

		if (qm->fun_type == QM_HW_PF) {
			ret = request_irq(pci_irq_vector(pdev,
					  QM_ABNORMAL_EVENT_IRQ_VECTOR),
					  qm_abnormal_irq, IRQF_SHARED,
					  qm->dev_name, qm);
			if (ret)
				goto err_abnormal_irq;
		}
	}

	return 0;

err_abnormal_irq:
	free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
err_aeq_irq:
	free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
	return ret;
}

static void hisi_qm_controller_reset(struct work_struct *rst_work)
{
	struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
	int ret;

	/* reset pcie device controller */
	ret = qm_controller_reset(qm);
	if (ret)
		dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);
}

/**
 * hisi_qm_init() - Initialize the configuration of a qm.
 * @qm: The qm needing init.
 *
 * This function initializes the qm, then hisi_qm_start() can be called to put
 * the qm to work.
 */
int hisi_qm_init(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	unsigned int num_vec;
	int ret;

	hisi_qm_pre_init(qm);

	ret = qm_alloc_uacce(qm);
	if (ret < 0)
		dev_warn(&pdev->dev, "fail to alloc uacce (%d)\n", ret);

	ret = pci_enable_device_mem(pdev);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to enable device mem!\n");
		goto err_remove_uacce;
	}

	ret = pci_request_mem_regions(pdev, qm->dev_name);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to request mem regions!\n");
		goto err_disable_pcidev;
	}

	qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
	qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2);
	qm->io_base = ioremap(qm->phys_base, qm->phys_size);
	if (!qm->io_base) {
		ret = -EIO;
		goto err_release_mem_regions;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret < 0)
		goto err_iounmap;
	pci_set_master(pdev);

	if (!qm->ops->get_irq_num) {
		ret = -EOPNOTSUPP;
		goto err_iounmap;
	}
	num_vec = qm->ops->get_irq_num(qm);
	ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
	if (ret < 0) {
		dev_err(dev, "Failed to enable MSI vectors!\n");
		goto err_iounmap;
	}

	ret = qm_irq_register(qm);
	if (ret)
		goto err_free_irq_vectors;

	if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) {
		/* v2 starts to support getting vft by mailbox */
		ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
		if (ret)
			goto err_irq_unregister;
	}

	ret = hisi_qm_memory_init(qm);
	if (ret)
		goto err_irq_unregister;

	INIT_WORK(&qm->work, qm_work_process);
	if (qm->fun_type == QM_HW_PF)
		INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);

	atomic_set(&qm->status.flags, QM_INIT);

	return 0;

err_irq_unregister:
	qm_irq_unregister(qm);
err_free_irq_vectors:
	pci_free_irq_vectors(pdev);
err_iounmap:
	iounmap(qm->io_base);
err_release_mem_regions:
	pci_release_mem_regions(pdev);
err_disable_pcidev:
	pci_disable_device(pdev);
err_remove_uacce:
	uacce_remove(qm->uacce);
	qm->uacce = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_init);
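
/*
 * Illustrative sketch (not part of this driver): a rough probe-time ordering
 * for a driver built on this qm. "example_probe_flow", the hard-coded sqe
 * size and the device name are hypothetical assumptions; the hisi_qm_* calls
 * are the real entry points, used here in roughly the order the engine
 * drivers follow.
 */
static int example_probe_flow(struct hisi_qm *qm, struct pci_dev *pdev)
{
	int ret;

	qm->pdev = pdev;
	qm->ver = hisi_qm_get_hw_version(pdev);
	qm->sqe_size = 128;		/* assumed engine-specific sqe size */
	qm->dev_name = "example_acc";	/* hypothetical name */
	/* a PF driver must also set qm->fun_type, qm->qp_base and qm->qp_num */

	ret = hisi_qm_init(qm);		/* PCI/irq/memory setup, state = INIT */
	if (ret)
		return ret;

	hisi_qm_dev_err_init(qm);	/* enable QM/device RAS reporting (PF) */

	ret = hisi_qm_start(qm);	/* state = START, qps can be created */
	if (ret)
		goto err_uninit;

	return 0;

err_uninit:
	hisi_qm_dev_err_uninit(qm);
	hisi_qm_uninit(qm);
	return ret;
}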

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");