// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */
#include <linux/errno.h>
#include <linux/types.h>

#include <rdma/ib_verbs.h>
#include "siw.h"

static int map_wc_opcode[SIW_NUM_OPCODES] = {
        [SIW_OP_WRITE] = IB_WC_RDMA_WRITE,
        [SIW_OP_SEND] = IB_WC_SEND,
        [SIW_OP_SEND_WITH_IMM] = IB_WC_SEND,
        [SIW_OP_READ] = IB_WC_RDMA_READ,
        [SIW_OP_READ_LOCAL_INV] = IB_WC_RDMA_READ,
        [SIW_OP_COMP_AND_SWAP] = IB_WC_COMP_SWAP,
        [SIW_OP_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
        [SIW_OP_INVAL_STAG] = IB_WC_LOCAL_INV,
        [SIW_OP_REG_MR] = IB_WC_REG_MR,
        [SIW_OP_RECEIVE] = IB_WC_RECV,
        [SIW_OP_READ_RESPONSE] = -1 /* not used */
};

static struct {
        enum siw_wc_status siw;
        enum ib_wc_status ib;
} map_cqe_status[SIW_NUM_WC_STATUS] = {
        { SIW_WC_SUCCESS, IB_WC_SUCCESS },
        { SIW_WC_LOC_LEN_ERR, IB_WC_LOC_LEN_ERR },
        { SIW_WC_LOC_PROT_ERR, IB_WC_LOC_PROT_ERR },
        { SIW_WC_LOC_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR },
        { SIW_WC_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
        { SIW_WC_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
        { SIW_WC_LOC_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
        { SIW_WC_REM_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
        { SIW_WC_REM_INV_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
        { SIW_WC_GENERAL_ERR, IB_WC_GENERAL_ERR }
};

/*
 * Reap one CQE from the CQ. Only used by kernel clients
 * during CQ normal operation. Might be called during CQ
 * flush for user mapped CQE array as well.
 */
int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
{
        struct siw_cqe *cqe;
        unsigned long flags;

        spin_lock_irqsave(&cq->lock, flags);

        cqe = &cq->queue[cq->cq_get % cq->num_cqe];
        if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) {
                memset(wc, 0, sizeof(*wc));
                wc->wr_id = cqe->id;
                wc->byte_len = cqe->bytes;

                /*
                 * During CQ flush, user-land CQEs may also get
                 * reaped here; those do not hold a QP reference
                 * and do not qualify for memory extension verbs.
                 */
                if (likely(rdma_is_kernel_res(&cq->base_cq.res))) {
                        if (cqe->flags & SIW_WQE_REM_INVAL) {
                                wc->ex.invalidate_rkey = cqe->inval_stag;
                                wc->wc_flags = IB_WC_WITH_INVALIDATE;
                        }
                        wc->qp = cqe->base_qp;
                        wc->opcode = map_wc_opcode[cqe->opcode];
                        wc->status = map_cqe_status[cqe->status].ib;
                        siw_dbg_cq(cq,
                                   "idx %u, type %d, flags %2x, id 0x%pK\n",
                                   cq->cq_get % cq->num_cqe, cqe->opcode,
                                   cqe->flags, (void *)(uintptr_t)cqe->id);
                } else {
                        /*
                         * A malicious user may set invalid opcode or
                         * status in the user mmapped CQE array.
                         * Sanity check and correct values in that case
                         * to avoid out-of-bounds access to global arrays
                         * for opcode and status mapping.
                         */
                        u8 opcode = cqe->opcode;
                        u16 status = cqe->status;

                        if (opcode >= SIW_NUM_OPCODES) {
                                opcode = 0;
                                status = SIW_WC_GENERAL_ERR;
                        } else if (status >= SIW_NUM_WC_STATUS) {
                                status = SIW_WC_GENERAL_ERR;
                        }
                        wc->opcode = map_wc_opcode[opcode];
                        wc->status = map_cqe_status[status].ib;
                }
                WRITE_ONCE(cqe->flags, 0);
                cq->cq_get++;

                spin_unlock_irqrestore(&cq->lock, flags);

                return 1;
        }
        spin_unlock_irqrestore(&cq->lock, flags);

        return 0;
}
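
/*
 * Illustrative sketch only (not part of this file): kernel ULPs do not call
 * siw_reap_cqe() directly; they call ib_poll_cq(), which dispatches to the
 * driver's poll_cq verb. Assuming the usual ib_device_ops poll_cq signature
 * and the to_siw_cq() container_of helper from siw.h, such a verb would
 * drain up to @num_entries CQEs via siw_reap_cqe() as shown below. Guarded
 * by #if 0 because it is an example, not driver code.
 */
#if 0
static int example_poll_cq(struct ib_cq *base_cq, int num_entries,
                           struct ib_wc *wc)
{
        struct siw_cq *cq = to_siw_cq(base_cq);
        int reaped;

        /* Reap completed CQEs until the array is full or the CQ is empty */
        for (reaped = 0; reaped < num_entries; reaped++) {
                if (!siw_reap_cqe(cq, wc + reaped))
                        break;
        }
        return reaped; /* number of work completions written to @wc */
}
#endif
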
/*
 * siw_cq_flush()
 *
 * Flush all CQ elements.
 */
void siw_cq_flush(struct siw_cq *cq)
{
        struct ib_wc wc;

        while (siw_reap_cqe(cq, &wc))
                ;
}