/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_LOC_H
#define RXE_LOC_H

/* rxe_av.c */
void rxe_init_av(struct rdma_ah_attr *attr, struct rxe_av *av);

int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr);

void rxe_av_from_attr(u8 port_num, struct rxe_av *av,
		      struct rdma_ah_attr *attr);

void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr);

void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr);

struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt);

/* rxe_cq.c */
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector);

int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_udata *udata,
		     struct rxe_create_cq_resp __user *uresp);

int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe,
			struct rxe_resize_cq_resp __user *uresp,
			struct ib_udata *udata);

int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);

void rxe_cq_disable(struct rxe_cq *cq);

void rxe_cq_cleanup(struct rxe_pool_entry *arg);

/* rxe_mcast.c */
int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
		      struct rxe_mc_grp **grp_p);

int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct rxe_mc_grp *grp);

int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			    union ib_gid *mgid);

void rxe_drop_all_mcast_groups(struct rxe_qp *qp);

void rxe_mc_cleanup(struct rxe_pool_entry *arg);

/* rxe_mmap.c */
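/*
 * Bookkeeping for an object (e.g. a queue buffer) handed to user space
 * via mmap(); ref-counted through 'ref' and kept on the owning
 * context's pending_mmaps list until it is mapped.
 */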
struct rxe_mmap_info {
	struct list_head	pending_mmaps;
	struct ib_ucontext	*context;
	struct kref		ref;
	void			*obj;

	struct mminfo info;
};

void rxe_mmap_release(struct kref *ref);

struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev, u32 size,
					   struct ib_udata *udata, void *obj);

int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

/* rxe_mr.c */
u8 rxe_get_next_key(u32 last_key);
void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr);
int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
		     int access, struct rxe_mr *mr);
int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);
int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
		enum rxe_mr_copy_dir dir, u32 *crcp);
int copy_data(struct rxe_pd *pd, int access,
	      struct rxe_dma_info *dma, void *addr, int length,
	      enum rxe_mr_copy_dir dir, u32 *crcp);
void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
			 enum rxe_mr_lookup_type type);
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey);
int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
void rxe_mr_cleanup(struct rxe_pool_entry *arg);

/* rxe_mw.c */
int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata);
int rxe_dealloc_mw(struct ib_mw *ibmw);
int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey);
struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey);
void rxe_mw_cleanup(struct rxe_pool_entry *arg);

/* rxe_net.c */
void rxe_loopback(struct sk_buff *skb);
int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb);
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
				int paylen, struct rxe_pkt_info *pkt);
int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc);
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);

int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid);
int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid);

/* rxe_qp.c */
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);

int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd, struct ib_udata *udata);

int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);

int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask);

int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr,
		     int mask, struct ib_udata *udata);

int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask);

void rxe_qp_error(struct rxe_qp *qp);

void rxe_qp_destroy(struct rxe_qp *qp);

void rxe_qp_cleanup(struct rxe_pool_entry *arg);

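/* helpers to read commonly used QP fields */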
static inline int qp_num(struct rxe_qp *qp)
{
	return qp->ibqp.qp_num;
}

static inline enum ib_qp_type qp_type(struct rxe_qp *qp)
{
	return qp->ibqp.qp_type;
}

static inline enum ib_qp_state qp_state(struct rxe_qp *qp)
{
	return qp->attr.qp_state;
}

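/*
 * Connected QPs (RC/UC) use the negotiated path MTU; other QP types
 * are treated as if the largest MTU (4096) applies.
 */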
static inline int qp_mtu(struct rxe_qp *qp)
{
	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
		return qp->attr.path_mtu;
	else
		return IB_MTU_4096;
}

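/* size in bytes of a receive WQE with room for max_sge SGEs */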
static inline int rcv_wqe_size(int max_sge)
{
	return sizeof(struct rxe_recv_wqe) +
		max_sge * sizeof(struct ib_sge);
}

void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);

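/* advance the responder's read/atomic resource index, wrapping at max_dest_rd_atomic */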
static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
{
	qp->resp.res_head++;
	if (unlikely(qp->resp.res_head == qp->attr.max_dest_rd_atomic))
		qp->resp.res_head = 0;
}

void retransmit_timer(struct timer_list *t);
void rnr_nak_timer(struct timer_list *t);

/* rxe_srq.c */
#define IB_SRQ_INIT_MASK (~IB_SRQ_LIMIT)

int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);

int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_init_attr *init, struct ib_udata *udata,
		      struct rxe_create_srq_resp __user *uresp);

int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
		      struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata);

void rxe_dealloc(struct ib_device *ib_dev);

int rxe_completer(void *arg);
int rxe_requester(void *arg);
int rxe_responder(void *arg);

u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb);

void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);

void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);

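/* look up the opcode mask for a work request on this QP type */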
static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
{
	return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];
}

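/*
 * Send a packet: drop it if the QP is not ready, loop it back for
 * loopback packets, otherwise hand it to the network layer; on the
 * last packet of a non-RC request the WQE is marked done and the
 * completion task is scheduled.
 */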
static inline int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
				  struct sk_buff *skb)
{
	int err;
	int is_request = pkt->mask & RXE_REQ_MASK;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((is_request && (qp->req.state != QP_STATE_READY)) ||
	    (!is_request && (qp->resp.state != QP_STATE_READY))) {
		pr_info("Packet dropped. QP is not in ready state\n");
		goto drop;
	}

	if (pkt->mask & RXE_LOOPBACK_MASK) {
		memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
		rxe_loopback(skb);
		err = 0;
	} else {
		err = rxe_send(pkt, skb);
	}

	if (err) {
		rxe->xmit_errors++;
		rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
		return err;
	}

	if ((qp_type(qp) != IB_QPT_RC) &&
	    (pkt->mask & RXE_END_MASK)) {
		pkt->wqe->state = wqe_state_done;
		rxe_run_task(&qp->comp.task, 1);
	}

	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
	goto done;

drop:
	kfree_skb(skb);
	err = 0;
done:
	return err;
}

#endif /* RXE_LOC_H */