/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_LOC_H
#define RXE_LOC_H

/* rxe_av.c */
void rxe_init_av(struct rdma_ah_attr *attr, struct rxe_av *av);
int rxe_av_chk_attr(struct rxe_qp *qp, struct rdma_ah_attr *attr);
int rxe_ah_chk_attr(struct rxe_ah *ah, struct rdma_ah_attr *attr);
void rxe_av_from_attr(u8 port_num, struct rxe_av *av,
		      struct rdma_ah_attr *attr);
void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr);
void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr);
struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt, struct rxe_ah **ahp);

/* rxe_cq.c */
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector);
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_udata *udata,
		     struct rxe_create_cq_resp __user *uresp);
int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe,
			struct rxe_resize_cq_resp __user *uresp,
			struct ib_udata *udata);
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);
void rxe_cq_disable(struct rxe_cq *cq);
void rxe_cq_cleanup(struct rxe_pool_elem *elem);

/* rxe_mcast.c */
struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid);
int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid);
int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid);
void rxe_cleanup_mcg(struct kref *kref);

/* rxe_mmap.c */
struct rxe_mmap_info {
	struct list_head	pending_mmaps;
	struct ib_ucontext	*context;
	struct kref		ref;
	void			*obj;

	struct mminfo info;
};

void rxe_mmap_release(struct kref *ref);
struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev, u32 size,
					    struct ib_udata *udata, void *obj);
int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

/* rxe_mr.c */
u8 rxe_get_next_key(u32 last_key);
void rxe_mr_init_dma(int access, struct rxe_mr *mr);
int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
		     int access, struct rxe_mr *mr);
int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr);
int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, int length);
int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
		enum rxe_mr_copy_dir dir);
int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
	      void *addr, int length, enum rxe_mr_copy_dir dir);
int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
		  int sg_nents, unsigned int *sg_offset);
void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
			u64 compare, u64 swap_add, u64 *orig_val);
struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
			 enum rxe_mr_lookup_type type);
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
int rxe_invalidate_mr(struct rxe_qp *qp, u32 key);
int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
void rxe_mr_cleanup(struct rxe_pool_elem *elem);

/* rxe_mw.c */
int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata);
int rxe_dealloc_mw(struct ib_mw *ibmw);
int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey);
struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey);
void rxe_mw_cleanup(struct rxe_pool_elem *elem);

/* rxe_net.c */
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
				int paylen, struct rxe_pkt_info *pkt);
int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt,
		struct sk_buff *skb);
int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
		    struct sk_buff *skb);
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);

/* rxe_qp.c */
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd, struct ib_udata *udata);
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask);
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr,
		     int mask, struct ib_udata *udata);
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask);
void rxe_qp_error(struct rxe_qp *qp);
int rxe_qp_chk_destroy(struct rxe_qp *qp);
void rxe_qp_cleanup(struct rxe_pool_elem *elem);

static inline int qp_num(struct rxe_qp *qp)
{
	return qp->ibqp.qp_num;
}

static inline enum ib_qp_type qp_type(struct rxe_qp *qp)
{
	return qp->ibqp.qp_type;
}

static inline enum ib_qp_state qp_state(struct rxe_qp *qp)
{
	return qp->attr.qp_state;
}

/* RC/UC QPs use the negotiated path MTU; other QP types report the
 * largest IB MTU.
 */
static inline int qp_mtu(struct rxe_qp *qp)
{
	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
		return qp->attr.path_mtu;
	else
		return IB_MTU_4096;
}

/* size of a receive WQE with room for max_sge scatter/gather entries */
static inline int rcv_wqe_size(int max_sge)
{
	return sizeof(struct rxe_recv_wqe) +
		max_sge * sizeof(struct ib_sge);
}

void free_rd_atomic_resource(struct resp_res *res);

/* advance the responder resource head index, wrapping at max_dest_rd_atomic */
static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
{
	qp->resp.res_head++;
	if (unlikely(qp->resp.res_head == qp->attr.max_dest_rd_atomic))
		qp->resp.res_head = 0;
}

void retransmit_timer(struct timer_list *t);
void rnr_nak_timer(struct timer_list *t);

/* rxe_srq.c */
int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init);
int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_init_attr *init, struct ib_udata *udata,
		      struct rxe_create_srq_resp __user *uresp);
int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);
int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
		      struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata);
void rxe_srq_cleanup(struct rxe_pool_elem *elem);

void rxe_dealloc(struct ib_device *ib_dev);

int rxe_completer(void *arg);
int rxe_requester(void *arg);
int rxe_responder(void *arg);

/* rxe_icrc.c */
int rxe_icrc_init(struct rxe_dev *rxe);
int rxe_icrc_check(struct sk_buff *skb, struct rxe_pkt_info *pkt);
void rxe_icrc_generate(struct sk_buff *skb, struct rxe_pkt_info *pkt);

void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);

void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);

static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
{
	return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];
}

#endif /* RXE_LOC_H */