/*
 * Copyright (c) 2016 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ib_mr.h"
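
/* Transition an FRMR from old_state to new_state only if it is currently in
 * old_state.  When the MR leaves FRMR_IS_INUSE, the connection's
 * i_fastreg_inuse_count is dropped and any waiter on rds_ib_ring_empty_wait
 * is woken.
 */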
static inline void
rds_transition_frwr_state(struct rds_ib_mr *ibmr,
			  enum rds_ib_fr_state old_state,
			  enum rds_ib_fr_state new_state)
{
	if (cmpxchg(&ibmr->u.frmr.fr_state,
		    old_state, new_state) == old_state &&
	    old_state == FRMR_IS_INUSE) {
		/* enforce order of ibmr->u.frmr.fr_state update
		 * before decrementing i_fastreg_inuse_count
		 */
		smp_mb__before_atomic();
		atomic_dec(&ibmr->ic->i_fastreg_inuse_count);
		if (waitqueue_active(&rds_ib_ring_empty_wait))
			wake_up(&rds_ib_ring_empty_wait);
	}
}
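
/* Allocate an FRMR-backed MR, reusing an entry from the 8K or 1M pool
 * (chosen by npages) when one is available.  Otherwise a fresh ib_mr with
 * room for pool->max_pages pages is allocated.
 */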
static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
					   int npages)
{
	struct rds_ib_mr_pool *pool;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_frmr *frmr;
	int err = 0;

	if (npages <= RDS_MR_8K_MSG_SIZE)
		pool = rds_ibdev->mr_8k_pool;
	else
		pool = rds_ibdev->mr_1m_pool;

	ibmr = rds_ib_try_reuse_ibmr(pool);
	if (ibmr)
		return ibmr;

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
			    rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	frmr = &ibmr->u.frmr;
	frmr->mr = ib_alloc_mr(rds_ibdev->pd, IB_MR_TYPE_MEM_REG,
			       pool->max_pages);
	if (IS_ERR(frmr->mr)) {
		pr_warn("RDS/IB: %s failed to allocate MR", __func__);
		err = PTR_ERR(frmr->mr);
		goto out_no_cigar;
	}

	ibmr->pool = pool;
	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);

	if (atomic_read(&pool->item_count) > pool->max_items_soft)
		pool->max_items_soft = pool->max_items;

	frmr->fr_state = FRMR_IS_FREE;
	init_waitqueue_head(&frmr->fr_inv_done);
	init_waitqueue_head(&frmr->fr_reg_done);
	return ibmr;

out_no_cigar:
	kfree(ibmr);
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}
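
/* Return an ibmr to its pool: the drop_list when the mapping must be torn
 * down, the free_list otherwise.  A pool flush is scheduled once too many
 * pages are pinned or too many entries are dirty.
 */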
static void rds_ib_free_frmr(struct rds_ib_mr *ibmr, bool drop)
{
	struct rds_ib_mr_pool *pool = ibmr->pool;

	if (drop)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);
	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
}
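
/* Map the ibmr's DMA'd scatterlist into the MR and post an IB_WR_REG_MR
 * work request.  The caller is throttled on i_fastreg_wrs, and the function
 * waits on fr_reg_done for the registration completion so the MR is never
 * handed out while registration is still in flight.
 */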
static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;
	struct ib_reg_wr reg_wr;
	int ret, off = 0;

	while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
		atomic_inc(&ibmr->ic->i_fastreg_wrs);
		cpu_relax();
	}

	ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len,
				&off, PAGE_SIZE);
	if (unlikely(ret != ibmr->sg_dma_len))
		return ret < 0 ? ret : -EINVAL;

	if (cmpxchg(&frmr->fr_state,
		    FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE)
		return -EBUSY;

	atomic_inc(&ibmr->ic->i_fastreg_inuse_count);

	/* Perform a WR for the fast_reg_mr. Each individual page
	 * in the sg list is added to the fast reg page list and placed
	 * inside the fast_reg_mr WR.  The key used is a rolling 8bit
	 * counter, which should guarantee uniqueness.
	 */
	ib_update_fast_reg_key(frmr->mr, ibmr->remap_count++);
	frmr->fr_reg = true;
	memset(&reg_wr, 0, sizeof(reg_wr));
	reg_wr.wr.wr_id = (unsigned long)(void *)ibmr;
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.num_sge = 0;
	reg_wr.mr = frmr->mr;
	reg_wr.key = frmr->mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE |
			IB_ACCESS_REMOTE_READ |
			IB_ACCESS_REMOTE_WRITE;
	reg_wr.wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(ibmr->ic->i_cm_id->qp, &reg_wr.wr, NULL);
	if (unlikely(ret)) {
		/* Failure here can be because of -ENOMEM as well */
		rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);

		atomic_inc(&ibmr->ic->i_fastreg_wrs);
		if (printk_ratelimit())
			pr_warn("RDS/IB: %s returned error(%d)\n",
				__func__, ret);
		goto out;
	}

	/* Wait for the registration to complete in order to prevent an invalid
	 * access error resulting from a race between the memory region already
	 * being accessed while registration is still pending.
	 */
	wait_event(frmr->fr_reg_done, !frmr->fr_reg);

out:
	return ret;
}
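
/* DMA-map the scatterlist for this MR and kick off registration.  Only the
 * first entry may start, and only the last entry may end, off a page
 * boundary; anything else fails with -EINVAL, and mappings larger than the
 * pool's max_pages fail with -EMSGSIZE.
 */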
static int rds_ib_map_frmr(struct rds_ib_device *rds_ibdev,
			   struct rds_ib_mr_pool *pool,
			   struct rds_ib_mr *ibmr,
			   struct scatterlist *sg, unsigned int sg_len)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;
	int i;
	u32 len;
	int ret = 0;

	/* We want to teardown old ibmr values here and fill it up with
	 * new sg values
	 */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = sg;
	ibmr->sg_len = sg_len;
	ibmr->sg_dma_len = 0;
	frmr->sg_byte_len = 0;
	WARN_ON(ibmr->sg_dma_len);
	ibmr->sg_dma_len = ib_dma_map_sg(dev, ibmr->sg, ibmr->sg_len,
					 DMA_BIDIRECTIONAL);
	if (unlikely(!ibmr->sg_dma_len)) {
		pr_warn("RDS/IB: %s failed!\n", __func__);
		return -EBUSY;
	}

	frmr->sg_byte_len = 0;
	frmr->dma_npages = 0;
	len = 0;

	ret = -EINVAL;
	for (i = 0; i < ibmr->sg_dma_len; ++i) {
		unsigned int dma_len = sg_dma_len(&ibmr->sg[i]);
		u64 dma_addr = sg_dma_address(&ibmr->sg[i]);

		frmr->sg_byte_len += dma_len;
		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				goto out_unmap;
			else
				++frmr->dma_npages;
		}

		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < ibmr->sg_dma_len - 1)
				goto out_unmap;
			else
				++frmr->dma_npages;
		}

		len += dma_len;
	}
	frmr->dma_npages += len >> PAGE_SHIFT;

	if (frmr->dma_npages > ibmr->pool->max_pages) {
		ret = -EMSGSIZE;
		goto out_unmap;
	}

	ret = rds_ib_post_reg_frmr(ibmr);
	if (ret)
		goto out_unmap;

	if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_used);

	return ret;

out_unmap:
	ib_dma_unmap_sg(rds_ibdev->dev, ibmr->sg, ibmr->sg_len,
			DMA_BIDIRECTIONAL);
	ibmr->sg_dma_len = 0;
	return ret;
}
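
/* Post an IB_WR_LOCAL_INV work request for an in-use FRMR and wait until
 * the completion handler moves it out of FRMR_IS_INUSE.
 */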
static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
{
	struct ib_send_wr *s_wr;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;
	struct rdma_cm_id *i_cm_id = ibmr->ic->i_cm_id;
	int ret = -EINVAL;

	if (!i_cm_id || !i_cm_id->qp || !frmr->mr)
		goto out;

	if (frmr->fr_state != FRMR_IS_INUSE)
		goto out;

	while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
		atomic_inc(&ibmr->ic->i_fastreg_wrs);
		cpu_relax();
	}

	frmr->fr_inv = true;
	s_wr = &frmr->fr_wr;
	memset(s_wr, 0, sizeof(*s_wr));
	s_wr->wr_id = (unsigned long)(void *)ibmr;
	s_wr->opcode = IB_WR_LOCAL_INV;
	s_wr->ex.invalidate_rkey = frmr->mr->rkey;
	s_wr->send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(i_cm_id->qp, s_wr, NULL);
	if (unlikely(ret)) {
		rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
		frmr->fr_inv = false;
		/* enforce order of frmr->fr_inv update
		 * before incrementing i_fastreg_wrs
		 */
		smp_mb__before_atomic();
		atomic_inc(&ibmr->ic->i_fastreg_wrs);
		pr_err("RDS/IB: %s returned error(%d)\n", __func__, ret);
		goto out;
	}

	/* Wait for the FRMR_IS_FREE (or FRMR_IS_STALE) transition in order to
	 * 1) avoid a silly bouncing between "clean_list" and "drop_list"
	 *    triggered by function "rds_ib_reg_frmr" as it releases frmr
	 *    regions whose state is not "FRMR_IS_FREE" right away.
	 * 2) prevent an invalid access error in a race
	 *    from a pending "IB_WR_LOCAL_INV" operation
	 *    with a teardown ("dma_unmap_sg", "put_page")
	 *    and de-registration ("ib_dereg_mr") of the corresponding
	 *    memory region.
	 */
	wait_event(frmr->fr_inv_done, frmr->fr_state != FRMR_IS_INUSE);

out:
	return ret;
}
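
/* Completion handler shared by registration and invalidation work requests.
 * A failed completion marks the MR stale; in all cases the MR state is
 * transitioned, the corresponding waiter (fr_inv_done or fr_reg_done) is
 * woken, and the i_fastreg_wrs slot is released.
 */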
void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
{
	struct rds_ib_mr *ibmr = (void *)(unsigned long)wc->wr_id;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;

	if (wc->status != IB_WC_SUCCESS) {
		rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
		if (rds_conn_up(ic->conn))
			rds_ib_conn_error(ic->conn,
					  "frmr completion <%pI4,%pI4> status %u(%s), vendor_err 0x%x, disconnecting and reconnecting\n",
					  &ic->conn->c_laddr,
					  &ic->conn->c_faddr,
					  wc->status,
					  ib_wc_status_msg(wc->status),
					  wc->vendor_err);
	}

	if (frmr->fr_inv) {
		rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_FREE);
		frmr->fr_inv = false;
		wake_up(&frmr->fr_inv_done);
	}

	if (frmr->fr_reg) {
		frmr->fr_reg = false;
		wake_up(&frmr->fr_reg_done);
	}

	/* enforce order of frmr->{fr_reg,fr_inv} update
	 * before incrementing i_fastreg_wrs
	 */
	smp_mb__before_atomic();
	atomic_inc(&ic->i_fastreg_wrs);
}
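
/* Invalidate and tear down the MRs queued on "list", freeing entries until
 * "goal" is reached (stale MRs are always freed, in-use ones are skipped).
 * *unpinned and *nfreed are updated for the caller's accounting.
 */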
void rds_ib_unreg_frmr(struct list_head *list, unsigned int *nfreed,
		       unsigned long *unpinned, unsigned int goal)
{
	struct rds_ib_mr *ibmr, *next;
	struct rds_ib_frmr *frmr;
	int ret = 0, ret2;
	unsigned int freed = *nfreed;

	/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
	list_for_each_entry(ibmr, list, unmap_list) {
		if (ibmr->sg_dma_len) {
			ret2 = rds_ib_post_inv(ibmr);
			if (ret2 && !ret)
				ret = ret2;
		}
	}

	if (ret)
		pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, list, unmap_list) {
		*unpinned += ibmr->sg_len;
		frmr = &ibmr->u.frmr;
		__rds_ib_teardown_mr(ibmr);
		if (freed < goal || frmr->fr_state == FRMR_IS_STALE) {
			/* Don't de-allocate if the MR is not free yet */
			if (frmr->fr_state == FRMR_IS_INUSE)
				continue;

			if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_free);

			list_del(&ibmr->unmap_list);
			if (frmr->mr)
				ib_dereg_mr(frmr->mr);
			kfree(ibmr);
			freed++;
		}
	}
	*nfreed = freed;
}
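
/* Register a scatterlist with a fast-registration MR and return the ibmr,
 * with the rkey for the mapping in *key.  Allocation is retried (dropping
 * any MR that is not FRMR_IS_FREE) until a usable FRMR is found.
 */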
struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev,
				  struct rds_ib_connection *ic,
				  struct scatterlist *sg,
				  unsigned long nents, u32 *key)
{
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_frmr *frmr;
	int ret;

	if (!ic) {
		/* TODO: Add FRWR support for RDS_GET_MR using proxy qp */
		return ERR_PTR(-EOPNOTSUPP);
	}

	do {
		if (ibmr)
			rds_ib_free_frmr(ibmr, true);
		ibmr = rds_ib_alloc_frmr(rds_ibdev, nents);
		if (IS_ERR(ibmr))
			return ibmr;
		frmr = &ibmr->u.frmr;
	} while (frmr->fr_state != FRMR_IS_FREE);

	ibmr->ic = ic;
	ibmr->device = rds_ibdev;
	ret = rds_ib_map_frmr(rds_ibdev, ibmr->pool, ibmr, sg, nents);
	if (ret == 0) {
		*key = frmr->mr->rkey;
	} else {
		rds_ib_free_frmr(ibmr, false);
		ibmr = ERR_PTR(ret);
	}

	return ibmr;
}
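
/* Queue an ibmr for reclaim by the pool flush worker: stale MRs go on the
 * drop_list, everything else on the free_list.
 */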
void rds_ib_free_frmr_list(struct rds_ib_mr *ibmr)
{
	struct rds_ib_mr_pool *pool = ibmr->pool;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;

	if (frmr->fr_state == FRMR_IS_STALE)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);
}