/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <asm-generic/kmap_types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/sched.h>
#include <rdma/ib_user_verbs.h>
#include <linux/mlx5/vport.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include "user.h"
#include "mlx5_ib.h"

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "2.2-1"
#define DRIVER_RELDATE "Feb 2014"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);

static int deprecated_prof_sel = 2;
module_param_named(prof_sel, deprecated_prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Deprecated here. Moved to module mlx5_core");
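
/*
 * Usage note (illustrative, derived from the parameter description and the
 * warning in mlx5_ib_init() below): prof_sel is kept here only for backward
 * compatibility and its value is ignored.  The profile is now selected when
 * loading mlx5_core instead, e.g. a hypothetical invocation:
 *
 *	modprobe mlx5_core prof_sel=1
 */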
static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
	DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device)
{
	struct mlx5_ib_dev *dev = to_mdev(device);

	switch (MLX5_CAP_GEN(dev->mdev, port_type)) {
	case MLX5_CAP_PORT_TYPE_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case MLX5_CAP_PORT_TYPE_ETH:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
	return !dev->mdev->issi;
}

enum {
	MLX5_VPORT_ACCESS_METHOD_MAD,
	MLX5_VPORT_ACCESS_METHOD_HCA,
	MLX5_VPORT_ACCESS_METHOD_NIC,
};

static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
		return MLX5_VPORT_ACCESS_METHOD_MAD;

	if (mlx5_ib_port_link_layer(ibdev) ==
	    IB_LINK_LAYER_ETHERNET)
		return MLX5_VPORT_ACCESS_METHOD_NIC;

	return MLX5_VPORT_ACCESS_METHOD_HCA;
}
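
/*
 * Summary of the dispatch above (descriptive comment, derived from the code
 * itself): devices with issi == 0 are queried through the MAD interface,
 * Ethernet-link-layer ports through NIC vport commands, and everything else
 * through HCA vport commands.  All the query helpers below switch on this.
 */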
static int mlx5_query_system_image_guid(struct ib_device *ibdev,
					__be64 *sys_image_guid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_system_image_guid(ibdev,
							    sys_image_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
		if (!err)
			*sys_image_guid = cpu_to_be64(tmp);
		return err;

	default:
		return -EINVAL;
	}
}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
				u16 *max_pkeys)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
						pkey_table_size));
		return 0;

	default:
		return -EINVAL;
	}
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
				u32 *vendor_id)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

	default:
		return -EINVAL;
	}
}

static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
				__be64 *node_guid)
{
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_node_guid(dev, node_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
		if (!err)
			*node_guid = cpu_to_be64(tmp);
		return err;

	default:
		return -EINVAL;
	}
}

struct mlx5_reg_node_desc {
	u8 desc[64];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct mlx5_reg_node_desc in;

	if (mlx5_use_mad_ifc(dev))
		return mlx5_query_mad_ifc_node_desc(dev, node_desc);

	memset(&in, 0, sizeof(in));

	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
				    sizeof(struct mlx5_reg_node_desc),
				    MLX5_REG_NODE_DESC, 0, 0);
}
static int mlx5_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	int err = -ENOMEM;
	int max_rq_sg;
	int max_sq_sg;
	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	err = mlx5_query_system_image_guid(ibdev,
					   &props->sys_image_guid);
	if (err)
		return err;

	err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
	if (err)
		return err;

	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
	if (err)
		return err;

	/* pack fw version as major << 32 | minor << 16 | sub-minor */
	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
		(fw_rev_min(dev->mdev) << 16) |
		fw_rev_sub(dev->mdev);
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (MLX5_CAP_GEN(mdev, pkv))
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, qkv))
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, apm))
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if (MLX5_CAP_GEN(mdev, xrc))
		props->device_cap_flags |= IB_DEVICE_XRC;
	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (MLX5_CAP_GEN(mdev, sho)) {
		props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
		/* At this stage no support for signature handover */
		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
				      IB_PROT_T10DIF_TYPE_2 |
				      IB_PROT_T10DIF_TYPE_3;
		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
				       IB_GUARD_T10DIF_CSUM;
	}
	if (MLX5_CAP_GEN(mdev, block_lb_mc))
		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

	props->vendor_part_id = mdev->pdev->device;
	props->hw_ver = mdev->pdev->revision;

	props->max_mr_size = ~0ull;
	props->page_size_cap = ~(min_page_size - 1);
	props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
	props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
	max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
		     sizeof(struct mlx5_wqe_data_seg);
	max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
		     sizeof(struct mlx5_wqe_ctrl_seg)) /
		     sizeof(struct mlx5_wqe_data_seg);
	props->max_sge = min(max_rq_sg, max_sq_sg);
	props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
	props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
	props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
	props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
	props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
	props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq_sge = max_rq_sg - 1;
	props->max_fast_reg_page_list_len = (unsigned int)-1;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->masked_atomic_cap = IB_ATOMIC_NONE;
	props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(mdev, pg))
		props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
	props->odp_caps = dev->odp_caps;
#endif

	return 0;
}
enum mlx5_ib_width {
	MLX5_IB_WIDTH_1X	= 1 << 0,
	MLX5_IB_WIDTH_2X	= 1 << 1,
	MLX5_IB_WIDTH_4X	= 1 << 2,
	MLX5_IB_WIDTH_8X	= 1 << 3,
	MLX5_IB_WIDTH_12X	= 1 << 4
};

static int translate_active_width(struct ib_device *ibdev, u8 active_width,
				  u8 *ib_width)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	int err = 0;

	if (active_width & MLX5_IB_WIDTH_1X) {
		*ib_width = IB_WIDTH_1X;
	} else if (active_width & MLX5_IB_WIDTH_2X) {
		mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
			    (int)active_width);
		err = -EINVAL;
	} else if (active_width & MLX5_IB_WIDTH_4X) {
		*ib_width = IB_WIDTH_4X;
	} else if (active_width & MLX5_IB_WIDTH_8X) {
		*ib_width = IB_WIDTH_8X;
	} else if (active_width & MLX5_IB_WIDTH_12X) {
		*ib_width = IB_WIDTH_12X;
	} else {
		mlx5_ib_dbg(dev, "Invalid active_width %d\n",
			    (int)active_width);
		err = -EINVAL;
	}

	return err;
}
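
/*
 * Worked example (descriptive comment, derived from the code above): the
 * hardware reports active_width as a bitmask, so a value of 0x04
 * (MLX5_IB_WIDTH_4X) translates to IB_WIDTH_4X, while 0x02 (2X) is rejected
 * because the IB spec of this era defines no 2X link width.
 */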
static int mlx5_mtu_to_ib_mtu(int mtu)
{
	switch (mtu) {
	case 256: return 1;
	case 512: return 2;
	case 1024: return 3;
	case 2048: return 4;
	case 4096: return 5;
	default:
		pr_warn("invalid mtu\n");
		return -1;
	}
}
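
/*
 * Note for clarity (assuming the standard enum ib_mtu encoding from
 * ib_verbs.h, where IB_MTU_256 == 1 through IB_MTU_4096 == 5): the values
 * returned above are directly usable as enum ib_mtu, e.g.
 * mlx5_mtu_to_ib_mtu(2048) == IB_MTU_2048.
 */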
enum ib_max_vl_num {
	__IB_MAX_VL_0		= 1,
	__IB_MAX_VL_0_1		= 2,
	__IB_MAX_VL_0_3		= 3,
	__IB_MAX_VL_0_7		= 4,
	__IB_MAX_VL_0_14	= 5,
};

enum mlx5_vl_hw_cap {
	MLX5_VL_HW_0	= 1,
	MLX5_VL_HW_0_1	= 2,
	MLX5_VL_HW_0_2	= 3,
	MLX5_VL_HW_0_3	= 4,
	MLX5_VL_HW_0_4	= 5,
	MLX5_VL_HW_0_5	= 6,
	MLX5_VL_HW_0_6	= 7,
	MLX5_VL_HW_0_7	= 8,
	MLX5_VL_HW_0_14	= 15
};

static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
				u8 *max_vl_num)
{
	switch (vl_hw_cap) {
	case MLX5_VL_HW_0:
		*max_vl_num = __IB_MAX_VL_0;
		break;
	case MLX5_VL_HW_0_1:
		*max_vl_num = __IB_MAX_VL_0_1;
		break;
	case MLX5_VL_HW_0_3:
		*max_vl_num = __IB_MAX_VL_0_3;
		break;
	case MLX5_VL_HW_0_7:
		*max_vl_num = __IB_MAX_VL_0_7;
		break;
	case MLX5_VL_HW_0_14:
		*max_vl_num = __IB_MAX_VL_0_14;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *rep;
	int max_mtu;
	int oper_mtu;
	int err;
	u8 ib_link_width_oper;
	u8 vl_hw_cap;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	memset(props, 0, sizeof(*props));

	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
	if (err)
		goto out;

	props->lid		= rep->lid;
	props->lmc		= rep->lmc;
	props->sm_lid		= rep->sm_lid;
	props->sm_sl		= rep->sm_sl;
	props->state		= rep->vport_state;
	props->phys_state	= rep->port_physical_state;
	props->port_cap_flags	= rep->cap_mask1;
	props->gid_tbl_len	= mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len	= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
	props->bad_pkey_cntr	= rep->pkey_violation_counter;
	props->qkey_viol_cntr	= rep->qkey_violation_counter;
	props->subnet_timeout	= rep->subnet_timeout;
	props->init_type_reply	= rep->init_type_reply;

	err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
	if (err)
		goto out;

	err = translate_active_width(ibdev, ib_link_width_oper,
				     &props->active_width);
	if (err)
		goto out;
	err = mlx5_query_port_proto_oper(mdev, &props->active_speed, MLX5_PTYS_IB,
					 port);
	if (err)
		goto out;

	mlx5_query_port_max_mtu(mdev, &max_mtu, port);

	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);

	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);

	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);

	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
	if (err)
		goto out;

	err = translate_max_vl_num(ibdev, vl_hw_cap,
				   &props->max_vl_num);
out:
	kfree(rep);
	return err;
}
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props)
{
	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_port(ibdev, port, props);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_port(ibdev, port, props);

	default:
		return -EINVAL;
	}
}
static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);

	default:
		return -EINVAL;
	}
}
static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
						 pkey);
	default:
		return -EINVAL;
	}
}
static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_reg_node_desc in;
	struct mlx5_reg_node_desc out;
	int err;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	memcpy(&in, props->node_desc, 64);
	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
	if (err)
		return err;

	memcpy(ibdev->node_desc, props->node_desc, 64);

	return err;
}
static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_port_attr attr;
	u32 tmp;
	int err;

	mutex_lock(&dev->cap_mask_mutex);

	err = mlx5_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx5_set_port_caps(dev->mdev, port, tmp);

out:
	mutex_unlock(&dev->cap_mask_mutex);
	return err;
}
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_alloc_ucontext_req_v2 req;
	struct mlx5_ib_alloc_ucontext_resp resp;
	struct mlx5_ib_ucontext *context;
	struct mlx5_uuar_info *uuari;
	struct mlx5_uar *uars;
	int gross_uuars;
	int num_uars;
	int ver;
	int uuarn;
	int err;
	int i;
	size_t reqlen;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	memset(&req, 0, sizeof(req));
	reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
	if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
		ver = 0;
	else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
		ver = 2;
	else
		return ERR_PTR(-EINVAL);
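
	/*
	 * Descriptive note: the user/kernel ABI version is inferred from the
	 * size of the request that userspace passed in -- an old library
	 * sends the original struct (ver 0), a newer one sends the extended
	 * v2 struct.  Any other size is rejected outright.
	 */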
	err = ib_copy_from_udata(&req, udata, reqlen);
	if (err)
		return ERR_PTR(err);

	if (req.flags || req.reserved)
		return ERR_PTR(-EINVAL);

	if (req.total_num_uuars > MLX5_MAX_UUARS)
		return ERR_PTR(-ENOMEM);

	if (req.total_num_uuars == 0)
		return ERR_PTR(-EINVAL);

	req.total_num_uuars = ALIGN(req.total_num_uuars,
				    MLX5_NON_FP_BF_REGS_PER_PAGE);
	if (req.num_low_latency_uuars > req.total_num_uuars - 1)
		return ERR_PTR(-EINVAL);

	num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
	gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
	resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
	resp.cache_line_size = L1_CACHE_BYTES;
	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	uuari = &context->uuari;
	mutex_init(&uuari->lock);
	uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
	if (!uars) {
		err = -ENOMEM;
		goto out_ctx;
	}

	uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
				sizeof(*uuari->bitmap),
				GFP_KERNEL);
	if (!uuari->bitmap) {
		err = -ENOMEM;
		goto out_uar_ctx;
	}
	/*
	 * clear all fast path uuars
	 */
	for (i = 0; i < gross_uuars; i++) {
		uuarn = i & 3;
		if (uuarn == 2 || uuarn == 3)
			set_bit(i, uuari->bitmap);
	}
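
	/*
	 * Worked example (descriptive comment, derived from the loop above):
	 * BF registers come in groups of four; within each group, slots 2
	 * and 3 are the fast-path registers.  So for gross_uuars == 8 the
	 * loop marks bits 2, 3, 6 and 7 as taken, leaving 0, 1, 4 and 5 for
	 * regular allocation.
	 */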
	uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
	if (!uuari->count) {
		err = -ENOMEM;
		goto out_bitmap;
	}

	for (i = 0; i < num_uars; i++) {
		err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index);
		if (err)
			goto out_count;
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
#endif

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	resp.tot_uuars = req.total_num_uuars;
	resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
	err = ib_copy_to_udata(udata, &resp,
			       sizeof(resp) - sizeof(resp.reserved));
	if (err)
		goto out_uars;

	uuari->ver = ver;
	uuari->num_low_latency_uuars = req.num_low_latency_uuars;
	uuari->uars = uars;
	uuari->num_uars = num_uars;
	return &context->ibucontext;

out_uars:
	for (i--; i >= 0; i--)
		mlx5_cmd_free_uar(dev->mdev, uars[i].index);
out_count:
	kfree(uuari->count);

out_bitmap:
	kfree(uuari->bitmap);

out_uar_ctx:
	kfree(uars);

out_ctx:
	kfree(context);
	return ERR_PTR(err);
}
static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	struct mlx5_uuar_info *uuari = &context->uuari;
	int i;

	for (i = 0; i < uuari->num_uars; i++) {
		if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
			mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
	}

	kfree(uuari->count);
	kfree(uuari->bitmap);
	kfree(uuari->uars);
	kfree(context);

	return 0;
}

static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
{
	return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index;
}
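
/*
 * Descriptive note: each UAR occupies one page inside BAR 0, so the page
 * frame number of UAR N is the BAR's starting PFN plus N.  For a
 * hypothetical BAR 0 at 0xf0000000 with 4 KiB pages,
 * uar_index2pfn(dev, 2) would yield 0xf0000 + 2 = 0xf0002.
 */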
static int get_command(unsigned long offset)
{
	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}

static int get_arg(unsigned long offset)
{
	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
}

static int get_index(unsigned long offset)
{
	return get_arg(offset);
}
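
/*
 * Worked example (assuming MLX5_IB_MMAP_CMD_SHIFT is 8 and
 * MLX5_IB_MMAP_REGULAR_PAGE is 0, per their definitions in mlx5_ib.h):
 * userspace encodes its mmap request in vm_pgoff as
 * (command << MLX5_IB_MMAP_CMD_SHIFT) | index, so a vm_pgoff of 0x0005
 * decodes to command MLX5_IB_MMAP_REGULAR_PAGE with UAR index 5.
 */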
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	struct mlx5_uuar_info *uuari = &context->uuari;
	unsigned long command;
	unsigned long idx;
	phys_addr_t pfn;

	command = get_command(vma->vm_pgoff);
	switch (command) {
	case MLX5_IB_MMAP_REGULAR_PAGE:
		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EINVAL;

		idx = get_index(vma->vm_pgoff);
		if (idx >= uuari->num_uars)
			return -EINVAL;

		pfn = uar_index2pfn(dev, uuari->uars[idx].index);
		mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx,
			    (unsigned long long)pfn);

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start, pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx5_ib_dbg(dev, "mapped WC at 0x%lx, PA 0x%llx\n",
			    vma->vm_start,
			    (unsigned long long)pfn << PAGE_SHIFT);
		break;

	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
		return -ENOSYS;

	default:
		return -EINVAL;
	}

	return 0;
}
static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn)
{
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_mkey_seg *seg;
	struct mlx5_core_mr mr;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	seg = &in->seg;
	seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA;
	seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	seg->start_addr = 0;

	err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in),
				    NULL, NULL, NULL);
	if (err) {
		mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
		goto err_in;
	}

	kfree(in);
	*key = mr.key;

	return 0;

err_in:
	kfree(in);

	return err;
}

static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key)
{
	struct mlx5_core_mr mr;
	int err;

	memset(&mr, 0, sizeof(mr));
	mr.key = key;

	err = mlx5_core_destroy_mkey(dev->mdev, &mr);
	if (err)
		mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key);
}
static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx5_ib_alloc_pd_resp resp;
	struct mlx5_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context) {
		resp.pdn = pd->pdn;
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}
	} else {
		err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn);
		if (err) {
			mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
			kfree(pd);
			return ERR_PTR(err);
		}
	}

	return &pd->ibpd;
}
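
/*
 * Descriptive note: the two branches above distinguish user and kernel PDs.
 * A user PD (context != NULL) just reports the PD number back to userspace,
 * while a kernel PD additionally gets a physical-address mkey (pa_lkey) for
 * in-kernel local DMA; that mkey is released in mlx5_ib_dealloc_pd() below.
 */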
static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
{
	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
	struct mlx5_ib_pd *mpd = to_mpd(pd);

	if (!pd->uobject)
		free_pa_mkey(mdev, mpd->pa_lkey);

	mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
	kfree(mpd);

	return 0;
}
static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	int err;

	err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
	if (err)
		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}

static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	int err;

	err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
	if (err)
		mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}
static int init_node_data(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
	if (err)
		return err;

	dev->mdev->rev_id = dev->mdev->pdev->revision;

	return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
}
static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
			     char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
}

static ssize_t show_reg_pages(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev),
		       fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%x\n", dev->mdev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
		       dev->mdev->board_id);
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);

static struct device_attribute *mlx5_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id,
	&dev_attr_fw_pages,
	&dev_attr_reg_pages,
};
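
/*
 * Descriptive note: once registered in mlx5_ib_add(), these read-only
 * attributes appear under the IB device's sysfs directory, e.g.
 * (hypothetical path, the device name depends on enumeration order):
 *
 *	cat /sys/class/infiniband/mlx5_0/fw_ver
 *
 * which prints the firmware version in the "major.minor.sub" form produced
 * by show_fw_ver() above.
 */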
static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
			  enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
	struct ib_event ibev;
	u8 port = 0;

	switch (event) {
	case MLX5_DEV_EVENT_SYS_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

	case MLX5_DEV_EVENT_PORT_UP:
		ibev.event = IB_EVENT_PORT_ACTIVE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_PORT_DOWN:
		ibev.event = IB_EVENT_PORT_ERR;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_PORT_INITIALIZED:
		/* not used by ULPs */
		return;

	case MLX5_DEV_EVENT_LID_CHANGE:
		ibev.event = IB_EVENT_LID_CHANGE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_PKEY_CHANGE:
		ibev.event = IB_EVENT_PKEY_CHANGE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_GUID_CHANGE:
		ibev.event = IB_EVENT_GID_CHANGE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_CLIENT_REREG:
		ibev.event = IB_EVENT_CLIENT_REREGISTER;
		port = (u8)param;
		break;
	}

	ibev.device = &ibdev->ib_dev;
	ibev.element.port_num = port;

	if (port < 1 || port > ibdev->num_ports) {
		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
		return;
	}

	if (ibdev->ib_active)
		ib_dispatch_event(&ibev);
}
static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
	int port;

	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
		mlx5_query_ext_port_caps(dev, port);
}

static int get_port_caps(struct mlx5_ib_dev *dev)
{
	struct ib_device_attr *dprops = NULL;
	struct ib_port_attr *pprops = NULL;
	int err = -ENOMEM;
	int port;
	struct ib_udata uhw = {.inlen = 0, .outlen = 0};

	pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
	if (!pprops)
		goto out;

	dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
	if (!dprops)
		goto out;

	err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
	if (err) {
		mlx5_ib_warn(dev, "query_device failed %d\n", err);
		goto out;
	}

	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
		err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
		if (err) {
			mlx5_ib_warn(dev, "query_port %d failed %d\n",
				     port, err);
			break;
		}
		dev->mdev->port_caps[port - 1].pkey_table_len =
						dprops->max_pkeys;
		dev->mdev->port_caps[port - 1].gid_table_len =
						pprops->gid_tbl_len;
		mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
			    dprops->max_pkeys, pprops->gid_tbl_len);
	}

out:
	kfree(pprops);
	kfree(dprops);

	return err;
}
static void destroy_umrc_res(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_mr_cache_cleanup(dev);
	if (err)
		mlx5_ib_warn(dev, "mr cache cleanup failed\n");

	mlx5_ib_destroy_qp(dev->umrc.qp);
	ib_destroy_cq(dev->umrc.cq);
	ib_dereg_mr(dev->umrc.mr);
	ib_dealloc_pd(dev->umrc.pd);
}

enum {
	MAX_UMR_WR = 128,
};
static int create_umr_res(struct mlx5_ib_dev *dev)
{
	struct ib_qp_init_attr *init_attr = NULL;
	struct ib_qp_attr *attr = NULL;
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	struct ib_mr *mr;
	struct ib_cq_init_attr cq_attr = {};
	int ret;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto error_0;
	}

	pd = ib_alloc_pd(&dev->ib_dev);
	if (IS_ERR(pd)) {
		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
		ret = PTR_ERR(pd);
		goto error_0;
	}

	mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mr)) {
		mlx5_ib_dbg(dev, "Couldn't create DMA MR for sync UMR QP\n");
		ret = PTR_ERR(mr);
		goto error_1;
	}

	cq_attr.cqe = 128;
	cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL,
			  &cq_attr);
	if (IS_ERR(cq)) {
		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
		ret = PTR_ERR(cq);
		goto error_2;
	}
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	init_attr->send_cq = cq;
	init_attr->recv_cq = cq;
	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->cap.max_send_wr = MAX_UMR_WR;
	init_attr->cap.max_send_sge = 1;
	init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
	init_attr->port_num = 1;
	qp = mlx5_ib_create_qp(pd, init_attr, NULL);
	if (IS_ERR(qp)) {
		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
		ret = PTR_ERR(qp);
		goto error_3;
	}
	qp->device = &dev->ib_dev;
	qp->real_qp = qp;
	qp->uobject = NULL;
	qp->qp_type = MLX5_IB_QPT_REG_UMR;

	attr->qp_state = IB_QPS_INIT;
	attr->port_num = 1;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
				IB_QP_PORT, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTR;
	attr->path_mtu = IB_MTU_256;

	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTS;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
		goto error_4;
	}

	dev->umrc.qp = qp;
	dev->umrc.cq = cq;
	dev->umrc.mr = mr;
	dev->umrc.pd = pd;

	sema_init(&dev->umrc.sem, MAX_UMR_WR);
	ret = mlx5_mr_cache_init(dev);
	if (ret) {
		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
		goto error_4;
	}

	kfree(attr);
	kfree(init_attr);

	return 0;

error_4:
	mlx5_ib_destroy_qp(qp);

error_3:
	ib_destroy_cq(cq);

error_2:
	ib_dereg_mr(mr);

error_1:
	ib_dealloc_pd(pd);

error_0:
	kfree(attr);
	kfree(init_attr);
	return ret;
}
static int create_dev_resources(struct mlx5_ib_resources *devr)
{
	struct ib_srq_init_attr attr;
	struct mlx5_ib_dev *dev;
	struct ib_cq_init_attr cq_attr = {.cqe = 1};
	u32 rsvd_lkey;
	int ret = 0;

	dev = container_of(devr, struct mlx5_ib_dev, devr);

	ret = mlx5_core_query_special_context(dev->mdev, &rsvd_lkey);
	if (ret) {
		pr_err("Failed to query special context %d\n", ret);
		return ret;
	}
	dev->ib_dev.local_dma_lkey = rsvd_lkey;

	devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->p0)) {
		ret = PTR_ERR(devr->p0);
		goto error0;
	}
	devr->p0->device = &dev->ib_dev;
	devr->p0->uobject = NULL;
	atomic_set(&devr->p0->usecnt, 0);

	devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
	if (IS_ERR(devr->c0)) {
		ret = PTR_ERR(devr->c0);
		goto error1;
	}
	devr->c0->device = &dev->ib_dev;
	devr->c0->uobject = NULL;
	devr->c0->comp_handler = NULL;
	devr->c0->event_handler = NULL;
	devr->c0->cq_context = NULL;
	atomic_set(&devr->c0->usecnt, 0);

	devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x0)) {
		ret = PTR_ERR(devr->x0);
		goto error2;
	}
	devr->x0->device = &dev->ib_dev;
	devr->x0->inode = NULL;
	atomic_set(&devr->x0->usecnt, 0);
	mutex_init(&devr->x0->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x0->tgt_qp_list);

	devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x1)) {
		ret = PTR_ERR(devr->x1);
		goto error3;
	}
	devr->x1->device = &dev->ib_dev;
	devr->x1->inode = NULL;
	atomic_set(&devr->x1->usecnt, 0);
	mutex_init(&devr->x1->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x1->tgt_qp_list);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_XRC;
	attr.ext.xrc.cq = devr->c0;
	attr.ext.xrc.xrcd = devr->x0;

	devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s0)) {
		ret = PTR_ERR(devr->s0);
		goto error4;
	}
	devr->s0->device = &dev->ib_dev;
	devr->s0->pd = devr->p0;
	devr->s0->uobject = NULL;
	devr->s0->event_handler = NULL;
	devr->s0->srq_context = NULL;
	devr->s0->srq_type = IB_SRQT_XRC;
	devr->s0->ext.xrc.xrcd = devr->x0;
	devr->s0->ext.xrc.cq = devr->c0;
	atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
	atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s0->usecnt, 0);
	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_BASIC;
	devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s1)) {
		ret = PTR_ERR(devr->s1);
		goto error5;
	}
	devr->s1->device = &dev->ib_dev;
	devr->s1->pd = devr->p0;
	devr->s1->uobject = NULL;
	devr->s1->event_handler = NULL;
	devr->s1->srq_context = NULL;
	devr->s1->srq_type = IB_SRQT_BASIC;
	devr->s1->ext.xrc.cq = devr->c0;
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s1->usecnt, 0);

	return 0;

error5:
	mlx5_ib_destroy_srq(devr->s0);
error4:
	mlx5_ib_dealloc_xrcd(devr->x1);
error3:
	mlx5_ib_dealloc_xrcd(devr->x0);
error2:
	mlx5_ib_destroy_cq(devr->c0);
error1:
	mlx5_ib_dealloc_pd(devr->p0);
error0:
	return ret;
}
static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
	mlx5_ib_destroy_srq(devr->s1);
	mlx5_ib_destroy_srq(devr->s0);
	mlx5_ib_dealloc_xrcd(devr->x0);
	mlx5_ib_dealloc_xrcd(devr->x1);
	mlx5_ib_destroy_cq(devr->c0);
	mlx5_ib_dealloc_pd(devr->p0);
}
static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = mlx5_ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_dev *dev;
	int err;
	int i;

	/* don't create IB instance over Eth ports, no RoCE yet! */
	if (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
		return NULL;

	printk_once(KERN_INFO "%s", mlx5_version);

	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev)
		return NULL;

	dev->mdev = mdev;

	err = get_port_caps(dev);
	if (err)
		goto err_dealloc;

	if (mlx5_use_mad_ifc(dev))
		get_ext_port_caps(dev);

	MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);

	strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
	dev->ib_dev.owner = THIS_MODULE;
	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
	dev->ib_dev.phys_port_cnt = dev->num_ports;
	dev->ib_dev.num_comp_vectors =
		dev->mdev->priv.eq_table.num_comp_vectors;
	dev->ib_dev.dma_device = &mdev->pdev->dev;

	dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
	dev->ib_dev.uverbs_ex_cmd_mask =
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);

	dev->ib_dev.query_device = mlx5_ib_query_device;
	dev->ib_dev.query_port = mlx5_ib_query_port;
	dev->ib_dev.query_gid = mlx5_ib_query_gid;
	dev->ib_dev.query_pkey = mlx5_ib_query_pkey;
	dev->ib_dev.modify_device = mlx5_ib_modify_device;
	dev->ib_dev.modify_port = mlx5_ib_modify_port;
	dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext = mlx5_ib_dealloc_ucontext;
	dev->ib_dev.mmap = mlx5_ib_mmap;
	dev->ib_dev.alloc_pd = mlx5_ib_alloc_pd;
	dev->ib_dev.dealloc_pd = mlx5_ib_dealloc_pd;
	dev->ib_dev.create_ah = mlx5_ib_create_ah;
	dev->ib_dev.query_ah = mlx5_ib_query_ah;
	dev->ib_dev.destroy_ah = mlx5_ib_destroy_ah;
	dev->ib_dev.create_srq = mlx5_ib_create_srq;
	dev->ib_dev.modify_srq = mlx5_ib_modify_srq;
	dev->ib_dev.query_srq = mlx5_ib_query_srq;
	dev->ib_dev.destroy_srq = mlx5_ib_destroy_srq;
	dev->ib_dev.post_srq_recv = mlx5_ib_post_srq_recv;
	dev->ib_dev.create_qp = mlx5_ib_create_qp;
	dev->ib_dev.modify_qp = mlx5_ib_modify_qp;
	dev->ib_dev.query_qp = mlx5_ib_query_qp;
	dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp;
	dev->ib_dev.post_send = mlx5_ib_post_send;
	dev->ib_dev.post_recv = mlx5_ib_post_recv;
	dev->ib_dev.create_cq = mlx5_ib_create_cq;
	dev->ib_dev.modify_cq = mlx5_ib_modify_cq;
	dev->ib_dev.resize_cq = mlx5_ib_resize_cq;
	dev->ib_dev.destroy_cq = mlx5_ib_destroy_cq;
	dev->ib_dev.poll_cq = mlx5_ib_poll_cq;
	dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq;
	dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr;
	dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr;
	dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr;
	dev->ib_dev.destroy_mr = mlx5_ib_destroy_mr;
	dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
	dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
	dev->ib_dev.process_mad = mlx5_ib_process_mad;
	dev->ib_dev.create_mr = mlx5_ib_create_mr;
	dev->ib_dev.alloc_fast_reg_mr = mlx5_ib_alloc_fast_reg_mr;
	dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list;
	dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list;
	dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
	dev->ib_dev.get_port_immutable = mlx5_port_immutable;

	mlx5_ib_internal_fill_odp_caps(dev);

	if (MLX5_CAP_GEN(mdev, xrc)) {
		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	err = init_node_data(dev);
	if (err)
		goto err_dealloc;

	mutex_init(&dev->cap_mask_mutex);

	err = create_dev_resources(&dev->devr);
	if (err)
		goto err_dealloc;

	err = mlx5_ib_odp_init_one(dev);
	if (err)
		goto err_rsrc;

	err = ib_register_device(&dev->ib_dev, NULL);
	if (err)
		goto err_odp;

	err = create_umr_res(dev);
	if (err)
		goto err_dev;

	for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
		err = device_create_file(&dev->ib_dev.dev,
					 mlx5_class_attributes[i]);
		if (err)
			goto err_umrc;
	}

	dev->ib_active = true;

	return dev;

err_umrc:
	destroy_umrc_res(dev);

err_dev:
	ib_unregister_device(&dev->ib_dev);

err_odp:
	mlx5_ib_odp_remove_one(dev);

err_rsrc:
	destroy_dev_resources(&dev->devr);

err_dealloc:
	ib_dealloc_device((struct ib_device *)dev);

	return NULL;
}
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
	struct mlx5_ib_dev *dev = context;

	ib_unregister_device(&dev->ib_dev);
	destroy_umrc_res(dev);
	mlx5_ib_odp_remove_one(dev);
	destroy_dev_resources(&dev->devr);
	ib_dealloc_device(&dev->ib_dev);
}
static struct mlx5_interface mlx5_ib_interface = {
	.add = mlx5_ib_add,
	.remove = mlx5_ib_remove,
	.event = mlx5_ib_event,
	.protocol = MLX5_INTERFACE_PROTOCOL_IB,
};
static int __init mlx5_ib_init(void)
{
	int err;

	if (deprecated_prof_sel != 2)
		pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");

	err = mlx5_ib_odp_init();
	if (err)
		return err;

	err = mlx5_register_interface(&mlx5_ib_interface);
	if (err)
		goto clean_odp;

	return err;

clean_odp:
	mlx5_ib_odp_cleanup();
	return err;
}
static void __exit mlx5_ib_cleanup(void)
{
	mlx5_unregister_interface(&mlx5_ib_interface);
	mlx5_ib_odp_cleanup();
}

module_init(mlx5_ib_init);
module_exit(mlx5_ib_cleanup);