2011-05-20 22:46:11 +04:00
/*
2017-06-18 16:37:27 +03:00
* Copyright ( c ) 2017 Mellanox Technologies Inc . All rights reserved .
2011-05-20 22:46:11 +04:00
* Copyright ( c ) 2010 Voltaire Inc . All rights reserved .
*
* This software is available to you under a choice of one of two
* licenses . You may choose to be licensed under the terms of the GNU
* General Public License ( GPL ) Version 2 , available from the file
* COPYING in the main directory of this source tree , or the
* OpenIB . org BSD license below :
*
* Redistribution and use in source and binary forms , with or
* without modification , are permitted provided that the following
* conditions are met :
*
* - Redistributions of source code must retain the above
* copyright notice , this list of conditions and the following
* disclaimer .
*
* - Redistributions in binary form must reproduce the above
* copyright notice , this list of conditions and the following
* disclaimer in the documentation and / or other materials
* provided with the distribution .
*
* THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND ,
* EXPRESS OR IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY , FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT . IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER LIABILITY , WHETHER IN AN
* ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM , OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE .
*/
# define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
2011-05-27 23:29:33 +04:00
# include <linux/export.h>
2011-05-20 22:46:11 +04:00
# include <net/netlink.h>
# include <net/net_namespace.h>
# include <net/sock.h>
# include <rdma/rdma_netlink.h>
2017-08-14 23:57:38 +03:00
# include <linux/module.h>
2017-05-14 15:49:57 +03:00
# include "core_priv.h"
2011-05-20 22:46:11 +04:00
2017-06-05 10:20:11 +03:00
# include "core_priv.h"
2011-05-20 22:46:11 +04:00
2017-06-05 10:20:11 +03:00
static DEFINE_MUTEX ( rdma_nl_mutex ) ;
2011-05-20 22:46:11 +04:00
static struct sock * nls ;
2017-06-05 10:20:11 +03:00
static struct {
2017-06-19 18:23:45 +03:00
const struct rdma_nl_cbs * cb_table ;
2017-06-05 10:20:11 +03:00
} rdma_nl_types [ RDMA_NL_NUM_CLIENTS ] ;
2011-05-20 22:46:11 +04:00
2017-06-18 15:51:16 +03:00
int rdma_nl_chk_listeners ( unsigned int group )
2015-08-14 15:52:07 +03:00
{
2017-06-18 15:51:16 +03:00
return ( netlink_has_listeners ( nls , group ) ) ? 0 : - 1 ;
2015-08-14 15:52:07 +03:00
}
2017-06-18 15:51:16 +03:00
EXPORT_SYMBOL ( rdma_nl_chk_listeners ) ;
2015-08-14 15:52:07 +03:00
2017-06-05 10:20:11 +03:00
static bool is_nl_msg_valid ( unsigned int type , unsigned int op )
2011-05-20 22:46:11 +04:00
{
2017-09-08 13:02:26 +03:00
static const unsigned int max_num_ops [ RDMA_NL_NUM_CLIENTS ] = {
0 ,
2017-06-05 10:20:11 +03:00
RDMA_NL_RDMA_CM_NUM_OPS ,
RDMA_NL_IWPM_NUM_OPS ,
0 ,
RDMA_NL_LS_NUM_OPS ,
2017-06-20 07:55:53 +03:00
RDMA_NLDEV_NUM_OPS } ;
2011-05-20 22:46:11 +04:00
2017-06-05 10:20:11 +03:00
/*
* This BUILD_BUG_ON is intended to catch addition of new
* RDMA netlink protocol without updating the array above .
*/
BUILD_BUG_ON ( RDMA_NL_NUM_CLIENTS ! = 6 ) ;
2011-05-20 22:46:11 +04:00
2017-09-08 13:02:26 +03:00
if ( type > = RDMA_NL_NUM_CLIENTS )
2017-06-05 10:20:11 +03:00
return false ;
2011-05-20 22:46:11 +04:00
2017-09-08 13:02:26 +03:00
return ( op < max_num_ops [ type ] ) ? true : false ;
2017-06-05 10:20:11 +03:00
}
2011-05-20 22:46:11 +04:00
2017-06-05 10:20:11 +03:00
/*
 * Check that (@type, @op) resolves to a registered client callback.
 *
 * Must be called with rdma_nl_mutex held.  If the client has not yet
 * registered, attempt to load its module through the
 * "rdma-netlink-subsys-<type>" alias.  The mutex is dropped around
 * request_module() because the loaded module's init path calls
 * rdma_nl_register(), which takes the same mutex — holding it here
 * would deadlock.
 */
static bool is_nl_valid(unsigned int type, unsigned int op)
{
	const struct rdma_nl_cbs *cb_table;

	if (!is_nl_msg_valid(type, op))
		return false;

	cb_table = rdma_nl_types[type].cb_table;
#ifdef CONFIG_MODULES
	if (!cb_table) {
		mutex_unlock(&rdma_nl_mutex);
		request_module("rdma-netlink-subsys-%d", type);
		mutex_lock(&rdma_nl_mutex);
		/* Re-read: registration may have occurred while unlocked. */
		cb_table = rdma_nl_types[type].cb_table;
	}
#endif

	/* Valid only if the op has at least one of dump/doit handlers. */
	if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit))
		return false;
	return true;
}
2011-05-20 22:46:11 +04:00
2017-06-05 10:20:11 +03:00
void rdma_nl_register ( unsigned int index ,
2017-06-19 18:23:45 +03:00
const struct rdma_nl_cbs cb_table [ ] )
2017-06-05 10:20:11 +03:00
{
mutex_lock ( & rdma_nl_mutex ) ;
if ( ! is_nl_msg_valid ( index , 0 ) ) {
/*
* All clients are not interesting in success / failure of
* this call . They want to see the print to error log and
* continue their initialization . Print warning for them ,
* because it is programmer ' s error to be here .
*/
mutex_unlock ( & rdma_nl_mutex ) ;
WARN ( true ,
" The not-valid %u index was supplied to RDMA netlink \n " ,
index ) ;
return ;
}
2011-05-20 22:46:11 +04:00
2017-06-05 10:20:11 +03:00
if ( rdma_nl_types [ index ] . cb_table ) {
mutex_unlock ( & rdma_nl_mutex ) ;
WARN ( true ,
" The %u index is already registered in RDMA netlink \n " ,
index ) ;
return ;
}
2011-05-20 22:46:11 +04:00
2017-06-05 10:20:11 +03:00
rdma_nl_types [ index ] . cb_table = cb_table ;
mutex_unlock ( & rdma_nl_mutex ) ;
2011-05-20 22:46:11 +04:00
}
2017-06-05 10:20:11 +03:00
EXPORT_SYMBOL ( rdma_nl_register ) ;
2011-05-20 22:46:11 +04:00
2017-06-05 10:20:11 +03:00
void rdma_nl_unregister ( unsigned int index )
2011-05-20 22:46:11 +04:00
{
2017-06-05 10:20:11 +03:00
mutex_lock ( & rdma_nl_mutex ) ;
rdma_nl_types [ index ] . cb_table = NULL ;
mutex_unlock ( & rdma_nl_mutex ) ;
2011-05-20 22:46:11 +04:00
}
2017-06-05 10:20:11 +03:00
EXPORT_SYMBOL ( rdma_nl_unregister ) ;
2011-05-20 22:46:11 +04:00
void * ibnl_put_msg ( struct sk_buff * skb , struct nlmsghdr * * nlh , int seq ,
2014-03-27 02:07:35 +04:00
int len , int client , int op , int flags )
2011-05-20 22:46:11 +04:00
{
2017-06-18 16:38:04 +03:00
* nlh = nlmsg_put ( skb , 0 , seq , RDMA_NL_GET_TYPE ( client , op ) , len , flags ) ;
2012-06-27 08:43:19 +04:00
if ( ! * nlh )
2017-06-18 16:38:04 +03:00
return NULL ;
2012-06-27 08:43:19 +04:00
return nlmsg_data ( * nlh ) ;
2011-05-20 22:46:11 +04:00
}
EXPORT_SYMBOL ( ibnl_put_msg ) ;
int ibnl_put_attr ( struct sk_buff * skb , struct nlmsghdr * nlh ,
int len , void * data , int type )
{
2017-06-18 16:38:04 +03:00
if ( nla_put ( skb , type , len , data ) ) {
nlmsg_cancel ( skb , nlh ) ;
return - EMSGSIZE ;
}
2011-05-20 22:46:11 +04:00
return 0 ;
}
EXPORT_SYMBOL ( ibnl_put_attr ) ;
2017-06-08 09:05:12 +03:00
static int rdma_nl_rcv_msg ( struct sk_buff * skb , struct nlmsghdr * nlh ,
struct netlink_ext_ack * extack )
2011-05-20 22:46:11 +04:00
{
int type = nlh - > nlmsg_type ;
2017-06-05 10:20:11 +03:00
unsigned int index = RDMA_NL_GET_CLIENT ( type ) ;
2016-05-06 22:45:25 +03:00
unsigned int op = RDMA_NL_GET_OP ( type ) ;
2017-06-15 13:14:13 +03:00
const struct rdma_nl_cbs * cb_table ;
2011-05-20 22:46:11 +04:00
2017-06-05 10:20:11 +03:00
if ( ! is_nl_valid ( index , op ) )
return - EINVAL ;
2017-06-15 14:20:39 +03:00
cb_table = rdma_nl_types [ index ] . cb_table ;
2017-06-15 13:14:13 +03:00
if ( ( cb_table [ op ] . flags & RDMA_NL_ADMIN_PERM ) & &
2017-06-12 16:00:19 +03:00
! netlink_capable ( skb , CAP_NET_ADMIN ) )
return - EPERM ;
2017-06-15 14:20:39 +03:00
/* FIXME: Convert IWCM to properly handle doit callbacks */
if ( ( nlh - > nlmsg_flags & NLM_F_DUMP ) | | index = = RDMA_NL_RDMA_CM | |
index = = RDMA_NL_IWCM ) {
struct netlink_dump_control c = {
. dump = cb_table [ op ] . dump ,
} ;
2017-06-15 12:46:33 +03:00
return netlink_dump_start ( nls , skb , nlh , & c ) ;
2011-05-20 22:46:11 +04:00
}
2017-06-15 14:20:39 +03:00
2017-06-15 13:14:13 +03:00
if ( cb_table [ op ] . doit )
2017-06-15 14:20:39 +03:00
return cb_table [ op ] . doit ( skb , nlh , extack ) ;
2011-05-20 22:46:11 +04:00
2017-06-15 14:20:39 +03:00
return 0 ;
2011-05-20 22:46:11 +04:00
}
2017-06-08 09:05:12 +03:00
/*
 * This function is similar to netlink_rcv_skb with one exception:
 * It calls to the callback for the netlink messages without NLM_F_REQUEST
 * flag. These messages are intended for RDMA_NL_LS consumer, so it is allowed
 * for that consumer only.
 */
static int rdma_nl_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						struct nlmsghdr *,
						struct netlink_ext_ack *))
{
	struct netlink_ext_ack extack = {};
	struct nlmsghdr *nlh;
	int err;

	/* Walk every message packed into this skb. */
	while (skb->len >= nlmsg_total_size(0)) {
		int msglen;

		nlh = nlmsg_hdr(skb);
		err = 0;

		/* Truncated or malformed header: stop processing quietly. */
		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/*
		 * Generally speaking, the only requests are handled
		 * by the kernel, but RDMA_NL_LS is different, because it
		 * runs backward netlink scheme. Kernel initiates messages
		 * and waits for reply with data to keep pathrecord cache
		 * in sync.
		 */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST) &&
		    (RDMA_NL_GET_CLIENT(nlh->nlmsg_type) != RDMA_NL_LS))
			goto ack;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto ack;

		err = cb(skb, nlh, &extack);
		/* -EINTR means a dump was started; it will ack itself. */
		if (err == -EINTR)
			goto skip;

ack:
		/* Ack when requested by the sender or on any error. */
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err, &extack);

skip:
		/* Advance to the next aligned message in the skb. */
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}

	return 0;
}
2017-06-08 09:05:12 +03:00
static void rdma_nl_rcv ( struct sk_buff * skb )
2011-05-20 22:46:11 +04:00
{
2017-06-05 10:20:11 +03:00
mutex_lock ( & rdma_nl_mutex ) ;
2017-06-08 09:05:12 +03:00
rdma_nl_rcv_skb ( skb , & rdma_nl_rcv_msg ) ;
2017-06-05 10:20:11 +03:00
mutex_unlock ( & rdma_nl_mutex ) ;
2011-05-20 22:46:11 +04:00
}
2017-06-18 15:35:20 +03:00
/*
 * Send @skb to the userspace socket identified by @pid without
 * blocking.  netlink_unicast() returns a byte count on success, which
 * is normalized to 0 here; negative errors pass through.
 */
int rdma_nl_unicast(struct sk_buff *skb, u32 pid)
{
	int ret = netlink_unicast(nls, skb, pid, MSG_DONTWAIT);

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL(rdma_nl_unicast);
2014-03-27 02:07:35 +04:00
2017-06-18 15:35:20 +03:00
/*
 * Like rdma_nl_unicast(), but may block (no MSG_DONTWAIT) until the
 * receiver's socket buffer has room or the send timeout expires.
 */
int rdma_nl_unicast_wait(struct sk_buff *skb, __u32 pid)
{
	int ret = netlink_unicast(nls, skb, pid, 0);

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL(rdma_nl_unicast_wait);
2017-06-28 17:02:45 +03:00
2017-06-18 15:44:32 +03:00
/*
 * Broadcast @skb to every listener of multicast @group, allocating
 * with @flags.  Thin wrapper keeping the socket private to this file.
 */
int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags)
{
	return nlmsg_multicast(nls, skb, 0, group, flags);
}
EXPORT_SYMBOL(rdma_nl_multicast);
2014-03-27 02:07:35 +04:00
2017-06-05 10:20:11 +03:00
/*
 * Create the kernel-side NETLINK_RDMA socket in init_net and hook up
 * the receive path.  Returns 0 on success or -ENOMEM.
 */
int __init rdma_nl_init(void)
{
	struct netlink_kernel_cfg cfg = {
		.input = rdma_nl_rcv,
	};

	nls = netlink_kernel_create(&init_net, NETLINK_RDMA, &cfg);
	if (!nls)
		return -ENOMEM;

	/* Bound blocking sends (rdma_nl_unicast_wait) to 10 seconds. */
	nls->sk_sndtimeo = 10 * HZ;
	return 0;
}
2017-06-05 10:20:11 +03:00
void rdma_nl_exit ( void )
2011-05-20 22:46:11 +04:00
{
2017-06-05 10:20:11 +03:00
int idx ;
2011-05-20 22:46:11 +04:00
2017-06-05 10:20:11 +03:00
for ( idx = 0 ; idx < RDMA_NL_NUM_CLIENTS ; idx + + )
rdma_nl_unregister ( idx ) ;
2011-05-20 22:46:11 +04:00
netlink_kernel_release ( nls ) ;
}
2017-08-14 23:57:38 +03:00
MODULE_ALIAS_NET_PF_PROTO ( PF_NETLINK , NETLINK_RDMA ) ;