// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
			    struct rxe_qp *qp)
{
	if (unlikely(!qp->valid))
		goto err1;

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (unlikely((pkt->opcode & IB_OPCODE_RC) != 0)) {
			pr_warn_ratelimited("bad qp type\n");
			goto err1;
		}
		break;
	case IB_QPT_UC:
		if (unlikely(!(pkt->opcode & IB_OPCODE_UC))) {
			pr_warn_ratelimited("bad qp type\n");
			goto err1;
		}
		break;
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (unlikely(!(pkt->opcode & IB_OPCODE_UD))) {
			pr_warn_ratelimited("bad qp type\n");
			goto err1;
		}
		break;
	default:
		pr_warn_ratelimited("unsupported qp type\n");
		goto err1;
	}

	if (pkt->mask & RXE_REQ_MASK) {
		if (unlikely(qp->resp.state != QP_STATE_READY))
			goto err1;
	} else if (unlikely(qp->req.state < QP_STATE_READY ||
			    qp->req.state > QP_STATE_DRAINED)) {
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}
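
/* The port's bad_pkey_cntr and qkey_viol_cntr counters saturate at 0xffff */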
static void set_bad_pkey_cntr(struct rxe_port *port)
{
	spin_lock_bh(&port->port_lock);
	port->attr.bad_pkey_cntr = min((u32)0xffff,
				       port->attr.bad_pkey_cntr + 1);
	spin_unlock_bh(&port->port_lock);
}

static void set_qkey_viol_cntr(struct rxe_port *port)
{
	spin_lock_bh(&port->port_lock);
	port->attr.qkey_viol_cntr = min((u32)0xffff,
				       port->attr.qkey_viol_cntr + 1);
	spin_unlock_bh(&port->port_lock);
}
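
/* Check the packet P_Key against the default partition and, for UD and GSI
 * QPs, check the Q_Key against the value expected by the QP.
 */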
static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		      u32 qpn, struct rxe_qp *qp)
{
	struct rxe_port *port = &rxe->port;
	u16 pkey = bth_pkey(pkt);

	pkt->pkey_index = 0;

	if (!pkey_match(pkey, IB_DEFAULT_PKEY_FULL)) {
		pr_warn_ratelimited("bad pkey = 0x%x\n", pkey);
		set_bad_pkey_cntr(port);
		goto err1;
	}

	if ((qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) &&
	    pkt->mask) {
		u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;

		if (unlikely(deth_qkey(pkt) != qkey)) {
			pr_warn_ratelimited("bad qkey, got 0x%x expected 0x%x for qpn 0x%x\n",
					    deth_qkey(pkt), qkey, qpn);
			set_qkey_viol_cntr(port);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}
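
/* For RC and UC QPs, check that the packet arrived on the QP's port and that
 * its IP source and destination addresses match the QP's primary address
 * vector.
 */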
static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		      struct rxe_qp *qp)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);

	if (qp_type(qp) != IB_QPT_RC && qp_type(qp) != IB_QPT_UC)
		goto done;

	if (unlikely(pkt->port_num != qp->attr.port_num)) {
		pr_warn_ratelimited("port %d != qp port %d\n",
				    pkt->port_num, qp->attr.port_num);
		goto err1;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		struct in_addr *saddr =
			&qp->pri_av.sgid_addr._sockaddr_in.sin_addr;
		struct in_addr *daddr =
			&qp->pri_av.dgid_addr._sockaddr_in.sin_addr;

		if (ip_hdr(skb)->daddr != saddr->s_addr) {
			pr_warn_ratelimited("dst addr %pI4 != qp source addr %pI4\n",
					    &ip_hdr(skb)->daddr,
					    &saddr->s_addr);
			goto err1;
		}

		if (ip_hdr(skb)->saddr != daddr->s_addr) {
			pr_warn_ratelimited("source addr %pI4 != qp dst addr %pI4\n",
					    &ip_hdr(skb)->saddr,
					    &daddr->s_addr);
			goto err1;
		}

	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct in6_addr *saddr =
			&qp->pri_av.sgid_addr._sockaddr_in6.sin6_addr;
		struct in6_addr *daddr =
			&qp->pri_av.dgid_addr._sockaddr_in6.sin6_addr;

		if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr))) {
			pr_warn_ratelimited("dst addr %pI6 != qp source addr %pI6\n",
					    &ipv6_hdr(skb)->daddr, saddr);
			goto err1;
		}

		if (memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr))) {
			pr_warn_ratelimited("source addr %pI6 != qp dst addr %pI6\n",
					    &ipv6_hdr(skb)->saddr, daddr);
			goto err1;
		}
	}

done:
	return 0;

err1:
	return -EINVAL;
}
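
/* Validate the BTH fields, look up the destination QP (taking a reference on
 * it) and run the type/state, address and key checks against that QP.
 */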
static int hdr_check(struct rxe_pkt_info *pkt)
{
	struct rxe_dev *rxe = pkt->rxe;
	struct rxe_port *port = &rxe->port;
	struct rxe_qp *qp = NULL;
	u32 qpn = bth_qpn(pkt);
	int index;
	int err;

	if (unlikely(bth_tver(pkt) != BTH_TVER)) {
		pr_warn_ratelimited("bad tver\n");
		goto err1;
	}

	if (unlikely(qpn == 0)) {
		pr_warn_once("QP 0 not supported");
		goto err1;
	}

	if (qpn != IB_MULTICAST_QPN) {
		index = (qpn == 1) ? port->qp_gsi_index : qpn;

		qp = rxe_pool_get_index(&rxe->qp_pool, index);
		if (unlikely(!qp)) {
			pr_warn_ratelimited("no qp matches qpn 0x%x\n", qpn);
			goto err1;
		}

		err = check_type_state(rxe, pkt, qp);
		if (unlikely(err))
			goto err2;

		err = check_addr(rxe, pkt, qp);
		if (unlikely(err))
			goto err2;

		err = check_keys(rxe, pkt, qpn, qp);
		if (unlikely(err))
			goto err2;
	} else {
		if (unlikely((pkt->mask & RXE_GRH_MASK) == 0)) {
			pr_warn_ratelimited("no grh for mcast qpn\n");
			goto err1;
		}
	}

	pkt->qp = qp;
	return 0;

err2:
	rxe_drop_ref(qp);
err1:
	return -EINVAL;
}
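
/* Hand the packet to the destination QP: requests go to the responder,
 * responses and acks go to the completer.
 */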
static inline void rxe_rcv_pkt(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	if (pkt->mask & RXE_REQ_MASK)
		rxe_resp_queue_pkt(pkt->qp, skb);
	else
		rxe_comp_queue_pkt(pkt->qp, skb);
}
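
/* Deliver a multicast packet to every QP attached to the destination
 * multicast group, cloning the skb for all but the last QP in the list.
 */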
static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
{
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	struct rxe_mc_grp *mcg;
	struct rxe_mc_elem *mce;
	struct rxe_qp *qp;
	union ib_gid dgid;
	struct sk_buff *per_qp_skb;
	struct rxe_pkt_info *per_qp_pkt;
	int err;

	if (skb->protocol == htons(ETH_P_IP))
		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
				       (struct in6_addr *)&dgid);
	else if (skb->protocol == htons(ETH_P_IPV6))
		memcpy(&dgid, &ipv6_hdr(skb)->daddr, sizeof(dgid));

	/* lookup mcast group corresponding to mgid, takes a ref */
	mcg = rxe_pool_get_key(&rxe->mc_grp_pool, &dgid);
	if (!mcg)
		goto err1;	/* mcast group not registered */

	spin_lock_bh(&mcg->mcg_lock);

	list_for_each_entry(mce, &mcg->qp_list, qp_list) {
		qp = mce->qp;
		pkt = SKB_TO_PKT(skb);

		/* validate qp for incoming packet */
		err = check_type_state(rxe, pkt, qp);
		if (err)
			continue;

		err = check_keys(rxe, pkt, bth_qpn(pkt), qp);
		if (err)
			continue;

		/* for all but the last qp create a new clone of the
		 * skb and pass to the qp.
		 */
		if (mce->qp_list.next != &mcg->qp_list)
			per_qp_skb = skb_clone(skb, GFP_ATOMIC);
		else
			per_qp_skb = skb;

		if (unlikely(!per_qp_skb))
			continue;

		per_qp_pkt = SKB_TO_PKT(per_qp_skb);
		per_qp_pkt->qp = qp;
		rxe_add_ref(qp);
		rxe_rcv_pkt(per_qp_pkt, per_qp_skb);
	}

	spin_unlock_bh(&mcg->mcg_lock);

	rxe_drop_ref(mcg);	/* drop ref from rxe_pool_get_key. */

	return;

err1:
	kfree_skb(skb);
}

/**
 * rxe_chk_dgid - validate destination IP address
 * @rxe: rxe device that received packet
 * @skb: the received packet buffer
 *
 * Accept any loopback packets.
 * Otherwise extract the destination IP address from the packet and accept
 * it if it is a multicast address or matches an SGID table entry.
 */
static int rxe_chk_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
{
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	const struct ib_gid_attr *gid_attr;
	union ib_gid dgid;
	union ib_gid *pdgid;

	if (pkt->mask & RXE_LOOPBACK_MASK)
		return 0;

	if (skb->protocol == htons(ETH_P_IP)) {
		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
				       (struct in6_addr *)&dgid);
		pdgid = &dgid;
	} else {
		pdgid = (union ib_gid *)&ipv6_hdr(skb)->daddr;
	}

	if (rdma_is_multicast_addr((struct in6_addr *)pdgid))
		return 0;

	gid_attr = rdma_find_gid_by_port(&rxe->ib_dev, pdgid,
					 IB_GID_TYPE_ROCE_UDP_ENCAP,
					 1, skb->dev);
	if (IS_ERR(gid_attr))
		return PTR_ERR(gid_attr);

	rdma_put_gid_attr(gid_attr);

	return 0;
}

/* rxe_rcv is called from the interface driver */
void rxe_rcv(struct sk_buff *skb)
{
	int err;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	struct rxe_dev *rxe = pkt->rxe;
	__be32 *icrcp;
	u32 calc_icrc, pack_icrc;

	pkt->offset = 0;

	if (unlikely(skb->len < pkt->offset + RXE_BTH_BYTES))
		goto drop;

	if (rxe_chk_dgid(rxe, skb) < 0) {
		pr_warn_ratelimited("failed checking dgid\n");
		goto drop;
	}

	pkt->opcode = bth_opcode(pkt);
	pkt->psn = bth_psn(pkt);
	pkt->qp = NULL;
	pkt->mask |= rxe_opcode[pkt->opcode].mask;

	if (unlikely(skb->len < header_size(pkt)))
		goto drop;

	err = hdr_check(pkt);
	if (unlikely(err))
		goto drop;

	/* Verify ICRC */
	icrcp = (__be32 *)(pkt->hdr + pkt->paylen - RXE_ICRC_SIZE);
	pack_icrc = be32_to_cpu(*icrcp);

	calc_icrc = rxe_icrc_hdr(pkt, skb);
	calc_icrc = rxe_crc32(rxe, calc_icrc, (u8 *)payload_addr(pkt),
			      payload_size(pkt) + bth_pad(pkt));
	calc_icrc = (__force u32)cpu_to_be32(~calc_icrc);
	if (unlikely(calc_icrc != pack_icrc)) {
		if (skb->protocol == htons(ETH_P_IPV6))
			pr_warn_ratelimited("bad ICRC from %pI6c\n",
					    &ipv6_hdr(skb)->saddr);
		else if (skb->protocol == htons(ETH_P_IP))
			pr_warn_ratelimited("bad ICRC from %pI4\n",
					    &ip_hdr(skb)->saddr);
		else
			pr_warn_ratelimited("bad ICRC from unknown\n");

		goto drop;
	}

	rxe_counter_inc(rxe, RXE_CNT_RCVD_PKTS);

	if (unlikely(bth_qpn(pkt) == IB_MULTICAST_QPN))
		rxe_rcv_mcast_pkt(rxe, skb);
	else
		rxe_rcv_pkt(pkt, skb);

	return;

drop:
	if (pkt->qp)
		rxe_drop_ref(pkt->qp);

	kfree_skb(skb);
}