/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/restrack.h>
#include <rdma/signature.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>

#define IB_FW_VERSION_NAME_MAX  ETHTOOL_FWVERS_LEN

struct ib_umem_odp;

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
extern struct workqueue_struct *ib_comp_unbound_wq;

__printf(3, 4) __cold
void ibdev_printk(const char *level, const struct ib_device *ibdev,
                  const char *format, ...);
__printf(2, 3) __cold
void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_info(const struct ib_device *ibdev, const char *format, ...);

#if defined(CONFIG_DYNAMIC_DEBUG)
#define ibdev_dbg(__dev, format, args...)               \
        dynamic_ibdev_dbg(__dev, format, ##args)
#elif defined(DEBUG)
#define ibdev_dbg(__dev, format, args...)               \
        ibdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
#endif
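
/*
 * Illustrative usage sketch, not part of the original header: the ibdev_*
 * helpers mirror the dev_* printk wrappers but prefix messages with the
 * RDMA device name.  A hypothetical driver error path (create_hw_cq() is
 * made up for the example) might look like:
 *
 *      ret = create_hw_cq(...);
 *      if (ret)
 *              ibdev_err(ibdev, "failed to create CQ: %d\n", ret);
 */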

union ib_gid {
        u8      raw[16];
        struct {
                __be64  subnet_prefix;
                __be64  interface_id;
        } global;
};

extern union ib_gid zgid;

enum ib_gid_type {
        /* If link layer is Ethernet, this is RoCE V1 */
        IB_GID_TYPE_IB        = 0,
        IB_GID_TYPE_ROCE      = 0,
        IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
        IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT      4791
struct ib_gid_attr {
        struct net_device __rcu *ndev;
        struct ib_device *device;
        union ib_gid gid;
        enum ib_gid_type gid_type;
        u16 index;
        u8 port_num;
};

enum {
        /* set the local administered indication */
        IB_SA_WELL_KNOWN_GUID   = BIT_ULL(57) | 2,
};

enum rdma_transport_type {
        RDMA_TRANSPORT_IB,
        RDMA_TRANSPORT_IWARP,
        RDMA_TRANSPORT_USNIC,
        RDMA_TRANSPORT_USNIC_UDP,
        RDMA_TRANSPORT_UNSPECIFIED,
};

enum rdma_protocol_type {
        RDMA_PROTOCOL_IB,
        RDMA_PROTOCOL_IBOE,
        RDMA_PROTOCOL_IWARP,
        RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(unsigned int node_type);

enum rdma_network_type {
        RDMA_NETWORK_IB,
        RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
        RDMA_NETWORK_IPV4,
        RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
        if (network_type == RDMA_NETWORK_IPV4 ||
            network_type == RDMA_NETWORK_IPV6)
                return IB_GID_TYPE_ROCE_UDP_ENCAP;

        /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
        return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type
rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
{
        if (attr->gid_type == IB_GID_TYPE_IB)
                return RDMA_NETWORK_IB;

        if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
                return RDMA_NETWORK_IPV4;
        else
                return RDMA_NETWORK_IPV6;
}
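
/*
 * Illustrative example, not part of the original header: a RoCE v2
 * (IB_GID_TYPE_ROCE_UDP_ENCAP) entry whose GID holds an IPv4-mapped
 * address such as ::ffff:192.0.2.1 reports RDMA_NETWORK_IPV4, while a
 * plain IB GID always reports RDMA_NETWORK_IB, e.g.:
 *
 *      if (rdma_gid_attr_network_type(gid_attr) == RDMA_NETWORK_IPV4)
 *              ...;    use the last four bytes of the GID as the IPv4 address
 */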

enum rdma_link_layer {
        IB_LINK_LAYER_UNSPECIFIED,
        IB_LINK_LAYER_INFINIBAND,
        IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
        IB_DEVICE_RESIZE_MAX_WR                 = (1 << 0),
        IB_DEVICE_BAD_PKEY_CNTR                 = (1 << 1),
        IB_DEVICE_BAD_QKEY_CNTR                 = (1 << 2),
        IB_DEVICE_RAW_MULTI                     = (1 << 3),
        IB_DEVICE_AUTO_PATH_MIG                 = (1 << 4),
        IB_DEVICE_CHANGE_PHY_PORT               = (1 << 5),
        IB_DEVICE_UD_AV_PORT_ENFORCE            = (1 << 6),
        IB_DEVICE_CURR_QP_STATE_MOD             = (1 << 7),
        IB_DEVICE_SHUTDOWN_PORT                 = (1 << 8),
        /* Not in use, former INIT_TYPE         = (1 << 9),*/
        IB_DEVICE_PORT_ACTIVE_EVENT             = (1 << 10),
        IB_DEVICE_SYS_IMAGE_GUID                = (1 << 11),
        IB_DEVICE_RC_RNR_NAK_GEN                = (1 << 12),
        IB_DEVICE_SRQ_RESIZE                    = (1 << 13),
        IB_DEVICE_N_NOTIFY_CQ                   = (1 << 14),

        /*
         * This device supports a per-device lkey or stag that can be
         * used without performing a memory registration for the local
         * memory.  Note that ULPs should never check this flag, but
         * should instead use the local_dma_lkey flag in the ib_pd
         * structure, which will always contain a usable lkey.
         */
        IB_DEVICE_LOCAL_DMA_LKEY                = (1 << 15),
        /* Reserved, old SEND_W_INV             = (1 << 16),*/
        IB_DEVICE_MEM_WINDOW                    = (1 << 17),
        /*
         * Devices should set IB_DEVICE_UD_IP_SUM if they support
         * insertion of UDP and TCP checksum on outgoing UD IPoIB
         * messages and can verify the validity of checksum for
         * incoming messages.  Setting this flag implies that the
         * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
         */
        IB_DEVICE_UD_IP_CSUM                    = (1 << 18),
        IB_DEVICE_UD_TSO                        = (1 << 19),
        IB_DEVICE_XRC                           = (1 << 20),

        /*
         * This device supports the IB "base memory management extension",
         * which includes support for fast registrations (IB_WR_REG_MR,
         * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
         * also be set by any iWarp device which must support FRs to comply
         * with the iWarp verbs spec.  iWarp devices also support the
         * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
         * stag.
         */
        IB_DEVICE_MEM_MGT_EXTENSIONS            = (1 << 21),
        IB_DEVICE_BLOCK_MULTICAST_LOOPBACK      = (1 << 22),
        IB_DEVICE_MEM_WINDOW_TYPE_2A            = (1 << 23),
        IB_DEVICE_MEM_WINDOW_TYPE_2B            = (1 << 24),
        IB_DEVICE_RC_IP_CSUM                    = (1 << 25),
        /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
        IB_DEVICE_RAW_IP_CSUM                   = (1 << 26),
        /*
         * Devices should set IB_DEVICE_CROSS_CHANNEL if they
         * support execution of WQEs that involve synchronization
         * of I/O operations with single completion queue managed
         * by hardware.
         */
        IB_DEVICE_CROSS_CHANNEL                 = (1 << 27),
        IB_DEVICE_MANAGED_FLOW_STEERING         = (1 << 29),
        IB_DEVICE_SIGNATURE_HANDOVER            = (1 << 30),
        IB_DEVICE_ON_DEMAND_PAGING              = (1ULL << 31),
        IB_DEVICE_SG_GAPS_REG                   = (1ULL << 32),
        IB_DEVICE_VIRTUAL_FUNCTION              = (1ULL << 33),
        /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
        IB_DEVICE_RAW_SCATTER_FCS               = (1ULL << 34),
        IB_DEVICE_RDMA_NETDEV_OPA_VNIC          = (1ULL << 35),
        /* The device supports padding incoming writes to cacheline. */
        IB_DEVICE_PCI_WRITE_END_PADDING         = (1ULL << 36),
        IB_DEVICE_ALLOW_USER_UNREG              = (1ULL << 37),
};
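
/*
 * Illustrative sketch, not part of the original header: ULPs normally test
 * these capability bits against the cached device attributes, e.g.
 * (assuming a struct ib_device *device with a populated attrs field):
 *
 *      if (device->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
 *              ...;    prefer IB_WR_REG_MR based fast registration
 */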

enum ib_atomic_cap {
        IB_ATOMIC_NONE,
        IB_ATOMIC_HCA,
        IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
        IB_ODP_SUPPORT          = 1 << 0,
        IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};

enum ib_odp_transport_cap_bits {
        IB_ODP_SUPPORT_SEND     = 1 << 0,
        IB_ODP_SUPPORT_RECV     = 1 << 1,
        IB_ODP_SUPPORT_WRITE    = 1 << 2,
        IB_ODP_SUPPORT_READ     = 1 << 3,
        IB_ODP_SUPPORT_ATOMIC   = 1 << 4,
        IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
};

struct ib_odp_caps {
        uint64_t general_caps;
        struct {
                uint32_t  rc_odp_caps;
                uint32_t  uc_odp_caps;
                uint32_t  ud_odp_caps;
                uint32_t  xrc_odp_caps;
        } per_transport_caps;
};

struct ib_rss_caps {
        /* Corresponding bit will be set if qp type from
         * 'enum ib_qp_type' is supported, e.g.
         * supported_qpts |= 1 << IB_QPT_UD
         */
        u32 supported_qpts;
        u32 max_rwq_indirection_tables;
        u32 max_rwq_indirection_table_size;
};

enum ib_tm_cap_flags {
        /* Support tag matching on RC transport */
        IB_TM_CAP_RC            = 1 << 0,
};

struct ib_tm_caps {
        /* Max size of RNDV header */
        u32 max_rndv_hdr_size;
        /* Max number of entries in tag matching list */
        u32 max_num_tags;
        /* From enum ib_tm_cap_flags */
        u32 flags;
        /* Max number of outstanding list operations */
        u32 max_ops;
        /* Max number of SGE in tag matching entry */
        u32 max_sge;
};

struct ib_cq_init_attr {
        unsigned int    cqe;
        int             comp_vector;
        u32             flags;
};

enum ib_cq_attr_mask {
        IB_CQ_MODERATE = 1 << 0,
};

struct ib_cq_caps {
        u16     max_cq_moderation_count;
        u16     max_cq_moderation_period;
};

struct ib_dm_mr_attr {
        u64     length;
        u64     offset;
        u32     access_flags;
};

struct ib_dm_alloc_attr {
        u64     length;
        u32     alignment;
        u32     flags;
};

struct ib_device_attr {
        u64                     fw_ver;
        __be64                  sys_image_guid;
        u64                     max_mr_size;
        u64                     page_size_cap;
        u32                     vendor_id;
        u32                     vendor_part_id;
        u32                     hw_ver;
        int                     max_qp;
        int                     max_qp_wr;
        u64                     device_cap_flags;
        int                     max_send_sge;
        int                     max_recv_sge;
        int                     max_sge_rd;
        int                     max_cq;
        int                     max_cqe;
        int                     max_mr;
        int                     max_pd;
        int                     max_qp_rd_atom;
        int                     max_ee_rd_atom;
        int                     max_res_rd_atom;
        int                     max_qp_init_rd_atom;
        int                     max_ee_init_rd_atom;
        enum ib_atomic_cap      atomic_cap;
        enum ib_atomic_cap      masked_atomic_cap;
        int                     max_ee;
        int                     max_rdd;
        int                     max_mw;
        int                     max_raw_ipv6_qp;
        int                     max_raw_ethy_qp;
        int                     max_mcast_grp;
        int                     max_mcast_qp_attach;
        int                     max_total_mcast_qp_attach;
        int                     max_ah;
        int                     max_fmr;
        int                     max_map_per_fmr;
        int                     max_srq;
        int                     max_srq_wr;
        int                     max_srq_sge;
        unsigned int            max_fast_reg_page_list_len;
        u16                     max_pkeys;
        u8                      local_ca_ack_delay;
        int                     sig_prot_cap;
        int                     sig_guard_cap;
        struct ib_odp_caps      odp_caps;
        uint64_t                timestamp_mask;
        uint64_t                hca_core_clock; /* in KHZ */
        struct ib_rss_caps      rss_caps;
        u32                     max_wq_type_rq;
        u32                     raw_packet_caps; /* Use ib_raw_packet_caps enum */
        struct ib_tm_caps       tm_caps;
        struct ib_cq_caps       cq_caps;
        u64                     max_dm_size;
};

enum ib_mtu {
        IB_MTU_256  = 1,
        IB_MTU_512  = 2,
        IB_MTU_1024 = 3,
        IB_MTU_2048 = 4,
        IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
        switch (mtu) {
        case IB_MTU_256:  return  256;
        case IB_MTU_512:  return  512;
        case IB_MTU_1024: return 1024;
        case IB_MTU_2048: return 2048;
        case IB_MTU_4096: return 4096;
        default:          return -1;
        }
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
        if (mtu >= 4096)
                return IB_MTU_4096;
        else if (mtu >= 2048)
                return IB_MTU_2048;
        else if (mtu >= 1024)
                return IB_MTU_1024;
        else if (mtu >= 512)
                return IB_MTU_512;
        else
                return IB_MTU_256;
}
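
/*
 * Illustrative example, not part of the original header: the two helpers
 * above round-trip exact IB MTU values, and ib_mtu_int_to_enum() rounds an
 * arbitrary byte count down to the nearest supported IB MTU, e.g.:
 *
 *      ib_mtu_enum_to_int(IB_MTU_1024)         returns 1024
 *      ib_mtu_int_to_enum(1500)                returns IB_MTU_1024
 */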

enum ib_port_state {
        IB_PORT_NOP             = 0,
        IB_PORT_DOWN            = 1,
        IB_PORT_INIT            = 2,
        IB_PORT_ARMED           = 3,
        IB_PORT_ACTIVE          = 4,
        IB_PORT_ACTIVE_DEFER    = 5
};

enum ib_port_width {
        IB_WIDTH_1X     = 1,
        IB_WIDTH_2X     = 16,
        IB_WIDTH_4X     = 2,
        IB_WIDTH_8X     = 4,
        IB_WIDTH_12X    = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
        switch (width) {
        case IB_WIDTH_1X:  return  1;
        case IB_WIDTH_2X:  return  2;
        case IB_WIDTH_4X:  return  4;
        case IB_WIDTH_8X:  return  8;
        case IB_WIDTH_12X: return 12;
        default:           return -1;
        }
}

enum ib_port_speed {
        IB_SPEED_SDR    = 1,
        IB_SPEED_DDR    = 2,
        IB_SPEED_QDR    = 4,
        IB_SPEED_FDR10  = 8,
        IB_SPEED_FDR    = 16,
        IB_SPEED_EDR    = 32,
        IB_SPEED_HDR    = 64
};

/**
 * struct rdma_hw_stats
 * @lock - Mutex to protect parallel write access to lifespan and values
 *   of counters, which are 64 bits and not guaranteed to be written
 *   atomically on 32-bit systems.
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @name - Array of pointers to static names used for the counters in
 *   directory.
 * @num_counters - How many hardware counters there are.  If name is
 *   shorter than this number, a kernel oops will result.  Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the driver's get_stats routine
 */
struct rdma_hw_stats {
        struct mutex    lock; /* Protect lifespan and values[] */
        unsigned long   timestamp;
        unsigned long   lifespan;
        const char * const *names;
        int             num_counters;
        u64             value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements in array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
                const char * const *names, int num_counters,
                unsigned long lifespan)
{
        struct rdma_hw_stats *stats;

        stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
                        GFP_KERNEL);
        if (!stats)
                return NULL;
        stats->names = names;
        stats->num_counters = num_counters;
        stats->lifespan = msecs_to_jiffies(lifespan);

        return stats;
}
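
/*
 * Illustrative driver-side sketch, not part of the original header; the
 * foo_* names are hypothetical.  It shows the pattern the kernel-doc above
 * recommends: a static name table, a BUILD_BUG_ON tying it to the counter
 * count, and allocation via rdma_alloc_hw_stats_struct():
 *
 *      static const char * const foo_counter_names[] = {
 *              "rx_pkts",
 *              "tx_pkts",
 *      };
 *
 *      static struct rdma_hw_stats *foo_alloc_hw_stats(void)
 *      {
 *              BUILD_BUG_ON(ARRAY_SIZE(foo_counter_names) < 2);
 *              return rdma_alloc_hw_stats_struct(foo_counter_names,
 *                                      ARRAY_SIZE(foo_counter_names),
 *                                      RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *      }
 */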

/* Define bits for the various functionality this port needs to be
 * supported by the core.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD            0x00000001
#define RDMA_CORE_CAP_IB_SMI            0x00000002
#define RDMA_CORE_CAP_IB_CM             0x00000004
#define RDMA_CORE_CAP_IW_CM             0x00000008
#define RDMA_CORE_CAP_IB_SA             0x00000010
#define RDMA_CORE_CAP_OPA_MAD           0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB             0x00001000
#define RDMA_CORE_CAP_ETH_AH            0x00002000
#define RDMA_CORE_CAP_OPA_AH            0x00004000
#define RDMA_CORE_CAP_IB_GRH_REQUIRED   0x00008000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB           0x00100000
#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
#define RDMA_CORE_CAP_PROT_USNIC        0x02000000

#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
                                        | RDMA_CORE_CAP_PROT_ROCE     \
                                        | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
                                        | RDMA_CORE_CAP_IB_MAD \
                                        | RDMA_CORE_CAP_IB_SMI \
                                        | RDMA_CORE_CAP_IB_CM  \
                                        | RDMA_CORE_CAP_IB_SA  \
                                        | RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
                                        | RDMA_CORE_CAP_IB_MAD  \
                                        | RDMA_CORE_CAP_IB_CM   \
                                        | RDMA_CORE_CAP_AF_IB   \
                                        | RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP                       \
                                        (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
                                        | RDMA_CORE_CAP_IB_MAD  \
                                        | RDMA_CORE_CAP_IB_CM   \
                                        | RDMA_CORE_CAP_AF_IB   \
                                        | RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
                                        | RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
                                        | RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET       (RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC            (RDMA_CORE_CAP_PROT_USNIC)
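
/*
 * Worked example, not part of the original header: the composite port
 * capabilities above are just ORs of one protocol bit, the relevant
 * management bits and an address-format bit.  For instance
 * RDMA_CORE_PORT_IBA_IB is
 * 0x00100000 | 0x01 | 0x02 | 0x04 | 0x10 | 0x1000 = 0x00101017.
 */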

struct ib_port_attr {
        u64                     subnet_prefix;
        enum ib_port_state      state;
        enum ib_mtu             max_mtu;
        enum ib_mtu             active_mtu;
        int                     gid_tbl_len;
        unsigned int            ip_gids:1;
        /* This is the value from PortInfo CapabilityMask, defined by IBA */
        u32                     port_cap_flags;
        u32                     max_msg_sz;
        u32                     bad_pkey_cntr;
        u32                     qkey_viol_cntr;
        u16                     pkey_tbl_len;
        u32                     sm_lid;
        u32                     lid;
        u8                      lmc;
        u8                      max_vl_num;
        u8                      sm_sl;
        u8                      subnet_timeout;
        u8                      init_type_reply;
        u8                      active_width;
        u8                      active_speed;
        u8                      phys_state;
        u16                     port_cap_flags2;
};

enum ib_device_modify_flags {
        IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
        IB_DEVICE_MODIFY_NODE_DESC      = 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
        u64     sys_image_guid;
        char    node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
        IB_PORT_SHUTDOWN                = 1,
        IB_PORT_INIT_TYPE               = (1 << 2),
        IB_PORT_RESET_QKEY_CNTR         = (1 << 3),
        IB_PORT_OPA_MASK_CHG            = (1 << 4)
};

struct ib_port_modify {
        u32     set_port_cap_mask;
        u32     clr_port_cap_mask;
        u8      init_type;
};

enum ib_event_type {
        IB_EVENT_CQ_ERR,
        IB_EVENT_QP_FATAL,
        IB_EVENT_QP_REQ_ERR,
        IB_EVENT_QP_ACCESS_ERR,
        IB_EVENT_COMM_EST,
        IB_EVENT_SQ_DRAINED,
        IB_EVENT_PATH_MIG,
        IB_EVENT_PATH_MIG_ERR,
        IB_EVENT_DEVICE_FATAL,
        IB_EVENT_PORT_ACTIVE,
        IB_EVENT_PORT_ERR,
        IB_EVENT_LID_CHANGE,
        IB_EVENT_PKEY_CHANGE,
        IB_EVENT_SM_CHANGE,
        IB_EVENT_SRQ_ERR,
        IB_EVENT_SRQ_LIMIT_REACHED,
        IB_EVENT_QP_LAST_WQE_REACHED,
        IB_EVENT_CLIENT_REREGISTER,
        IB_EVENT_GID_CHANGE,
        IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
        struct ib_device        *device;
        union {
                struct ib_cq    *cq;
                struct ib_qp    *qp;
                struct ib_srq   *srq;
                struct ib_wq    *wq;
                u8              port_num;
        } element;
        enum ib_event_type      event;
};

struct ib_event_handler {
        struct ib_device *device;
        void             (*handler)(struct ib_event_handler *, struct ib_event *);
        struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)          \
        do {                                                    \
                (_ptr)->device  = _device;                      \
                (_ptr)->handler = _handler;                     \
                INIT_LIST_HEAD(&(_ptr)->list);                  \
        } while (0)
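
/*
 * Illustrative sketch, not part of the original header; my_event_handler is
 * hypothetical and registration via ib_register_event_handler() is assumed:
 *
 *      static void my_event_handler(struct ib_event_handler *handler,
 *                                   struct ib_event *event)
 *      {
 *              pr_info("async event: %s\n", ib_event_msg(event->event));
 *      }
 *
 *      INIT_IB_EVENT_HANDLER(&ev_handler, device, my_event_handler);
 *      ib_register_event_handler(&ev_handler);
 */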

struct ib_global_route {
        const struct ib_gid_attr *sgid_attr;
        union ib_gid    dgid;
        u32             flow_label;
        u8              sgid_index;
        u8              hop_limit;
        u8              traffic_class;
};

struct ib_grh {
        __be32          version_tclass_flow;
        __be16          paylen;
        u8              next_hdr;
        u8              hop_limit;
        union ib_gid    sgid;
        union ib_gid    dgid;
};

union rdma_network_hdr {
        struct ib_grh ibgrh;
        struct {
                /* The IB spec states that if it's IPv4, the header
                 * is located in the last 20 bytes of the header.
                 */
                u8              reserved[20];
                struct iphdr    roce4grh;
        };
};

#define IB_QPN_MASK             0xFFFFFF

enum {
        IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE       cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE   cpu_to_be16(0xC000)

enum ib_ah_flags {
        IB_AH_GRH       = 1
};

enum ib_rate {
        IB_RATE_PORT_CURRENT = 0,
        IB_RATE_2_5_GBPS = 2,
        IB_RATE_5_GBPS   = 5,
        IB_RATE_10_GBPS  = 3,
        IB_RATE_20_GBPS  = 6,
        IB_RATE_30_GBPS  = 4,
        IB_RATE_40_GBPS  = 7,
        IB_RATE_60_GBPS  = 8,
        IB_RATE_80_GBPS  = 9,
        IB_RATE_120_GBPS = 10,
        IB_RATE_14_GBPS  = 11,
        IB_RATE_56_GBPS  = 12,
        IB_RATE_112_GBPS = 13,
        IB_RATE_168_GBPS = 14,
        IB_RATE_25_GBPS  = 15,
        IB_RATE_100_GBPS = 16,
        IB_RATE_200_GBPS = 17,
        IB_RATE_300_GBPS = 18,
        IB_RATE_28_GBPS  = 19,
        IB_RATE_50_GBPS  = 20,
        IB_RATE_400_GBPS = 21,
        IB_RATE_600_GBPS = 22,
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 *                            normal registration
 * @IB_MR_TYPE_SIGNATURE:     memory region that is used for
 *                            signature operations (data-integrity
 *                            capable regions)
 * @IB_MR_TYPE_SG_GAPS:       memory region that is capable of registering
 *                            any arbitrary sg lists (without
 *                            the normal mr constraints - see
 *                            ib_map_mr_sg)
 * @IB_MR_TYPE_DM:            memory region that is used for device
 *                            memory registration
 * @IB_MR_TYPE_USER:          memory region that is used for the user-space
 *                            application
 * @IB_MR_TYPE_DMA:           memory region that is used for DMA operations
 *                            without address translations (VA=PA)
 * @IB_MR_TYPE_INTEGRITY:     memory region that is used for
 *                            data integrity operations
 */
enum ib_mr_type {
        IB_MR_TYPE_MEM_REG,
        IB_MR_TYPE_SIGNATURE,
        IB_MR_TYPE_SG_GAPS,
        IB_MR_TYPE_DM,
        IB_MR_TYPE_USER,
        IB_MR_TYPE_DMA,
        IB_MR_TYPE_INTEGRITY,
};
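
/*
 * Illustrative sketch, not part of the original header: kernel ULPs pick one
 * of these types when allocating a memory region, e.g. (assuming the
 * ib_alloc_mr() helper declared elsewhere in this file):
 *
 *      mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_num_sg);
 *      if (IS_ERR(mr))
 *              return PTR_ERR(mr);
 */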

enum ib_mr_status_check {
        IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err:     Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
        u32                 fail_status;
        struct ib_sig_err   sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

enum rdma_ah_attr_type {
        RDMA_AH_ATTR_TYPE_UNDEFINED,
        RDMA_AH_ATTR_TYPE_IB,
        RDMA_AH_ATTR_TYPE_ROCE,
        RDMA_AH_ATTR_TYPE_OPA,
};

struct ib_ah_attr {
        u16                     dlid;
        u8                      src_path_bits;
};

struct roce_ah_attr {
        u8                      dmac[ETH_ALEN];
};

struct opa_ah_attr {
        u32                     dlid;
        u8                      src_path_bits;
        bool                    make_grd;
};

struct rdma_ah_attr {
        struct ib_global_route  grh;
        u8                      sl;
        u8                      static_rate;
        u8                      port_num;
        u8                      ah_flags;
        enum rdma_ah_attr_type  type;
        union {
                struct ib_ah_attr ib;
                struct roce_ah_attr roce;
                struct opa_ah_attr opa;
        };
};

enum ib_wc_status {
        IB_WC_SUCCESS,
        IB_WC_LOC_LEN_ERR,
        IB_WC_LOC_QP_OP_ERR,
        IB_WC_LOC_EEC_OP_ERR,
        IB_WC_LOC_PROT_ERR,
        IB_WC_WR_FLUSH_ERR,
        IB_WC_MW_BIND_ERR,
        IB_WC_BAD_RESP_ERR,
        IB_WC_LOC_ACCESS_ERR,
        IB_WC_REM_INV_REQ_ERR,
        IB_WC_REM_ACCESS_ERR,
        IB_WC_REM_OP_ERR,
        IB_WC_RETRY_EXC_ERR,
        IB_WC_RNR_RETRY_EXC_ERR,
        IB_WC_LOC_RDD_VIOL_ERR,
        IB_WC_REM_INV_RD_REQ_ERR,
        IB_WC_REM_ABORT_ERR,
        IB_WC_INV_EECN_ERR,
        IB_WC_INV_EEC_STATE_ERR,
        IB_WC_FATAL_ERR,
        IB_WC_RESP_TIMEOUT_ERR,
        IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
        IB_WC_SEND,
        IB_WC_RDMA_WRITE,
        IB_WC_RDMA_READ,
        IB_WC_COMP_SWAP,
        IB_WC_FETCH_ADD,
        IB_WC_LSO,
        IB_WC_LOCAL_INV,
        IB_WC_REG_MR,
IB_WC_MASKED_COMP_SWAP ,
IB_WC_MASKED_FETCH_ADD ,
2005-04-17 02:20:36 +04:00
/*
* Set value of IB_WC_RECV so consumers can test if a completion is a
* receive by testing (opcode & IB_WC_RECV); see the dispatch sketch after this enum.
*/
IB_WC_RECV = 1 << 7 ,
IB_WC_RECV_RDMA_WITH_IMM
} ;
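For example, a completion handler can use that opcode test to split receive
completions from send-side ones. A minimal sketch; handle_recv() and
handle_send() are hypothetical consumer helpers:

static void dispatch_wc(struct ib_wc *wc)
{
	if (wc->opcode & IB_WC_RECV)
		handle_recv(wc);	/* IB_WC_RECV or IB_WC_RECV_RDMA_WITH_IMM */
	else
		handle_send(wc);
}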
enum ib_wc_flags {
IB_WC_GRH = 1 ,
2008-07-15 10:48:45 +04:00
IB_WC_WITH_IMM = ( 1 << 1 ) ,
IB_WC_WITH_INVALIDATE = ( 1 << 2 ) ,
2012-01-11 21:03:51 +04:00
IB_WC_IP_CSUM_OK = ( 1 << 3 ) ,
2013-12-12 20:03:11 +04:00
IB_WC_WITH_SMAC = ( 1 << 4 ) ,
IB_WC_WITH_VLAN = ( 1 << 5 ) ,
2015-12-23 15:56:51 +03:00
IB_WC_WITH_NETWORK_HDR_TYPE = ( 1 << 6 ) ,
2005-04-17 02:20:36 +04:00
} ;
struct ib_wc {
2015-12-11 22:53:03 +03:00
union {
u64 wr_id ;
struct ib_cqe * wr_cqe ;
} ;
2005-04-17 02:20:36 +04:00
enum ib_wc_status status ;
enum ib_wc_opcode opcode ;
u32 vendor_err ;
u32 byte_len ;
2006-12-31 22:09:42 +03:00
struct ib_qp * qp ;
2008-07-15 10:48:45 +04:00
union {
__be32 imm_data ;
u32 invalidate_rkey ;
} ex ;
2005-04-17 02:20:36 +04:00
u32 src_qp ;
IB/core: Fix ib_wc structure size to remain in 64 bytes boundary
The change of slid from u16 to u32 made sizeof(struct ib_wc)
cross the 64B boundary, which causes more cache misses. This patch
rearranges the fields and keeps the size at 64B.
Pahole output before this change:
struct ib_wc {
union {
u64 wr_id; /* 8 */
struct ib_cqe * wr_cqe; /* 8 */
}; /* 0 8 */
enum ib_wc_status status; /* 8 4 */
enum ib_wc_opcode opcode; /* 12 4 */
u32 vendor_err; /* 16 4 */
u32 byte_len; /* 20 4 */
struct ib_qp * qp; /* 24 8 */
union {
__be32 imm_data; /* 4 */
u32 invalidate_rkey; /* 4 */
} ex; /* 32 4 */
u32 src_qp; /* 36 4 */
int wc_flags; /* 40 4 */
u16 pkey_index; /* 44 2 */
/* XXX 2 bytes hole, try to pack */
u32 slid; /* 48 4 */
u8 sl; /* 52 1 */
u8 dlid_path_bits; /* 53 1 */
u8 port_num; /* 54 1 */
u8 smac[6]; /* 55 6 */
/* XXX 1 byte hole, try to pack */
u16 vlan_id; /* 62 2 */
/* --- cacheline 1 boundary (64 bytes) --- */
u8 network_hdr_type; /* 64 1 */
/* size: 72, cachelines: 2, members: 17 */
/* sum members: 62, holes: 2, sum holes: 3 */
/* padding: 7 */
/* last cacheline: 8 bytes */
};
Pahole output after this change:
struct ib_wc {
union {
u64 wr_id; /* 8 */
struct ib_cqe * wr_cqe; /* 8 */
}; /* 0 8 */
enum ib_wc_status status; /* 8 4 */
enum ib_wc_opcode opcode; /* 12 4 */
u32 vendor_err; /* 16 4 */
u32 byte_len; /* 20 4 */
struct ib_qp * qp; /* 24 8 */
union {
__be32 imm_data; /* 4 */
u32 invalidate_rkey; /* 4 */
} ex; /* 32 4 */
u32 src_qp; /* 36 4 */
u32 slid; /* 40 4 */
int wc_flags; /* 44 4 */
u16 pkey_index; /* 48 2 */
u8 sl; /* 50 1 */
u8 dlid_path_bits; /* 51 1 */
u8 port_num; /* 52 1 */
u8 smac[6]; /* 53 6 */
/* XXX 1 byte hole, try to pack */
u16 vlan_id; /* 60 2 */
u8 network_hdr_type; /* 62 1 */
/* size: 64, cachelines: 1, members: 17 */
/* sum members: 62, holes: 1, sum holes: 1 */
/* padding: 1 */
};
Cc: <stable@vger.kernel.org> # v4.13
Fixes: 7db20ecd1d97 ("IB/core: Change wc.slid from 16 to 32 bits")
Signed-off-by: Bodong Wang <bodong@mellanox.com>
Reviewed-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
2018-01-12 08:58:41 +03:00
u32 slid ;
2005-04-17 02:20:36 +04:00
int wc_flags ;
u16 pkey_index ;
u8 sl ;
u8 dlid_path_bits ;
u8 port_num ; /* valid only for DR SMPs on switches */
2013-12-12 20:03:11 +04:00
u8 smac [ ETH_ALEN ] ;
u16 vlan_id ;
2015-12-23 15:56:51 +03:00
u8 network_hdr_type ;
2005-04-17 02:20:36 +04:00
} ;
IB: Return "maybe missed event" hint from ib_req_notify_cq()
The semantics defined by the InfiniBand specification say that
completion events are only generated when a completion is added to a
completion queue (CQ) after completion notification is requested. In
other words, this means that the following race is possible:
while (CQ is not empty)
ib_poll_cq(CQ);
// new completion is added after while loop is exited
ib_req_notify_cq(CQ);
// no event is generated for the existing completion
To close this race, the IB spec recommends doing another poll of the
CQ after requesting notification.
However, it is not always possible to arrange code this way (for
example, we have found that NAPI for IPoIB cannot poll after
requesting notification). Also, some hardware (eg Mellanox HCAs)
actually will generate an event for completions added before the call
to ib_req_notify_cq() -- which is allowed by the spec, since there's
no way for any upper-layer consumer to know exactly when a completion
was really added -- so the extra poll of the CQ is just a waste.
Motivated by this, we add a new flag "IB_CQ_REPORT_MISSED_EVENTS" for
ib_req_notify_cq() so that it can return a hint about whether a
completion may have been added before the request for notification.
The return value of ib_req_notify_cq() is extended so:
< 0 means an error occurred while requesting notification
== 0 means notification was requested successfully, and if
IB_CQ_REPORT_MISSED_EVENTS was passed in, then no
events were missed and it is safe to wait for another
event.
> 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was
passed in. It means that the consumer must poll the
CQ again to make sure it is empty to avoid the race
described above.
We add a flag to enable this behavior rather than turning it on
unconditionally, because checking for missed events may incur
significant overhead for some low-level drivers, and consumers that
don't care about the results of this test shouldn't be forced to pay
for the test.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2007-05-07 08:02:48 +04:00
enum ib_cq_notify_flags {
IB_CQ_SOLICITED = 1 << 0 ,
IB_CQ_NEXT_COMP = 1 << 1 ,
IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP ,
IB_CQ_REPORT_MISSED_EVENTS = 1 << 2 ,
2005-04-17 02:20:36 +04:00
} ;
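A hedged sketch of the re-arm pattern this flag enables: drain the CQ, request
notification with IB_CQ_REPORT_MISSED_EVENTS, and drain again whenever the
positive "maybe missed" hint is returned (process_wc() is a hypothetical
consumer helper; a negative return simply ends the loop here):

static void consumer_rearm_cq(struct ib_cq *cq)
{
	struct ib_wc wc;

	do {
		while (ib_poll_cq(cq, 1, &wc) > 0)
			process_wc(&wc);
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
}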
2011-05-24 03:31:36 +04:00
enum ib_srq_type {
2011-05-24 06:42:29 +04:00
IB_SRQT_BASIC ,
2017-08-17 15:52:05 +03:00
IB_SRQT_XRC ,
IB_SRQT_TM ,
2011-05-24 03:31:36 +04:00
} ;
2017-08-17 15:52:04 +03:00
static inline bool ib_srq_has_cq ( enum ib_srq_type srq_type )
{
2017-08-17 15:52:05 +03:00
return srq_type == IB_SRQT_XRC ||
srq_type == IB_SRQT_TM ;
2017-08-17 15:52:04 +03:00
}
2005-08-18 23:23:08 +04:00
enum ib_srq_attr_mask {
IB_SRQ_MAX_WR = 1 << 0 ,
IB_SRQ_LIMIT = 1 << 1 ,
} ;
struct ib_srq_attr {
u32 max_wr ;
u32 max_sge ;
u32 srq_limit ;
} ;
struct ib_srq_init_attr {
void ( * event_handler ) ( struct ib_event * , void * ) ;
void * srq_context ;
struct ib_srq_attr attr ;
2011-05-24 03:31:36 +04:00
enum ib_srq_type srq_type ;
2011-05-24 06:42:29 +04:00
2017-08-17 15:52:04 +03:00
struct {
struct ib_cq * cq ;
union {
struct {
struct ib_xrcd * xrcd ;
} xrc ;
2017-08-17 15:52:05 +03:00
struct {
u32 max_num_tags ;
} tag_matching ;
2017-08-17 15:52:04 +03:00
} ;
2011-05-24 06:42:29 +04:00
} ext ;
2005-08-18 23:23:08 +04:00
} ;
2005-04-17 02:20:36 +04:00
struct ib_qp_cap {
u32 max_send_wr ;
u32 max_recv_wr ;
u32 max_send_sge ;
u32 max_recv_sge ;
u32 max_inline_data ;
2016-05-03 19:01:09 +03:00
/*
* Maximum number of rdma_rw_ctx structures in flight at a time .
* ib_create_qp() will calculate the right number of needed WRs
* and MRs based on this .
*/
u32 max_rdma_ctxs ;
2005-04-17 02:20:36 +04:00
} ;
enum ib_sig_type {
IB_SIGNAL_ALL_WR ,
IB_SIGNAL_REQ_WR
} ;
enum ib_qp_type {
/*
* IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
* here ( and in that order ) since the MAD layer uses them as
* indices into a 2 - entry table .
*/
IB_QPT_SMI ,
IB_QPT_GSI ,
IB_QPT_RC ,
IB_QPT_UC ,
IB_QPT_UD ,
IB_QPT_RAW_IPV6 ,
2011-05-24 06:59:25 +04:00
IB_QPT_RAW_ETHERTYPE ,
2012-03-01 14:17:51 +04:00
IB_QPT_RAW_PACKET = 8 ,
2011-05-24 06:59:25 +04:00
IB_QPT_XRC_INI = 9 ,
IB_QPT_XRC_TGT ,
2013-07-07 18:25:52 +04:00
IB_QPT_MAX ,
2018-01-02 17:19:30 +03:00
IB_QPT_DRIVER = 0xFF ,
2013-07-07 18:25:52 +04:00
/* Reserve a range for qp types internal to the low level driver.
* These qp types will not be visible at the IB core layer , so the
* IB_QPT_MAX usages should not be affected in the core layer
*/
IB_QPT_RESERVED1 = 0x1000 ,
IB_QPT_RESERVED2 ,
IB_QPT_RESERVED3 ,
IB_QPT_RESERVED4 ,
IB_QPT_RESERVED5 ,
IB_QPT_RESERVED6 ,
IB_QPT_RESERVED7 ,
IB_QPT_RESERVED8 ,
IB_QPT_RESERVED9 ,
IB_QPT_RESERVED10 ,
2005-04-17 02:20:36 +04:00
} ;
2008-04-17 08:09:27 +04:00
enum ib_qp_create_flags {
2008-07-15 10:48:48 +04:00
IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0 ,
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1 ,
2015-12-20 13:16:10 +03:00
IB_QP_CREATE_CROSS_CHANNEL = 1 << 2 ,
IB_QP_CREATE_MANAGED_SEND = 1 << 3 ,
IB_QP_CREATE_MANAGED_RECV = 1 << 4 ,
2013-11-07 17:25:12 +04:00
IB_QP_CREATE_NETIF_QP = 1 << 5 ,
2014-02-23 16:19:05 +04:00
IB_QP_CREATE_SIGNATURE_EN = 1 << 6 ,
2017-05-23 14:38:16 +03:00
/* FREE = 1 << 7, */
2016-04-17 17:19:36 +03:00
IB_QP_CREATE_SCATTER_FCS = 1 << 8 ,
2017-01-18 16:39:56 +03:00
IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9 ,
2017-06-08 16:15:06 +03:00
IB_QP_CREATE_SOURCE_QPN = 1 << 10 ,
2017-10-29 14:59:44 +03:00
IB_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11 ,
2012-08-03 12:40:37 +04:00
/* reserve bits 26-31 for low level drivers' internal use */
IB_QP_CREATE_RESERVED_START = 1 << 26 ,
IB_QP_CREATE_RESERVED_END = 1 << 31 ,
2008-04-17 08:09:27 +04:00
} ;
2013-08-01 19:49:53 +04:00
/*
* Note : users may not call ib_close_qp or ib_destroy_qp from the event_handler
* callback to destroy the passed in QP .
*/
2005-04-17 02:20:36 +04:00
struct ib_qp_init_attr {
2018-09-04 18:45:20 +03:00
/* Consumer's event_handler callback must not block */
2005-04-17 02:20:36 +04:00
void ( * event_handler ) ( struct ib_event * , void * ) ;
2018-09-04 18:45:20 +03:00
2005-04-17 02:20:36 +04:00
void * qp_context ;
struct ib_cq * send_cq ;
struct ib_cq * recv_cq ;
struct ib_srq * srq ;
2011-05-24 06:59:25 +04:00
struct ib_xrcd * xrcd ; /* XRC TGT QPs only */
2005-04-17 02:20:36 +04:00
struct ib_qp_cap cap ;
enum ib_sig_type sq_sig_type ;
enum ib_qp_type qp_type ;
2018-09-24 22:57:16 +03:00
u32 create_flags ;
2016-05-03 19:01:09 +03:00
/*
* Only needed for special QP types , or when using the RW API .
*/
u8 port_num ;
2016-05-23 15:20:54 +03:00
struct ib_rwq_ind_table * rwq_ind_tbl ;
2017-06-08 16:15:06 +03:00
u32 source_qpn ;
2005-04-17 02:20:36 +04:00
} ;
2011-08-09 02:31:51 +04:00
struct ib_qp_open_attr {
void ( * event_handler ) ( struct ib_event * , void * ) ;
void * qp_context ;
u32 qp_num ;
enum ib_qp_type qp_type ;
} ;
2005-04-17 02:20:36 +04:00
enum ib_rnr_timeout {
IB_RNR_TIMER_655_36 = 0 ,
IB_RNR_TIMER_000_01 = 1 ,
IB_RNR_TIMER_000_02 = 2 ,
IB_RNR_TIMER_000_03 = 3 ,
IB_RNR_TIMER_000_04 = 4 ,
IB_RNR_TIMER_000_06 = 5 ,
IB_RNR_TIMER_000_08 = 6 ,
IB_RNR_TIMER_000_12 = 7 ,
IB_RNR_TIMER_000_16 = 8 ,
IB_RNR_TIMER_000_24 = 9 ,
IB_RNR_TIMER_000_32 = 10 ,
IB_RNR_TIMER_000_48 = 11 ,
IB_RNR_TIMER_000_64 = 12 ,
IB_RNR_TIMER_000_96 = 13 ,
IB_RNR_TIMER_001_28 = 14 ,
IB_RNR_TIMER_001_92 = 15 ,
IB_RNR_TIMER_002_56 = 16 ,
IB_RNR_TIMER_003_84 = 17 ,
IB_RNR_TIMER_005_12 = 18 ,
IB_RNR_TIMER_007_68 = 19 ,
IB_RNR_TIMER_010_24 = 20 ,
IB_RNR_TIMER_015_36 = 21 ,
IB_RNR_TIMER_020_48 = 22 ,
IB_RNR_TIMER_030_72 = 23 ,
IB_RNR_TIMER_040_96 = 24 ,
IB_RNR_TIMER_061_44 = 25 ,
IB_RNR_TIMER_081_92 = 26 ,
IB_RNR_TIMER_122_88 = 27 ,
IB_RNR_TIMER_163_84 = 28 ,
IB_RNR_TIMER_245_76 = 29 ,
IB_RNR_TIMER_327_68 = 30 ,
IB_RNR_TIMER_491_52 = 31
} ;
enum ib_qp_attr_mask {
IB_QP_STATE = 1 ,
IB_QP_CUR_STATE = ( 1 << 1 ) ,
IB_QP_EN_SQD_ASYNC_NOTIFY = ( 1 << 2 ) ,
IB_QP_ACCESS_FLAGS = ( 1 << 3 ) ,
IB_QP_PKEY_INDEX = ( 1 << 4 ) ,
IB_QP_PORT = ( 1 << 5 ) ,
IB_QP_QKEY = ( 1 << 6 ) ,
IB_QP_AV = ( 1 << 7 ) ,
IB_QP_PATH_MTU = ( 1 << 8 ) ,
IB_QP_TIMEOUT = ( 1 << 9 ) ,
IB_QP_RETRY_CNT = ( 1 << 10 ) ,
IB_QP_RNR_RETRY = ( 1 << 11 ) ,
IB_QP_RQ_PSN = ( 1 << 12 ) ,
IB_QP_MAX_QP_RD_ATOMIC = ( 1 << 13 ) ,
IB_QP_ALT_PATH = ( 1 << 14 ) ,
IB_QP_MIN_RNR_TIMER = ( 1 << 15 ) ,
IB_QP_SQ_PSN = ( 1 << 16 ) ,
IB_QP_MAX_DEST_RD_ATOMIC = ( 1 << 17 ) ,
IB_QP_PATH_MIG_STATE = ( 1 << 18 ) ,
IB_QP_CAP = ( 1 << 19 ) ,
2013-12-12 20:03:11 +04:00
IB_QP_DEST_QPN = ( 1 << 20 ) ,
2015-10-15 18:38:53 +03:00
IB_QP_RESERVED1 = ( 1 << 21 ) ,
IB_QP_RESERVED2 = ( 1 << 22 ) ,
IB_QP_RESERVED3 = ( 1 << 23 ) ,
IB_QP_RESERVED4 = ( 1 << 24 ) ,
2016-12-01 14:43:14 +03:00
IB_QP_RATE_LIMIT = ( 1 << 25 ) ,
2005-04-17 02:20:36 +04:00
} ;
enum ib_qp_state {
IB_QPS_RESET ,
IB_QPS_INIT ,
IB_QPS_RTR ,
IB_QPS_RTS ,
IB_QPS_SQD ,
IB_QPS_SQE ,
IB_QPS_ERR
} ;
enum ib_mig_state {
IB_MIG_MIGRATED ,
IB_MIG_REARM ,
IB_MIG_ARMED
} ;
2013-02-06 20:19:12 +04:00
enum ib_mw_type {
IB_MW_TYPE_1 = 1 ,
IB_MW_TYPE_2 = 2
} ;
2005-04-17 02:20:36 +04:00
struct ib_qp_attr {
enum ib_qp_state qp_state ;
enum ib_qp_state cur_qp_state ;
enum ib_mtu path_mtu ;
enum ib_mig_state path_mig_state ;
u32 qkey ;
u32 rq_psn ;
u32 sq_psn ;
u32 dest_qp_num ;
int qp_access_flags ;
struct ib_qp_cap cap ;
2017-04-29 21:41:18 +03:00
struct rdma_ah_attr ah_attr ;
struct rdma_ah_attr alt_ah_attr ;
2005-04-17 02:20:36 +04:00
u16 pkey_index ;
u16 alt_pkey_index ;
u8 en_sqd_async_notify ;
u8 sq_draining ;
u8 max_rd_atomic ;
u8 max_dest_rd_atomic ;
u8 min_rnr_timer ;
u8 port_num ;
u8 timeout ;
u8 retry_cnt ;
u8 rnr_retry ;
u8 alt_port_num ;
u8 alt_timeout ;
2016-12-01 14:43:14 +03:00
u32 rate_limit ;
2005-04-17 02:20:36 +04:00
} ;
enum ib_wr_opcode {
2018-08-15 01:33:02 +03:00
/* These are shared with userspace */
IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE ,
IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM ,
IB_WR_SEND = IB_UVERBS_WR_SEND ,
IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM ,
IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ ,
IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP ,
IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD ,
IB_WR_LSO = IB_UVERBS_WR_TSO ,
IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV ,
IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV ,
IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV ,
IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP ,
IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD ,
/* These are kernel only and can not be issued by userspace */
IB_WR_REG_MR = 0x20 ,
2014-02-23 16:19:05 +04:00
IB_WR_REG_SIG_MR ,
2018-08-15 01:33:02 +03:00
2013-07-07 18:25:52 +04:00
/* reserve values for low level drivers' internal use.
* These values will not be used at all in the ib core layer .
*/
IB_WR_RESERVED1 = 0xf0 ,
IB_WR_RESERVED2 ,
IB_WR_RESERVED3 ,
IB_WR_RESERVED4 ,
IB_WR_RESERVED5 ,
IB_WR_RESERVED6 ,
IB_WR_RESERVED7 ,
IB_WR_RESERVED8 ,
IB_WR_RESERVED9 ,
IB_WR_RESERVED10 ,
2005-04-17 02:20:36 +04:00
} ;
enum ib_send_flags {
IB_SEND_FENCE = 1 ,
IB_SEND_SIGNALED = ( 1 << 1 ) ,
IB_SEND_SOLICITED = ( 1 << 2 ) ,
2008-01-30 19:30:57 +03:00
IB_SEND_INLINE = ( 1 << 3 ) ,
2013-07-07 18:25:52 +04:00
IB_SEND_IP_CSUM = ( 1 << 4 ) ,
/* reserve bits 26-31 for low level drivers' internal use */
IB_SEND_RESERVED_START = ( 1 << 26 ) ,
IB_SEND_RESERVED_END = ( 1 << 31 ) ,
2005-04-17 02:20:36 +04:00
} ;
struct ib_sge {
u64 addr ;
u32 length ;
u32 lkey ;
} ;
2015-12-11 22:53:03 +03:00
struct ib_cqe {
void ( * done ) ( struct ib_cq * cq , struct ib_wc * wc ) ;
} ;
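The usual consumer pattern for wr_cqe based completions is to embed an ib_cqe
in a private request structure and recover that structure with container_of()
from the done() callback. A minimal sketch (struct and function names are
hypothetical):

struct my_req {
	struct ib_cqe	cqe;	/* wr.wr_cqe points here when posting */
	void		*buf;
};

static void my_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_req *req = container_of(wc->wr_cqe, struct my_req, cqe);

	if (wc->status != IB_WC_SUCCESS)
		pr_err("send failed: %s\n", ib_wc_status_msg(wc->status));
	/* req and req->buf may now be recycled */
}

When posting, the consumer sets req->cqe.done = my_send_done and points
wr.wr_cqe at &req->cqe instead of filling wr_id.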
2005-04-17 02:20:36 +04:00
struct ib_send_wr {
struct ib_send_wr * next ;
2015-12-11 22:53:03 +03:00
union {
u64 wr_id ;
struct ib_cqe * wr_cqe ;
} ;
2005-04-17 02:20:36 +04:00
struct ib_sge * sg_list ;
int num_sge ;
enum ib_wr_opcode opcode ;
int send_flags ;
2008-04-17 08:09:32 +04:00
union {
__be32 imm_data ;
u32 invalidate_rkey ;
} ex ;
2005-04-17 02:20:36 +04:00
} ;
2015-10-08 11:16:33 +03:00
struct ib_rdma_wr {
struct ib_send_wr wr ;
u64 remote_addr ;
u32 rkey ;
} ;
2018-07-18 19:25:14 +03:00
static inline const struct ib_rdma_wr * rdma_wr ( const struct ib_send_wr * wr )
2015-10-08 11:16:33 +03:00
{
return container_of ( wr , struct ib_rdma_wr , wr ) ;
}
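A minimal sketch of posting a signalled RDMA WRITE through ib_rdma_wr. It
assumes the caller already owns a connected QP, a populated ib_sge and the
peer's address/rkey, and it uses the const-qualified ib_post_send() signature
of recent kernels:

static int post_rdma_write(struct ib_qp *qp, struct ib_sge *sge,
			   u64 remote_addr, u32 rkey, u64 wr_id)
{
	struct ib_rdma_wr wr = {};
	const struct ib_send_wr *bad_wr;

	wr.wr.wr_id	 = wr_id;
	wr.wr.sg_list	 = sge;
	wr.wr.num_sge	 = 1;
	wr.wr.opcode	 = IB_WR_RDMA_WRITE;
	wr.wr.send_flags = IB_SEND_SIGNALED;
	wr.remote_addr	 = remote_addr;
	wr.rkey		 = rkey;

	return ib_post_send(qp, &wr.wr, &bad_wr);
}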
struct ib_atomic_wr {
struct ib_send_wr wr ;
u64 remote_addr ;
u64 compare_add ;
u64 swap ;
u64 compare_add_mask ;
u64 swap_mask ;
u32 rkey ;
} ;
2018-07-18 19:25:14 +03:00
static inline const struct ib_atomic_wr * atomic_wr ( const struct ib_send_wr * wr )
2015-10-08 11:16:33 +03:00
{
return container_of ( wr , struct ib_atomic_wr , wr ) ;
}
struct ib_ud_wr {
struct ib_send_wr wr ;
struct ib_ah * ah ;
void * header ;
int hlen ;
int mss ;
u32 remote_qpn ;
u32 remote_qkey ;
u16 pkey_index ; /* valid for GSI only */
u8 port_num ; /* valid for DR SMPs on switch only */
} ;
2018-07-18 19:25:14 +03:00
static inline const struct ib_ud_wr * ud_wr ( const struct ib_send_wr * wr )
2015-10-08 11:16:33 +03:00
{
return container_of ( wr , struct ib_ud_wr , wr ) ;
}
2015-10-13 19:11:24 +03:00
struct ib_reg_wr {
struct ib_send_wr wr ;
struct ib_mr * mr ;
u32 key ;
int access ;
} ;
2018-07-18 19:25:14 +03:00
static inline const struct ib_reg_wr * reg_wr ( const struct ib_send_wr * wr )
2015-10-13 19:11:24 +03:00
{
return container_of ( wr , struct ib_reg_wr , wr ) ;
}
2015-10-08 11:16:33 +03:00
struct ib_sig_handover_wr {
struct ib_send_wr wr ;
struct ib_sig_attrs * sig_attrs ;
struct ib_mr * sig_mr ;
int access_flags ;
struct ib_sge * prot ;
} ;
2018-07-18 19:25:14 +03:00
static inline const struct ib_sig_handover_wr *
sig_handover_wr ( const struct ib_send_wr * wr )
2015-10-08 11:16:33 +03:00
{
return container_of ( wr , struct ib_sig_handover_wr , wr ) ;
}
2005-04-17 02:20:36 +04:00
struct ib_recv_wr {
struct ib_recv_wr * next ;
2015-12-11 22:53:03 +03:00
union {
u64 wr_id ;
struct ib_cqe * wr_cqe ;
} ;
2005-04-17 02:20:36 +04:00
struct ib_sge * sg_list ;
int num_sge ;
} ;
enum ib_access_flags {
2018-07-12 01:20:44 +03:00
IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE ,
IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE ,
IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ ,
IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC ,
IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND ,
IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED ,
IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND ,
IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB ,
IB_ACCESS_SUPPORTED = ( ( IB_ACCESS_HUGETLB << 1 ) - 1 )
2005-04-17 02:20:36 +04:00
} ;
2015-12-23 21:12:47 +03:00
/*
* XXX : these are apparently used for ->rereg_user_mr , no idea why they
* are hidden here instead of a uapi header !
*/
2005-04-17 02:20:36 +04:00
enum ib_mr_rereg_flags {
IB_MR_REREG_TRANS = 1 ,
IB_MR_REREG_PD = ( 1 << 1 ) ,
2014-07-31 12:01:28 +04:00
IB_MR_REREG_ACCESS = ( 1 << 2 ) ,
IB_MR_REREG_SUPPORTED = ( ( IB_MR_REREG_ACCESS << 1 ) - 1 )
2005-04-17 02:20:36 +04:00
} ;
struct ib_fmr_attr {
int max_pages ;
int max_maps ;
2006-02-02 21:43:45 +03:00
u8 page_shift ;
2005-04-17 02:20:36 +04:00
} ;
2014-12-11 18:04:18 +03:00
struct ib_umem ;
2017-04-04 13:31:42 +03:00
enum rdma_remove_reason {
2018-06-20 17:11:39 +03:00
/*
* Userspace requested uobject deletion or initial try
* to remove uobject via cleanup . Call could fail
*/
2017-04-04 13:31:42 +03:00
RDMA_REMOVE_DESTROY ,
/* Context deletion. This call should delete the actual object itself */
RDMA_REMOVE_CLOSE ,
/* Driver is being hot-unplugged. This call should delete the actual object itself */
RDMA_REMOVE_DRIVER_REMOVE ,
2018-07-26 06:40:12 +03:00
/* uobj is being cleaned-up before being committed */
RDMA_REMOVE_ABORT ,
2017-04-04 13:31:42 +03:00
} ;
2017-01-10 03:02:14 +03:00
struct ib_rdmacg_object {
# ifdef CONFIG_CGROUP_RDMA
struct rdma_cgroup * cg ; /* owner rdma cgroup */
# endif
} ;
2005-07-08 04:57:10 +04:00
struct ib_ucontext {
struct ib_device * device ;
2017-04-04 13:31:41 +03:00
struct ib_uverbs_file * ufile ;
2018-07-11 05:55:19 +03:00
/*
* 'closing' can be read by the driver only during a destroy callback ,
* it is set when we are closing the file descriptor and indicates
* that mm_sem may be locked .
*/
2018-09-03 20:18:03 +03:00
bool closing ;
2014-12-11 18:04:17 +03:00
2018-06-20 17:11:39 +03:00
bool cleanup_retryable ;
2017-04-04 13:31:42 +03:00
2018-09-16 20:48:04 +03:00
void ( * invalidate_range ) ( struct ib_umem_odp * umem_odp ,
2014-12-11 18:04:18 +03:00
unsigned long start , unsigned long end ) ;
2018-09-16 20:48:08 +03:00
struct mutex per_mm_list_lock ;
struct list_head per_mm_list ;
2017-01-10 03:02:14 +03:00
struct ib_rdmacg_object cg_obj ;
2018-11-28 14:16:43 +03:00
/*
* Implementation details of the RDMA core , don't use in drivers :
*/
struct rdma_restrack_entry res ;
2005-07-08 04:57:10 +04:00
} ;
struct ib_uobject {
u64 user_handle ; /* handle given to us by userspace */
2018-07-04 11:32:07 +03:00
/* ufile & ucontext owning this object */
struct ib_uverbs_file * ufile ;
/* FIXME, save memory: ufile->context == context */
2005-07-08 04:57:10 +04:00
struct ib_ucontext * context ; /* associated user context */
IB/uverbs: Don't serialize with ib_uverbs_idr_mutex
Currently, all userspace verbs operations that call into the kernel
are serialized by ib_uverbs_idr_mutex. This can be a scalability
issue for some workloads, especially for devices driven by the ipath
driver, which needs to call into the kernel even for datapath
operations.
Fix this by adding reference counts to the userspace objects, and then
converting ib_uverbs_idr_mutex into a spinlock that only protects the
idrs long enough to take a reference on the object being looked up.
Because remove operations may fail, we have to do a slightly funky
two-step deletion, which is described in the comments at the top of
uverbs_cmd.c.
This also still leaves ib_uverbs_idr_lock as a single lock that is
possibly subject to contention. However, the lock hold time will only
be a single idr operation, so multiple threads should still be able to
make progress, even if ib_uverbs_idr_lock is being ping-ponged.
Surprisingly, these changes even shrink the object code:
add/remove: 23/5 grow/shrink: 4/21 up/down: 633/-693 (-60)
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2006-06-18 07:44:49 +04:00
void * object ; /* containing object */
2005-07-08 04:57:10 +04:00
struct list_head list ; /* link to context's list */
2017-01-10 03:02:14 +03:00
struct ib_rdmacg_object cg_obj ; /* rdmacg object */
2008-04-17 08:01:06 +04:00
int id ; /* index into kernel idr */
2006-06-18 07:44:49 +04:00
struct kref ref ;
2017-04-04 13:31:42 +03:00
atomic_t usecnt ; /* protects exclusive access */
2015-11-02 20:13:25 +03:00
struct rcu_head rcu ; /* kfree_rcu() overhead */
2017-04-04 13:31:42 +03:00
2018-08-10 05:14:37 +03:00
const struct uverbs_api_object * uapi_object ;
2005-07-08 04:57:10 +04:00
} ;
struct ib_udata {
2013-12-12 02:01:44 +04:00
const void __user * inbuf ;
2005-07-08 04:57:10 +04:00
void __user * outbuf ;
size_t inlen ;
size_t outlen ;
} ;
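A hedged sketch of how a driver verb typically consumes ib_udata: copy a
command in with ib_copy_from_udata(), act on it, and copy a response out with
ib_copy_to_udata(). struct my_cmd and struct my_resp are hypothetical
driver-private ABI structures:

struct my_cmd  { __u32 flags;  __u32 reserved; };
struct my_resp { __u32 handle; __u32 reserved; };

static int my_verb(struct ib_udata *udata)
{
	struct my_cmd cmd = {};
	struct my_resp resp = {};
	int ret;

	if (udata->inlen < sizeof(cmd) || udata->outlen < sizeof(resp))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
	if (ret)
		return ret;

	resp.handle = 1;	/* placeholder result */
	return ib_copy_to_udata(udata, &resp, sizeof(resp));
}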
2005-04-17 02:20:36 +04:00
struct ib_pd {
2015-08-05 23:14:45 +03:00
u32 local_dma_lkey ;
2016-09-05 13:56:17 +03:00
u32 flags ;
2005-07-08 04:57:10 +04:00
struct ib_device * device ;
struct ib_uobject * uobject ;
atomic_t usecnt ; /* count all resources */
2016-09-05 13:56:16 +03:00
2016-09-05 13:56:17 +03:00
u32 unsafe_global_rkey ;
2016-09-05 13:56:16 +03:00
/*
* Implementation details of the RDMA core , don't use in drivers :
*/
struct ib_mr * __internal_mr ;
2018-01-28 12:17:20 +03:00
struct rdma_restrack_entry res ;
2005-04-17 02:20:36 +04:00
} ;
2011-05-24 04:52:46 +04:00
struct ib_xrcd {
struct ib_device * device ;
2011-05-27 10:06:44 +04:00
atomic_t usecnt ; /* count all exposed resources */
2011-05-24 19:33:46 +04:00
struct inode * inode ;
2011-05-27 10:06:44 +04:00
struct mutex tgt_qp_mutex ;
struct list_head tgt_qp_list ;
2011-05-24 04:52:46 +04:00
} ;
2005-04-17 02:20:36 +04:00
struct ib_ah {
struct ib_device * device ;
struct ib_pd * pd ;
2005-07-08 04:57:10 +04:00
struct ib_uobject * uobject ;
2018-06-13 10:22:08 +03:00
const struct ib_gid_attr * sgid_attr ;
2017-04-29 21:41:29 +03:00
enum rdma_ah_attr_type type ;
2005-04-17 02:20:36 +04:00
} ;
typedef void ( * ib_comp_handler ) ( struct ib_cq * cq , void * cq_context ) ;
2015-12-11 22:53:03 +03:00
enum ib_poll_context {
2018-08-27 08:35:55 +03:00
IB_POLL_DIRECT , /* caller context, no hw completions */
IB_POLL_SOFTIRQ , /* poll from softirq context */
IB_POLL_WORKQUEUE , /* poll from workqueue */
IB_POLL_UNBOUND_WORKQUEUE , /* poll from unbound workqueue */
2015-12-11 22:53:03 +03:00
} ;
2005-04-17 02:20:36 +04:00
struct ib_cq {
2005-07-08 04:57:10 +04:00
struct ib_device * device ;
struct ib_uobject * uobject ;
ib_comp_handler comp_handler ;
void ( * event_handler ) ( struct ib_event * , void * ) ;
2008-07-15 10:48:44 +04:00
void * cq_context ;
2005-07-08 04:57:10 +04:00
int cqe ;
atomic_t usecnt ; /* count number of work queues */
2015-12-11 22:53:03 +03:00
enum ib_poll_context poll_ctx ;
struct ib_wc * wc ;
union {
struct irq_poll iop ;
struct work_struct work ;
} ;
2018-08-27 08:35:55 +03:00
struct workqueue_struct * comp_wq ;
2018-01-28 12:17:20 +03:00
/*
* Implementation details of the RDMA core , don't use in drivers :
*/
struct rdma_restrack_entry res ;
2005-04-17 02:20:36 +04:00
} ;
struct ib_srq {
2005-08-18 23:23:08 +04:00
struct ib_device * device ;
struct ib_pd * pd ;
struct ib_uobject * uobject ;
void ( * event_handler ) ( struct ib_event * , void * ) ;
void * srq_context ;
2011-05-24 03:31:36 +04:00
enum ib_srq_type srq_type ;
2005-04-17 02:20:36 +04:00
atomic_t usecnt ;
2011-05-24 06:42:29 +04:00
2017-08-17 15:52:04 +03:00
struct {
struct ib_cq * cq ;
union {
struct {
struct ib_xrcd * xrcd ;
u32 srq_num ;
} xrc ;
} ;
2011-05-24 06:42:29 +04:00
} ext ;
2005-04-17 02:20:36 +04:00
} ;
2017-01-18 16:39:54 +03:00
enum ib_raw_packet_caps {
/* Strip cvlan from incoming packet and report it in the matching work
* completion is supported .
*/
IB_RAW_PACKET_CAP_CVLAN_STRIPPING = ( 1 << 0 ) ,
/* Scatter FCS field of an incoming packet to host memory is supported.
*/
IB_RAW_PACKET_CAP_SCATTER_FCS = ( 1 << 1 ) ,
/* Checksum offloads are supported (for both send and receive). */
IB_RAW_PACKET_CAP_IP_CSUM = ( 1 << 2 ) ,
2017-05-30 10:29:10 +03:00
/* When a packet is received for an RQ with no receive WQEs, the
* packet processing is delayed .
*/
IB_RAW_PACKET_CAP_DELAY_DROP = ( 1 << 3 ) ,
2017-01-18 16:39:54 +03:00
} ;
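Consumers would normally test these capability bits before requesting the
matching WQ/QP create flags. A sketch, under the assumption that the device
exposes them through ib_device_attr.raw_packet_caps:

static bool can_strip_cvlan(struct ib_device *dev)
{
	return dev->attrs.raw_packet_caps & IB_RAW_PACKET_CAP_CVLAN_STRIPPING;
}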
IB/core: Introduce Work Queue object and its verbs
Introduce Work Queue object and its create/destroy/modify verbs.
QP can be created without internal WQs "packaged" inside it,
this QP can be configured to use "external" WQ object as its
receive/send queue.
WQ is a necessary component for RSS technology since RSS mechanism
is supposed to distribute the traffic between multiple
Receive Work Queues.
WQ is associated (many to one) with a Completion Queue and it owns WQ
properties (PD, WQ size, etc.).
WQ has a type; this patch introduces IB_WQT_RQ (i.e. receive queue),
and it may be extended with others such as IB_WQT_SQ (send queue).
A WQ of type IB_WQT_RQ contains receive work requests.
PD is an attribute of a work queue (i.e. send/receive queue); it's used
by the hardware for security validation before scattering to a memory
region which is pointed to by the WQ. For that, an external WQ object
needs a PD, letting the hardware make that validation.
When accessing a memory region that is pointed to by the WQ, its PD
is used and not the QP's PD; this behavior is similar
to that of an SRQ and a QP.
WQ context is subject to well-defined state transitions done by
the modify_wq verb.
When a WQ is created its initial state becomes IB_WQS_RESET.
From IB_WQS_RESET it can be modified to itself or to IB_WQS_RDY.
From IB_WQS_RDY it can be modified to itself, to IB_WQS_RESET
or to IB_WQS_ERR.
From IB_WQS_ERR it can be modified to IB_WQS_RESET.
Note: transition to IB_WQS_ERR might occur implicitly in case there
was some HW error.
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Matan Barak <matanb@mellanox.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Doug Ledford <dledford@redhat.com>
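A sketch of driving the state machine described above through the modify_wq
verb, assuming the in-kernel ib_modify_wq() helper: move a freshly created WQ
from IB_WQS_RESET to IB_WQS_RDY.

static int wq_make_ready(struct ib_wq *wq)
{
	struct ib_wq_attr attr = {
		.curr_wq_state	= IB_WQS_RESET,	/* state right after creation */
		.wq_state	= IB_WQS_RDY,
	};

	return ib_modify_wq(wq, &attr, IB_WQ_CUR_STATE | IB_WQ_STATE);
}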
2016-05-23 15:20:48 +03:00
enum ib_wq_type {
IB_WQT_RQ
} ;
enum ib_wq_state {
IB_WQS_RESET ,
IB_WQS_RDY ,
IB_WQS_ERR
} ;
struct ib_wq {
struct ib_device * device ;
struct ib_uobject * uobject ;
void * wq_context ;
void ( * event_handler ) ( struct ib_event * , void * ) ;
struct ib_pd * pd ;
struct ib_cq * cq ;
u32 wq_num ;
enum ib_wq_state state ;
enum ib_wq_type wq_type ;
atomic_t usecnt ;
} ;
2017-01-18 16:39:55 +03:00
enum ib_wq_flags {
IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0 ,
2017-01-18 16:39:57 +03:00
IB_WQ_FLAGS_SCATTER_FCS = 1 << 1 ,
2017-05-30 10:29:10 +03:00
IB_WQ_FLAGS_DELAY_DROP = 1 << 2 ,
2017-10-29 14:59:44 +03:00
IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3 ,
2017-01-18 16:39:55 +03:00
} ;
2016-05-23 15:20:48 +03:00
struct ib_wq_init_attr {
void * wq_context ;
enum ib_wq_type wq_type ;
u32 max_wr ;
u32 max_sge ;
struct ib_cq * cq ;
void ( * event_handler ) ( struct ib_event * , void * ) ;
2017-01-18 16:39:55 +03:00
u32 create_flags ; /* Use enum ib_wq_flags */
2016-05-23 15:20:48 +03:00
} ;
enum ib_wq_attr_mask {
2017-01-18 16:39:55 +03:00
IB_WQ_STATE = 1 << 0 ,
IB_WQ_CUR_STATE = 1 << 1 ,
IB_WQ_FLAGS = 1 << 2 ,
2016-05-23 15:20:48 +03:00
} ;
struct ib_wq_attr {
enum ib_wq_state wq_state ;
enum ib_wq_state curr_wq_state ;
2017-01-18 16:39:55 +03:00
u32 flags ; /* Use enum ib_wq_flags */
u32 flags_mask ; /* Use enum ib_wq_flags */
2016-05-23 15:20:48 +03:00
} ;
2016-05-23 15:20:51 +03:00
struct ib_rwq_ind_table {
struct ib_device * device ;
struct ib_uobject * uobject ;
atomic_t usecnt ;
u32 ind_tbl_num ;
u32 log_ind_tbl_size ;
struct ib_wq * * ind_tbl ;
} ;
struct ib_rwq_ind_table_init_attr {
u32 log_ind_tbl_size ;
/* Each entry is a pointer to Receive Work Queue */
struct ib_wq * * ind_tbl ;
} ;
IB/core: Enforce PKey security on QPs
Add new LSM hooks to allocate and free security contexts and check for
permission to access a PKey.
Allocate and free a security context when creating and destroying a QP.
This context is used for controlling access to PKeys.
When a request is made to modify a QP that changes the port, PKey index,
or alternate path, check that the QP has permission for the PKey in the
PKey table index on the subnet prefix of the port. If the QP is shared
make sure all handles to the QP also have access.
Store which port and PKey index a QP is using. After the reset to init
transition the user can modify the port, PKey index and alternate path
independently. So port and PKey settings changes can be a merge of the
previous settings and the new ones.
In order to maintain access control if there is a PKey table or subnet
prefix change, keep a list of all QPs that are using each PKey index on
each port. If a change occurs all QPs using that device and port must
have access enforced for the new cache settings.
These changes add a transaction to the QP modify process. Association
with the old port and PKey index must be maintained if the modify fails,
and must be removed if it succeeds. Association with the new port and
PKey index must be established prior to the modify and removed if the
modify fails.
1. When a QP is modified to a particular Port, PKey index or alternate
path insert that QP into the appropriate lists.
2. Check permission to access the new settings.
3. If step 2 grants access attempt to modify the QP.
4a. If steps 2 and 3 succeed remove any prior associations.
4b. If either fails remove the new setting associations.
If a PKey table or subnet prefix changes walk the list of QPs and
check that they have permission. If not send the QP to the error state
and raise a fatal error event. If it's a shared QP make sure all the
QPs that share the real_qp have permission as well. If the QP that
owns a security structure is denied access the security structure is
marked as such and the QP is added to an error_list. Once the moving
the QP to error is complete the security structure mark is cleared.
Maintaining the lists correctly turns QP destroy into a transaction.
The hardware driver for the device frees the ib_qp structure, so while
the destroy is in progress the ib_qp pointer in the ib_qp_security
struct is undefined. When the destroy process begins the ib_qp_security
structure is marked as destroying. This prevents any action from being
taken on the QP pointer. After the QP is destroyed successfully it
could still be listed on an error_list; wait for it to be processed by that
flow before cleaning up the structure.
If the destroy fails the QPs port and PKey settings are reinserted into
the appropriate lists, the destroying flag is cleared, and access control
is enforced, in case there were any cache changes during the destroy
flow.
To keep the security changes isolated a new file is used to hold security
related functionality.
Signed-off-by: Daniel Jurgens <danielj@mellanox.com>
Acked-by: Doug Ledford <dledford@redhat.com>
[PM: merge fixup in ib_verbs.h and uverbs_cmd.c]
Signed-off-by: Paul Moore <paul@paul-moore.com>
2017-05-19 15:48:52 +03:00
enum port_pkey_state {
IB_PORT_PKEY_NOT_VALID = 0 ,
IB_PORT_PKEY_VALID = 1 ,
IB_PORT_PKEY_LISTED = 2 ,
} ;
struct ib_qp_security ;
struct ib_port_pkey {
enum port_pkey_state state ;
u16 pkey_index ;
u8 port_num ;
struct list_head qp_list ;
struct list_head to_error_list ;
struct ib_qp_security * sec ;
} ;
struct ib_ports_pkeys {
struct ib_port_pkey main ;
struct ib_port_pkey alt ;
} ;
struct ib_qp_security {
struct ib_qp * qp ;
struct ib_device * dev ;
/* Hold this mutex when changing port and pkey settings. */
struct mutex mutex ;
struct ib_ports_pkeys * ports_pkeys ;
/* A list of all open shared QP handles. Required to enforce security
* properly for all users of a shared QP .
*/
struct list_head shared_qp_list ;
void * security ;
bool destroying ;
atomic_t error_list_count ;
struct completion error_complete ;
int error_comps_pending ;
} ;
2016-07-21 23:03:30 +03:00
/*
* @ max_write_sge : Maximum SGE elements per RDMA WRITE request .
* @ max_read_sge : Maximum SGE elements per RDMA READ request .
*/
2005-04-17 02:20:36 +04:00
struct ib_qp {
struct ib_device * device ;
struct ib_pd * pd ;
struct ib_cq * send_cq ;
struct ib_cq * recv_cq ;
2016-05-03 19:01:07 +03:00
spinlock_t mr_lock ;
int mrs_used ;
2016-05-03 19:01:09 +03:00
struct list_head rdma_mrs ;
2016-05-03 19:01:12 +03:00
struct list_head sig_mrs ;
2005-04-17 02:20:36 +04:00
struct ib_srq * srq ;
2011-05-24 06:59:25 +04:00
struct ib_xrcd * xrcd ; /* XRC TGT QPs only */
2011-05-27 10:06:44 +04:00
struct list_head xrcd_list ;
2016-05-03 19:01:07 +03:00
2013-08-07 15:01:59 +04:00
/* count times opened, mcast attaches, flow attaches */
atomic_t usecnt ;
2011-08-09 02:31:51 +04:00
struct list_head open_list ;
struct ib_qp * real_qp ;
2005-07-08 04:57:10 +04:00
struct ib_uobject * uobject ;
2005-04-17 02:20:36 +04:00
void ( * event_handler ) ( struct ib_event * , void * ) ;
void * qp_context ;
2018-06-13 10:22:08 +03:00
/* sgid_attrs associated with the AV's */
const struct ib_gid_attr * av_sgid_attr ;
const struct ib_gid_attr * alt_path_sgid_attr ;
2005-04-17 02:20:36 +04:00
u32 qp_num ;
2016-07-21 23:03:30 +03:00
u32 max_write_sge ;
u32 max_read_sge ;
2005-04-17 02:20:36 +04:00
enum ib_qp_type qp_type ;
2016-05-23 15:20:54 +03:00
struct ib_rwq_ind_table * rwq_ind_tbl ;
2017-05-19 15:48:52 +03:00
struct ib_qp_security * qp_sec ;
2017-08-23 08:35:40 +03:00
u8 port ;
2018-01-28 12:17:20 +03:00
/*
* Implementation details of the RDMA core , don't use in drivers :
*/
struct rdma_restrack_entry res ;
2005-04-17 02:20:36 +04:00
} ;
2018-04-05 18:53:24 +03:00
struct ib_dm {
struct ib_device * device ;
u32 length ;
u32 flags ;
struct ib_uobject * uobject ;
atomic_t usecnt ;
} ;
2005-04-17 02:20:36 +04:00
struct ib_mr {
2005-07-08 04:57:10 +04:00
struct ib_device * device ;
struct ib_pd * pd ;
u32 lkey ;
u32 rkey ;
2015-10-13 19:11:24 +03:00
u64 iova ;
2017-09-24 21:46:31 +03:00
u64 length ;
2015-10-13 19:11:24 +03:00
unsigned int page_size ;
2019-06-11 18:52:38 +03:00
enum ib_mr_type type ;
2016-05-03 19:01:08 +03:00
bool need_inval ;
2016-05-03 19:01:07 +03:00
union {
struct ib_uobject * uobject ; /* user */
struct list_head qp_entry ; /* FR */
} ;
2018-03-02 00:58:13 +03:00
2018-04-05 18:53:25 +03:00
struct ib_dm * dm ;
2018-03-02 00:58:13 +03:00
/*
* Implementation details of the RDMA core , don't use in drivers :
*/
struct rdma_restrack_entry res ;
2005-04-17 02:20:36 +04:00
} ;
struct ib_mw {
struct ib_device * device ;
struct ib_pd * pd ;
2005-07-08 04:57:10 +04:00
struct ib_uobject * uobject ;
2005-04-17 02:20:36 +04:00
u32 rkey ;
2013-02-06 20:19:12 +04:00
enum ib_mw_type type ;
2005-04-17 02:20:36 +04:00
} ;
struct ib_fmr {
struct ib_device * device ;
struct ib_pd * pd ;
struct list_head list ;
u32 lkey ;
u32 rkey ;
} ;
IB/core: Add receive flow steering support
The RDMA stack allows for applications to create IB_QPT_RAW_PACKET
QPs, which receive plain Ethernet packets, specifically packets that
don't carry any QPN to be matched by the receiving side. Applications
using these QPs must be provided with a method to program some
steering rule with the HW so packets arriving at the local port can be
routed to them.
This patch adds ib_create_flow(), which allows providing a flow
specification for a QP. When there's a match between the
specification and a received packet, the packet is forwarded to that
QP, in the same way one uses ib_attach_multicast() for IB UD
multicast handling.
Flow specifications are provided as instances of struct ib_flow_spec_yyy,
which describe L2, L3 and L4 headers. Currently specs for Ethernet, IPv4,
TCP and UDP are defined. Flow specs are made of values and masks.
The input to ib_create_flow() is a struct ib_flow_attr, which contains
a few mandatory control elements and optional flow specs.
struct ib_flow_attr {
enum ib_flow_attr_type type;
u16 size;
u16 priority;
u32 flags;
u8 num_of_specs;
u8 port;
/* Following are the optional layers according to user request
* struct ib_flow_spec_yyy
* struct ib_flow_spec_zzz
*/
};
Since these specs eventually come from user space, they are defined and
used in a way that allows adding new spec types without a kernel/user ABI
change, needing only a small API enhancement that defines the newly added spec.
The flow spec structures are defined with TLV (Type-Length-Value)
entries, which allows calling ib_create_flow() with a list of variable
length of optional specs.
For the actual processing of ib_flow_attr, the driver uses the mandatory
num_of_specs and size fields along with the TLV nature of the specs.
Steering rule processing order is determined by the domain over which
the rule is set and by the rule priority. All rules set by user space
applications fall into the IB_FLOW_DOMAIN_USER domain; other domains
could be used by a future IPoIB RFS and Ethtool flow-steering interface
implementation. A lower numerical value for the priority field means
higher priority.
The returned value from ib_create_flow() is a struct ib_flow, which
contains a database pointer (handle) provided by the HW driver to be
used when calling ib_destroy_flow().
Applications that offload TCP/IP traffic can also be written over IB
UD QPs. The ib_create_flow() / ib_destroy_flow() API is designed to
support UD QPs too. A HW driver can set IB_DEVICE_MANAGED_FLOW_STEERING
to denote support for flow steering.
The ib_flow_attr enum type supports usage of flow steering for promiscuous
and sniffer purposes:
IB_FLOW_ATTR_NORMAL - "regular" rule, steering according to rule specification
IB_FLOW_ATTR_ALL_DEFAULT - default unicast and multicast rule, receive
all Ethernet traffic which isn't steered to any QP
IB_FLOW_ATTR_MC_DEFAULT - same as IB_FLOW_ATTR_ALL_DEFAULT but only for multicast
IB_FLOW_ATTR_SNIFFER - sniffer rule, receive all port traffic
The ALL_DEFAULT and MC_DEFAULT rule options are valid only for the Ethernet link type.
Signed-off-by: Hadar Hen Zion <hadarh@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
2013-08-07 15:01:59 +04:00
/* Supported steering options */
enum ib_flow_attr_type {
/* steering according to rule specifications */
IB_FLOW_ATTR_NORMAL = 0x0 ,
/* default unicast and multicast rule -
* receive all Eth traffic which isn't steered to any QP
*/
IB_FLOW_ATTR_ALL_DEFAULT = 0x1 ,
/* default multicast rule -
* receive all Eth multicast traffic which isn't steered to any QP
*/
IB_FLOW_ATTR_MC_DEFAULT = 0x2 ,
/* sniffer rule - receive all port traffic */
IB_FLOW_ATTR_SNIFFER = 0x3
} ;
/* Supported steering header types */
enum ib_flow_spec_type {
/* L2 headers*/
2016-11-14 20:04:48 +03:00
IB_FLOW_SPEC_ETH = 0x20 ,
IB_FLOW_SPEC_IB = 0x22 ,
2013-08-07 15:01:59 +04:00
/* L3 header*/
2016-11-14 20:04:48 +03:00
IB_FLOW_SPEC_IPV4 = 0x30 ,
IB_FLOW_SPEC_IPV6 = 0x31 ,
2018-03-28 09:27:49 +03:00
IB_FLOW_SPEC_ESP = 0x34 ,
2013-08-07 15:01:59 +04:00
/* L4 headers*/
2016-11-14 20:04:48 +03:00
IB_FLOW_SPEC_TCP = 0x40 ,
IB_FLOW_SPEC_UDP = 0x41 ,
2016-11-14 20:04:47 +03:00
IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50 ,
2018-05-13 14:33:30 +03:00
IB_FLOW_SPEC_GRE = 0x51 ,
2018-05-13 14:33:32 +03:00
IB_FLOW_SPEC_MPLS = 0x60 ,
2016-11-14 20:04:51 +03:00
IB_FLOW_SPEC_INNER = 0x100 ,
2017-01-18 15:59:48 +03:00
/* Actions */
IB_FLOW_SPEC_ACTION_TAG = 0x1000 ,
2017-04-03 13:13:51 +03:00
IB_FLOW_SPEC_ACTION_DROP = 0x1001 ,
2018-03-28 09:27:46 +03:00
IB_FLOW_SPEC_ACTION_HANDLE = 0x1002 ,
2018-05-31 16:43:36 +03:00
IB_FLOW_SPEC_ACTION_COUNT = 0x1003 ,
2013-08-07 15:01:59 +04:00
} ;
2013-11-07 17:25:13 +04:00
# define IB_FLOW_SPEC_LAYER_MASK 0xF0
2018-05-31 16:43:36 +03:00
# define IB_FLOW_SPEC_SUPPORT_LAYERS 10
2013-09-01 19:39:52 +04:00
2013-08-07 15:01:59 +04:00
/* Flow steering rule priority is set according to its domain.
* Lower domain value means higher priority .
*/
enum ib_flow_domain {
IB_FLOW_DOMAIN_USER ,
IB_FLOW_DOMAIN_ETHTOOL ,
IB_FLOW_DOMAIN_RFS ,
IB_FLOW_DOMAIN_NIC ,
IB_FLOW_DOMAIN_NUM /* Must be last */
} ;
2016-02-18 19:31:05 +03:00
enum ib_flow_flags {
IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1 , /* Continue match, no steal */
2018-03-28 09:27:47 +03:00
IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2 , /* Egress flow */
IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3 /* Must be last */
2016-02-18 19:31:05 +03:00
} ;
2013-08-07 15:01:59 +04:00
struct ib_flow_eth_filter {
u8 dst_mac [ 6 ] ;
u8 src_mac [ 6 ] ;
__be16 ether_type ;
__be16 vlan_tag ;
2016-08-30 16:58:32 +03:00
/* Must be last */
u8 real_sz [ 0 ] ;
2013-08-07 15:01:59 +04:00
} ;
struct ib_flow_spec_eth {
2016-11-14 20:04:51 +03:00
u32 type ;
2013-08-07 15:01:59 +04:00
u16 size ;
struct ib_flow_eth_filter val ;
struct ib_flow_eth_filter mask ;
} ;
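As a usage illustration of the TLV layout described in the commit message
above, the following sketch packs an ib_flow_attr and a single Ethernet
spec into one buffer and hands it to ib_create_flow(). It assumes the
original three-argument ib_create_flow(qp, flow_attr, domain) form and an
existing IB_QPT_RAW_PACKET QP; error handling is omitted and the helper
name steer_dmac_to_qp is hypothetical.
/* Sketch: steer all packets with a given destination MAC to "qp". */
struct raw_eth_flow_attr {
	struct ib_flow_attr	attr;
	struct ib_flow_spec_eth	spec_eth;	/* the single TLV spec */
} __packed;
static struct ib_flow *steer_dmac_to_qp(struct ib_qp *qp, const u8 dmac[6])
{
	struct raw_eth_flow_attr flow = {
		.attr = {
			.type	      = IB_FLOW_ATTR_NORMAL,
			.size	      = sizeof(flow),	/* attr + all specs */
			.num_of_specs = 1,
			.port	      = 1,
		},
		.spec_eth = {
			.type = IB_FLOW_SPEC_ETH,
			.size = sizeof(struct ib_flow_spec_eth),
		},
	};
	memcpy(flow.spec_eth.val.dst_mac, dmac, 6);
	memset(flow.spec_eth.mask.dst_mac, 0xff, 6);	/* match all MAC bits */
	return ib_create_flow(qp, &flow.attr, IB_FLOW_DOMAIN_USER);
}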
2013-11-07 17:25:13 +04:00
struct ib_flow_ib_filter {
__be16 dlid ;
__u8 sl ;
2016-08-30 16:58:32 +03:00
/* Must be last */
u8 real_sz [ 0 ] ;
2013-11-07 17:25:13 +04:00
} ;
struct ib_flow_spec_ib {
2016-11-14 20:04:51 +03:00
u32 type ;
2013-11-07 17:25:13 +04:00
u16 size ;
struct ib_flow_ib_filter val ;
struct ib_flow_ib_filter mask ;
} ;
2016-08-30 16:58:33 +03:00
/* IPv4 header flags */
enum ib_ipv4_flags {
IB_IPV4_DONT_FRAG = 0x2 , /* Don't enable packet fragmentation */
IB_IPV4_MORE_FRAG = 0x4 /* All fragmented packets except the
last have this flag set */
} ;
2013-08-07 15:01:59 +04:00
struct ib_flow_ipv4_filter {
__be32 src_ip ;
__be32 dst_ip ;
2016-08-30 16:58:33 +03:00
u8 proto ;
u8 tos ;
u8 ttl ;
u8 flags ;
2016-08-30 16:58:32 +03:00
/* Must be last */
u8 real_sz [ 0 ] ;
2013-08-07 15:01:59 +04:00
} ;
struct ib_flow_spec_ipv4 {
2016-11-14 20:04:51 +03:00
u32 type ;
2013-08-07 15:01:59 +04:00
u16 size ;
struct ib_flow_ipv4_filter val ;
struct ib_flow_ipv4_filter mask ;
} ;
2016-06-17 15:14:50 +03:00
struct ib_flow_ipv6_filter {
u8 src_ip [ 16 ] ;
u8 dst_ip [ 16 ] ;
2016-08-30 16:58:34 +03:00
__be32 flow_label ;
u8 next_hdr ;
u8 traffic_class ;
u8 hop_limit ;
2016-08-30 16:58:32 +03:00
/* Must be last */
u8 real_sz [ 0 ] ;
2016-06-17 15:14:50 +03:00
} ;
struct ib_flow_spec_ipv6 {
2016-11-14 20:04:51 +03:00
u32 type ;
2016-06-17 15:14:50 +03:00
u16 size ;
struct ib_flow_ipv6_filter val ;
struct ib_flow_ipv6_filter mask ;
} ;
2013-08-07 15:01:59 +04:00
struct ib_flow_tcp_udp_filter {
__be16 dst_port ;
__be16 src_port ;
2016-08-30 16:58:32 +03:00
/* Must be last */
u8 real_sz [ 0 ] ;
2013-08-07 15:01:59 +04:00
} ;
struct ib_flow_spec_tcp_udp {
2016-11-14 20:04:51 +03:00
u32 type ;
2013-08-07 15:01:59 +04:00
u16 size ;
struct ib_flow_tcp_udp_filter val ;
struct ib_flow_tcp_udp_filter mask ;
} ;
2016-11-14 20:04:47 +03:00
struct ib_flow_tunnel_filter {
__be32 tunnel_id ;
u8 real_sz [ 0 ] ;
} ;
/* ib_flow_spec_tunnel describes the VXLAN tunnel;
* the tunnel_id in val holds the VNI value
*/
struct ib_flow_spec_tunnel {
2016-11-14 20:04:51 +03:00
u32 type ;
2016-11-14 20:04:47 +03:00
u16 size ;
struct ib_flow_tunnel_filter val ;
struct ib_flow_tunnel_filter mask ;
} ;
2018-03-28 09:27:49 +03:00
struct ib_flow_esp_filter {
__be32 spi ;
__be32 seq ;
/* Must be last */
u8 real_sz [ 0 ] ;
} ;
struct ib_flow_spec_esp {
u32 type ;
u16 size ;
struct ib_flow_esp_filter val ;
struct ib_flow_esp_filter mask ;
} ;
2018-05-13 14:33:30 +03:00
struct ib_flow_gre_filter {
__be16 c_ks_res0_ver ;
__be16 protocol ;
__be32 key ;
/* Must be last */
u8 real_sz [ 0 ] ;
} ;
struct ib_flow_spec_gre {
u32 type ;
u16 size ;
struct ib_flow_gre_filter val ;
struct ib_flow_gre_filter mask ;
} ;
2018-05-13 14:33:32 +03:00
struct ib_flow_mpls_filter {
__be32 tag ;
/* Must be last */
u8 real_sz [ 0 ] ;
} ;
struct ib_flow_spec_mpls {
u32 type ;
u16 size ;
struct ib_flow_mpls_filter val ;
struct ib_flow_mpls_filter mask ;
} ;
2017-01-18 15:59:48 +03:00
struct ib_flow_spec_action_tag {
enum ib_flow_spec_type type ;
u16 size ;
u32 tag_id ;
} ;
2017-04-03 13:13:51 +03:00
struct ib_flow_spec_action_drop {
enum ib_flow_spec_type type ;
u16 size ;
} ;
2018-03-28 09:27:46 +03:00
struct ib_flow_spec_action_handle {
enum ib_flow_spec_type type ;
u16 size ;
struct ib_flow_action * act ;
} ;
2018-05-31 16:43:36 +03:00
enum ib_counters_description {
IB_COUNTER_PACKETS ,
IB_COUNTER_BYTES ,
} ;
struct ib_flow_spec_action_count {
enum ib_flow_spec_type type ;
u16 size ;
struct ib_counters * counters ;
} ;
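Action specs such as the count action above go into the same TLV list as
the header specs; num_of_specs and the attribute size simply grow. A
minimal sketch follows (the layout and helper name are illustrative only;
"counters" is assumed to be a previously created struct ib_counters).
/* Sketch: an Ethernet match followed by a count action in one TLV list. */
struct eth_count_flow_attr {
	struct ib_flow_attr		 attr;
	struct ib_flow_spec_eth		 spec_eth;
	struct ib_flow_spec_action_count spec_count;
} __packed;
static void init_eth_count_flow(struct eth_count_flow_attr *flow,
				struct ib_counters *counters)
{
	memset(flow, 0, sizeof(*flow));
	flow->attr.type		  = IB_FLOW_ATTR_NORMAL;
	flow->attr.size		  = sizeof(*flow);	/* attr + both specs */
	flow->attr.num_of_specs	  = 2;			/* eth spec + count action */
	flow->attr.port		  = 1;
	flow->spec_eth.type	  = IB_FLOW_SPEC_ETH;
	flow->spec_eth.size	  = sizeof(flow->spec_eth);
	flow->spec_count.type	  = IB_FLOW_SPEC_ACTION_COUNT;
	flow->spec_count.size	  = sizeof(flow->spec_count);
	flow->spec_count.counters = counters;
}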
2013-08-07 15:01:59 +04:00
union ib_flow_spec {
struct {
2016-11-14 20:04:51 +03:00
u32 type ;
2013-08-07 15:01:59 +04:00
u16 size;
};
struct ib_flow_spec_eth eth;
2013-11-07 17:25:13 +04:00
struct ib_flow_spec_ib ib;
2013-08-07 15:01:59 +04:00
struct ib_flow_spec_ipv4 ipv4;
struct ib_flow_spec_tcp_udp tcp_udp;
2016-06-17 15:14:50 +03:00
struct ib_flow_spec_ipv6 ipv6;
2016-11-14 20:04:47 +03:00
struct ib_flow_spec_tunnel tunnel;
2018-03-28 09:27:49 +03:00
struct ib_flow_spec_esp esp;
2018-05-13 14:33:30 +03:00
struct ib_flow_spec_gre gre;
2018-05-13 14:33:32 +03:00
struct ib_flow_spec_mpls mpls;
2017-01-18 15:59:48 +03:00
struct ib_flow_spec_action_tag flow_tag;
2017-04-03 13:13:51 +03:00
struct ib_flow_spec_action_drop drop;
2018-03-28 09:27:46 +03:00
struct ib_flow_spec_action_handle action;
2018-05-31 16:43:36 +03:00
struct ib_flow_spec_action_count flow_count;
2013-08-07 15:01:59 +04:00
};
struct ib_flow_attr {
enum ib_flow_attr_type type;
u16 size;
u16 priority;
u32 flags;
u8 num_of_specs;
u8 port;
2018-06-07 17:57:16 +03:00
union ib_flow_spec flows[];
2013-08-07 15:01:59 +04:00
};
struct ib_flow {
struct ib_qp *qp;
2018-07-23 15:25:08 +03:00
struct ib_device *device;
2013-08-07 15:01:59 +04:00
struct ib_uobject *uobject;
};
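To make the TLV layout concrete, the sketch below (not part of this header) shows a kernel caller steering all traffic for one destination MAC to a RAW_PACKET QP. It assumes the ib_create_flow()/ib_destroy_flow() helpers described in the commit message above are available to the caller, that <rdma/ib_verbs.h> and <linux/etherdevice.h> are included, and that qp and dmac come from the caller; field values are illustrative only.

/* Illustrative sketch only; error handling trimmed. */
static struct ib_flow *example_steer_dmac_to_qp(struct ib_qp *qp, const u8 *dmac)
{
	struct {
		struct ib_flow_attr     attr;
		struct ib_flow_spec_eth eth;	/* specs follow the attr as TLVs */
	} rule = {
		.attr = {
			.type         = IB_FLOW_ATTR_NORMAL,
			.size         = sizeof(rule),
			.priority     = 0,
			.num_of_specs = 1,
			.port         = 1,
		},
		.eth = {
			.type = IB_FLOW_SPEC_ETH,
			.size = sizeof(struct ib_flow_spec_eth),
		},
	};

	ether_addr_copy(rule.eth.val.dst_mac, dmac);
	eth_broadcast_addr(rule.eth.mask.dst_mac);	/* match every bit of dst_mac */

	/* Matching packets are forwarded to @qp until ib_destroy_flow() is called. */
	return ib_create_flow(qp, &rule.attr, IB_FLOW_DOMAIN_USER);
}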
IB/uverbs: Add flow_action create and destroy verbs
A verbs application may receive and transmit packets using a data
path pipeline. Sometimes, the first stage in the receive pipeline or
the last stage in the transmit pipeline involves transforming a
packet, either in order to make it easier for later stages to process
it or to prepare it for transmission over the wire. Such transformation
could be stripping/encapsulating the packet (i.e. vxlan),
decrypting/encrypting it (i.e. ipsec), altering headers, doing some
complex FPGA changes, etc.
Some hardware could do such transformations without software data path
intervention at all. The flow steering API supports steering a
packet (either to a QP or dropping it) and some simple packet
immutable actions (i.e. tagging a packet). Complex actions, that may
change the packet, could bloat the flow steering API extensively.
Sometimes the same action should be applied to several flows.
In this case, it's easier to bind several flows to the same action and
modify it than change all matching flows.
Introducing a new flow_action object that abstracts any packet
transformation (out of a standard and well defined set of actions).
This flow_action object could be tied to a flow steering rule via a
new specification.
Currently, we support esp flow_action, which encrypts or decrypts a
packet according to the given parameters. However, we present a
flexible schema that could be used for other transformation actions tied
to flow rules.
Reviewed-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
2018-03-28 09:27:45 +03:00
enum ib_flow_action_type {
IB_FLOW_ACTION_UNSPECIFIED,
IB_FLOW_ACTION_ESP = 1,
};
struct ib_flow_action_attrs_esp_keymats {
enum ib_uverbs_flow_action_esp_keymat protocol;
union {
struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
} keymat;
};
struct ib_flow_action_attrs_esp_replays {
enum ib_uverbs_flow_action_esp_replay protocol;
union {
struct ib_uverbs_flow_action_esp_replay_bmp bmp;
} replay;
};
enum ib_flow_action_attrs_esp_flags {
/* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
 * This is done in order to share the same flags between user-space and
 * kernel and spare an unnecessary translation.
 */
/* Kernel flags */
IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32,
2018-03-28 09:27:48 +03:00
IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33,
2018-03-28 09:27:45 +03:00
};
struct ib_flow_spec_list {
struct ib_flow_spec_list *next;
union ib_flow_spec spec;
};
struct ib_flow_action_attrs_esp {
struct ib_flow_action_attrs_esp_keymats *keymat;
struct ib_flow_action_attrs_esp_replays *replay;
struct ib_flow_spec_list *encap;
/* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
 * Value of 0 is a valid value.
 */
u32 esn;
u32 spi;
u32 seq;
u32 tfc_pad;
/* Use enum ib_flow_action_attrs_esp_flags */
u64 flags;
u64 hard_limit_pkts;
};
struct ib_flow_action {
struct ib_device *device;
struct ib_uobject *uobject;
enum ib_flow_action_type type;
atomic_t usecnt;
};
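As a rough, hedged illustration of how these pieces fit together, the sketch below fills struct ib_flow_action_attrs_esp for the create_flow_action_esp device operation declared further down in struct ib_device_ops. The keymat choice, SPI/sequence values and the dev/attrs arguments are assumptions, not taken from this header.

static struct ib_flow_action *
example_create_esp_action(struct ib_device *dev, struct uverbs_attr_bundle *attrs)
{
	static struct ib_flow_action_attrs_esp_keymats keymat = {
		.protocol = IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM,
		/* .keymat.aes_gcm would carry the actual key material */
	};
	struct ib_flow_action_attrs_esp esp = {
		.keymat = &keymat,
		.spi    = 0x100,
		.seq    = 1,
		.esn    = 0,	/* valid even when 0, see the comment above */
		.flags  = IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED,
	};

	return dev->ops.create_flow_action_esp(dev, &esp, attrs);
}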
2015-06-06 21:38:31 +03:00
struct ib_mad_hdr;
2005-04-17 02:20:36 +04:00
struct ib_grh;
enum ib_process_mad_flags {
IB_MAD_IGNORE_MKEY = 1,
IB_MAD_IGNORE_BKEY = 2,
IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};
enum ib_mad_result {
IB_MAD_RESULT_FAILURE = 0,      /* (!SUCCESS is the important flag) */
IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */
IB_MAD_RESULT_REPLY = 1 << 1,   /* Reply packet needs to be sent */
IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */
};
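To show how these result bits are meant to be combined, here is a hedged sketch of a driver's ->process_mad() hook (the signature matches the one in struct ib_device_ops below). It assumes <rdma/ib_mad.h> for struct ib_mad_hdr and IB_MGMT_CLASS_PERF_MGMT; the MAD handling itself is only a placeholder.

static int example_process_mad(struct ib_device *device, int process_mad_flags,
			       u8 port_num, const struct ib_wc *in_wc,
			       const struct ib_grh *in_grh,
			       const struct ib_mad_hdr *in_mad, size_t in_mad_size,
			       struct ib_mad_hdr *out_mad, size_t *out_mad_size,
			       u16 *out_mad_pkey_index)
{
	if (!(process_mad_flags & IB_MAD_IGNORE_MKEY)) {
		/* ... M_Key validation would go here ... */
	}

	/* Suppose only performance management MADs are answered by this device. */
	if (in_mad->mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
		return IB_MAD_RESULT_FAILURE;	/* not ours; core keeps processing */

	/* ... fill *out_mad and *out_mad_size with the counter response ... */
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}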
2017-01-17 12:11:12 +03:00
struct ib_port_cache {
2017-05-19 15:48:51 +03:00
u64 subnet_prefix ;
2017-01-17 12:11:12 +03:00
struct ib_pkey_cache * pkey ;
struct ib_gid_table * gid ;
u8 lmc ;
enum ib_port_state port_state ;
} ;
2005-04-17 02:20:36 +04:00
struct ib_cache {
rwlock_t lock ;
struct ib_event_handler event_handler ;
} ;
2015-05-14 03:02:58 +03:00
struct ib_port_immutable {
int pkey_tbl_len ;
int gid_tbl_len ;
2015-05-14 03:02:59 +03:00
u32 core_cap_flags ;
2015-06-06 21:38:29 +03:00
u32 max_mad_size ;
2015-05-14 03:02:58 +03:00
} ;
2019-02-13 07:12:48 +03:00
struct ib_port_data {
2019-02-13 07:12:51 +03:00
struct ib_device * ib_dev ;
2019-02-13 07:12:48 +03:00
struct ib_port_immutable immutable ;
spinlock_t pkey_list_lock ;
struct list_head pkey_list ;
2019-02-13 07:12:49 +03:00
struct ib_port_cache cache ;
2019-02-13 07:12:50 +03:00
spinlock_t netdev_lock ;
2019-02-13 07:12:51 +03:00
struct net_device __rcu * netdev ;
struct hlist_node ndev_hash_link ;
2019-02-13 07:12:48 +03:00
} ;
2017-04-13 06:29:20 +03:00
/* rdma netdev type - specifies protocol type */
enum rdma_netdev_t {
2017-04-10 11:22:25 +03:00
RDMA_NETDEV_OPA_VNIC ,
RDMA_NETDEV_IPOIB ,
2017-04-13 06:29:20 +03:00
} ;
/**
* struct rdma_netdev - rdma netdev
* For cases where netstack interfacing is required .
*/
struct rdma_netdev {
void * clnt_priv ;
struct ib_device * hca ;
u8 port_num ;
2018-07-29 11:34:56 +03:00
/*
* cleanup function must be specified .
* FIXME : This is only used for OPA_VNIC and that usage should be
* removed too .
*/
2017-06-30 23:14:46 +03:00
void ( * free_rdma_netdev ) ( struct net_device * netdev ) ;
2017-04-13 06:29:20 +03:00
/* control functions */
void ( * set_id ) ( struct net_device * netdev , int id ) ;
2017-04-10 11:22:25 +03:00
/* send packet */
int ( * send ) ( struct net_device * dev , struct sk_buff * skb ,
struct ib_ah * address , u32 dqpn ) ;
/* multicast */
int ( * attach_mcast ) ( struct net_device * dev , struct ib_device * hca ,
union ib_gid * gid , u16 mlid ,
int set_qkey , u32 qkey ) ;
int ( * detach_mcast ) ( struct net_device * dev , struct ib_device * hca ,
union ib_gid * gid , u16 mlid ) ;
2017-04-13 06:29:20 +03:00
} ;
2018-08-14 14:08:51 +03:00
struct rdma_netdev_alloc_params {
size_t sizeof_priv ;
unsigned int txqs ;
unsigned int rxqs ;
void * param ;
int ( * initialize_rdma_netdev ) ( struct ib_device * device , u8 port_num ,
struct net_device * netdev , void * param ) ;
} ;
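A hedged sketch of a driver's ->rdma_netdev_get_params() hook follows; struct example_rn_priv and example_rn_init() are hypothetical driver internals, and only the IPoIB offload type is claimed to be supported here, with anything else returning -EOPNOTSUPP as the rdma netdev comment further down requires.

struct example_rn_priv {
	struct rdma_netdev rn;	/* common part first, driver state after it */
	/* ... driver-private fields ... */
};

static int example_rn_init(struct ib_device *device, u8 port_num,
			   struct net_device *netdev, void *param)
{
	/* wire up rn->send, rn->attach_mcast, ... on netdev_priv(netdev) */
	return 0;
}

static int example_rdma_netdev_get_params(struct ib_device *device, u8 port_num,
					  enum rdma_netdev_t type,
					  struct rdma_netdev_alloc_params *params)
{
	if (type != RDMA_NETDEV_IPOIB)
		return -EOPNOTSUPP;

	params->sizeof_priv = sizeof(struct example_rn_priv);
	params->txqs = 1;
	params->rxqs = 1;
	params->param = NULL;
	params->initialize_rdma_netdev = example_rn_init;
	return 0;
}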
2018-05-31 16:43:31 +03:00
struct ib_counters {
struct ib_device * device ;
struct ib_uobject * uobject ;
/* num of objects attached */
atomic_t usecnt ;
} ;
2018-05-31 16:43:33 +03:00
struct ib_counters_read_attr {
u64 * counters_buff ;
u32 ncounters ;
u32 flags ; /* use enum ib_read_counters_flags */
} ;
2018-03-28 09:27:45 +03:00
struct uverbs_attr_bundle ;
2019-04-29 14:59:06 +03:00
struct iw_cm_id ;
struct iw_cm_conn_param ;
2018-03-28 09:27:45 +03:00
2019-02-03 15:55:50 +03:00
# define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \
	.size_##ib_struct = \
		(sizeof(struct drv_struct) + \
		 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \
		 BUILD_BUG_ON_ZERO( \
			 !__same_type(((struct drv_struct *)NULL)->member, \
				      struct ib_struct)))
2019-03-28 16:12:58 +03:00
# define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \
	((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))
2019-02-03 15:55:50 +03:00
# define rdma_zalloc_drv_obj(ib_dev, ib_type) \
2019-03-28 16:12:58 +03:00
	rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
2019-02-03 15:55:50 +03:00
# define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
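As a sketch of how these macros are meant to be used: struct example_pd below is a hypothetical provider structure; the matching INIT_RDMA_OBJ_SIZE() entry belongs in the driver's struct ib_device_ops initializer (see the sketch after that structure below). Note that the BUILD_BUG_ON_ZERO(offsetof(...)) term forces the ib_ member to sit at offset 0.

struct example_pd {
	struct ib_pd ibpd;	/* member named in INIT_RDMA_OBJ_SIZE(); must be first */
	u32          pdn;	/* driver-private state */
};

/* With .size_ib_pd set via INIT_RDMA_OBJ_SIZE(ib_pd, example_pd, ibpd),
 * core code can allocate the whole driver object in one shot:
 */
static struct ib_pd *example_alloc_whole_pd(struct ib_device *ibdev)
{
	struct ib_pd *pd = rdma_zalloc_drv_obj(ibdev, ib_pd);

	/* pd is NULL on allocation failure; otherwise
	 * container_of(pd, struct example_pd, ibpd) is the driver object.
	 */
	return pd;
}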
2018-12-10 22:09:30 +03:00
/**
* struct ib_device_ops - InfiniBand device operations
* This structure defines all the InfiniBand device operations; providers
* need to define the operations they support, and any left undefined stay NULL.
*/
struct ib_device_ops {
2019-06-05 20:39:26 +03:00
struct module * owner ;
2019-06-05 20:39:24 +03:00
enum rdma_driver_id driver_id ;
2019-06-05 20:39:25 +03:00
u32 uverbs_abi_ver ;
2019-06-14 03:38:19 +03:00
unsigned int uverbs_no_driver_id_binding : 1 ;
2019-06-05 20:39:24 +03:00
2018-12-10 22:09:30 +03:00
int ( * post_send ) ( struct ib_qp * qp , const struct ib_send_wr * send_wr ,
const struct ib_send_wr * * bad_send_wr ) ;
int ( * post_recv ) ( struct ib_qp * qp , const struct ib_recv_wr * recv_wr ,
const struct ib_recv_wr * * bad_recv_wr ) ;
void ( * drain_rq ) ( struct ib_qp * qp ) ;
void ( * drain_sq ) ( struct ib_qp * qp ) ;
int ( * poll_cq ) ( struct ib_cq * cq , int num_entries , struct ib_wc * wc ) ;
int ( * peek_cq ) ( struct ib_cq * cq , int wc_cnt ) ;
int ( * req_notify_cq ) ( struct ib_cq * cq , enum ib_cq_notify_flags flags ) ;
int ( * req_ncomp_notif ) ( struct ib_cq * cq , int wc_cnt ) ;
int ( * post_srq_recv ) ( struct ib_srq * srq ,
const struct ib_recv_wr * recv_wr ,
const struct ib_recv_wr * * bad_recv_wr ) ;
int ( * process_mad ) ( struct ib_device * device , int process_mad_flags ,
u8 port_num , const struct ib_wc * in_wc ,
const struct ib_grh * in_grh ,
const struct ib_mad_hdr * in_mad , size_t in_mad_size ,
struct ib_mad_hdr * out_mad , size_t * out_mad_size ,
u16 * out_mad_pkey_index ) ;
int ( * query_device ) ( struct ib_device * device ,
struct ib_device_attr * device_attr ,
struct ib_udata * udata ) ;
int ( * modify_device ) ( struct ib_device * device , int device_modify_mask ,
struct ib_device_modify * device_modify ) ;
void ( * get_dev_fw_str ) ( struct ib_device * device , char * str ) ;
const struct cpumask * ( * get_vector_affinity ) ( struct ib_device * ibdev ,
int comp_vector ) ;
int ( * query_port ) ( struct ib_device * device , u8 port_num ,
struct ib_port_attr * port_attr ) ;
int ( * modify_port ) ( struct ib_device * device , u8 port_num ,
int port_modify_mask ,
struct ib_port_modify * port_modify ) ;
/**
* The following mandatory functions are used only at device
* registration . Keep functions such as these at the end of this
* structure to avoid cache line misses when accessing struct ib_device
* in fast paths .
*/
int ( * get_port_immutable ) ( struct ib_device * device , u8 port_num ,
struct ib_port_immutable * immutable ) ;
enum rdma_link_layer ( * get_link_layer ) ( struct ib_device * device ,
u8 port_num ) ;
/**
* When calling get_netdev , the HW vendor ' s driver should return the
* net device of device @ device at port @ port_num or NULL if such
* a net device doesn ' t exist . The vendor driver should call dev_hold
* on this net device . The HW vendor ' s device driver must guarantee
* that this function returns NULL before the net device has finished
* NETDEV_UNREGISTER state .
*/
struct net_device * ( * get_netdev ) ( struct ib_device * device , u8 port_num ) ;
/**
* rdma netdev operation
*
* Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
* must return - EOPNOTSUPP if it doesn ' t support the specified type .
*/
struct net_device * ( * alloc_rdma_netdev ) (
struct ib_device * device , u8 port_num , enum rdma_netdev_t type ,
const char * name , unsigned char name_assign_type ,
void ( * setup ) ( struct net_device * ) ) ;
int ( * rdma_netdev_get_params ) ( struct ib_device * device , u8 port_num ,
enum rdma_netdev_t type ,
struct rdma_netdev_alloc_params * params ) ;
/**
* query_gid should return the GID value for @device when the @port_num
* link layer is either IB or iWARP. It is a no-op if the @port_num port
* uses the RoCE link layer.
*/
int ( * query_gid ) ( struct ib_device * device , u8 port_num , int index ,
union ib_gid * gid ) ;
/**
* When calling add_gid , the HW vendor ' s driver should add the gid
* of device of port at gid index available at @ attr . Meta - info of
* that gid ( for example , the network device related to this gid ) is
* available at @ attr . @ context allows the HW vendor driver to store
* extra information together with a GID entry . The HW vendor driver may
* allocate memory to contain this information and store it in @ context
* when a new GID entry is written to . Params are consistent until the
* next call of add_gid or delete_gid . The function should return 0 on
* success or error otherwise . The function could be called
* concurrently for different ports . This function is only called when
* roce_gid_table is used .
*/
int ( * add_gid ) ( const struct ib_gid_attr * attr , void * * context ) ;
/**
* When calling del_gid , the HW vendor ' s driver should delete the
* gid of device @ device at gid index gid_index of port port_num
* available in @ attr .
* Upon the deletion of a GID entry , the HW vendor must free any
* allocated memory . The caller will clear @ context afterwards .
* This function is only called when roce_gid_table is used .
*/
int ( * del_gid ) ( const struct ib_gid_attr * attr , void * * context ) ;
int ( * query_pkey ) ( struct ib_device * device , u8 port_num , u16 index ,
u16 * pkey ) ;
2019-02-12 21:39:16 +03:00
int ( * alloc_ucontext ) ( struct ib_ucontext * context ,
struct ib_udata * udata ) ;
void ( * dealloc_ucontext ) ( struct ib_ucontext * context ) ;
2018-12-10 22:09:30 +03:00
int ( * mmap ) ( struct ib_ucontext * context , struct vm_area_struct * vma ) ;
void ( * disassociate_ucontext ) ( struct ib_ucontext * ibcontext ) ;
2019-03-31 19:10:07 +03:00
int ( * alloc_pd ) ( struct ib_pd * pd , struct ib_udata * udata ) ;
2019-03-31 19:10:05 +03:00
void ( * dealloc_pd ) ( struct ib_pd * pd , struct ib_udata * udata ) ;
2019-04-03 16:42:42 +03:00
int ( * create_ah ) ( struct ib_ah * ah , struct rdma_ah_attr * ah_attr ,
u32 flags , struct ib_udata * udata ) ;
2018-12-10 22:09:30 +03:00
int ( * modify_ah ) ( struct ib_ah * ah , struct rdma_ah_attr * ah_attr ) ;
int ( * query_ah ) ( struct ib_ah * ah , struct rdma_ah_attr * ah_attr ) ;
2019-04-03 16:42:42 +03:00
void ( * destroy_ah ) ( struct ib_ah * ah , u32 flags ) ;
2019-04-03 16:42:43 +03:00
int ( * create_srq ) ( struct ib_srq * srq ,
struct ib_srq_init_attr * srq_init_attr ,
struct ib_udata * udata ) ;
2018-12-10 22:09:30 +03:00
int ( * modify_srq ) ( struct ib_srq * srq , struct ib_srq_attr * srq_attr ,
enum ib_srq_attr_mask srq_attr_mask ,
struct ib_udata * udata ) ;
int ( * query_srq ) ( struct ib_srq * srq , struct ib_srq_attr * srq_attr ) ;
2019-04-03 16:42:43 +03:00
void ( * destroy_srq ) ( struct ib_srq * srq , struct ib_udata * udata ) ;
2018-12-10 22:09:30 +03:00
struct ib_qp * ( * create_qp ) ( struct ib_pd * pd ,
struct ib_qp_init_attr * qp_init_attr ,
struct ib_udata * udata ) ;
int ( * modify_qp ) ( struct ib_qp * qp , struct ib_qp_attr * qp_attr ,
int qp_attr_mask , struct ib_udata * udata ) ;
int ( * query_qp ) ( struct ib_qp * qp , struct ib_qp_attr * qp_attr ,
int qp_attr_mask , struct ib_qp_init_attr * qp_init_attr ) ;
2019-03-31 19:10:05 +03:00
int ( * destroy_qp ) ( struct ib_qp * qp , struct ib_udata * udata ) ;
2019-05-28 14:37:29 +03:00
int ( * create_cq ) ( struct ib_cq * cq , const struct ib_cq_init_attr * attr ,
struct ib_udata * udata ) ;
2018-12-10 22:09:30 +03:00
int ( * modify_cq ) ( struct ib_cq * cq , u16 cq_count , u16 cq_period ) ;
2019-05-28 14:37:28 +03:00
void ( * destroy_cq ) ( struct ib_cq * cq , struct ib_udata * udata ) ;
2018-12-10 22:09:30 +03:00
int ( * resize_cq ) ( struct ib_cq * cq , int cqe , struct ib_udata * udata ) ;
struct ib_mr * ( * get_dma_mr ) ( struct ib_pd * pd , int mr_access_flags ) ;
struct ib_mr * ( * reg_user_mr ) ( struct ib_pd * pd , u64 start , u64 length ,
u64 virt_addr , int mr_access_flags ,
struct ib_udata * udata ) ;
int ( * rereg_user_mr ) ( struct ib_mr * mr , int flags , u64 start , u64 length ,
u64 virt_addr , int mr_access_flags ,
struct ib_pd * pd , struct ib_udata * udata ) ;
2019-03-31 19:10:05 +03:00
int ( * dereg_mr ) ( struct ib_mr * mr , struct ib_udata * udata ) ;
2018-12-10 22:09:30 +03:00
struct ib_mr * ( * alloc_mr ) ( struct ib_pd * pd , enum ib_mr_type mr_type ,
2019-03-31 19:10:05 +03:00
u32 max_num_sg , struct ib_udata * udata ) ;
2019-06-11 18:52:39 +03:00
struct ib_mr * ( * alloc_mr_integrity ) ( struct ib_pd * pd ,
u32 max_num_data_sg ,
u32 max_num_meta_sg ) ;
2018-12-11 14:37:52 +03:00
int ( * advise_mr ) ( struct ib_pd * pd ,
enum ib_uverbs_advise_mr_advice advice , u32 flags ,
struct ib_sge * sg_list , u32 num_sge ,
struct uverbs_attr_bundle * attrs ) ;
2018-12-10 22:09:30 +03:00
int ( * map_mr_sg ) ( struct ib_mr * mr , struct scatterlist * sg , int sg_nents ,
unsigned int * sg_offset ) ;
int ( * check_mr_status ) ( struct ib_mr * mr , u32 check_mask ,
struct ib_mr_status * mr_status ) ;
struct ib_mw * ( * alloc_mw ) ( struct ib_pd * pd , enum ib_mw_type type ,
struct ib_udata * udata ) ;
int ( * dealloc_mw ) ( struct ib_mw * mw ) ;
struct ib_fmr * ( * alloc_fmr ) ( struct ib_pd * pd , int mr_access_flags ,
struct ib_fmr_attr * fmr_attr ) ;
int ( * map_phys_fmr ) ( struct ib_fmr * fmr , u64 * page_list , int list_len ,
u64 iova ) ;
int ( * unmap_fmr ) ( struct list_head * fmr_list ) ;
int ( * dealloc_fmr ) ( struct ib_fmr * fmr ) ;
int ( * attach_mcast ) ( struct ib_qp * qp , union ib_gid * gid , u16 lid ) ;
int ( * detach_mcast ) ( struct ib_qp * qp , union ib_gid * gid , u16 lid ) ;
struct ib_xrcd * ( * alloc_xrcd ) ( struct ib_device * device ,
struct ib_udata * udata ) ;
2019-03-31 19:10:05 +03:00
int ( * dealloc_xrcd ) ( struct ib_xrcd * xrcd , struct ib_udata * udata ) ;
2018-12-10 22:09:30 +03:00
struct ib_flow * ( * create_flow ) ( struct ib_qp * qp ,
struct ib_flow_attr * flow_attr ,
int domain , struct ib_udata * udata ) ;
int ( * destroy_flow ) ( struct ib_flow * flow_id ) ;
struct ib_flow_action * ( * create_flow_action_esp ) (
struct ib_device * device ,
const struct ib_flow_action_attrs_esp * attr ,
struct uverbs_attr_bundle * attrs ) ;
int ( * destroy_flow_action ) ( struct ib_flow_action * action ) ;
int ( * modify_flow_action_esp ) (
struct ib_flow_action * action ,
const struct ib_flow_action_attrs_esp * attr ,
struct uverbs_attr_bundle * attrs ) ;
int ( * set_vf_link_state ) ( struct ib_device * device , int vf , u8 port ,
int state ) ;
int ( * get_vf_config ) ( struct ib_device * device , int vf , u8 port ,
struct ifla_vf_info * ivf ) ;
int ( * get_vf_stats ) ( struct ib_device * device , int vf , u8 port ,
struct ifla_vf_stats * stats ) ;
int ( * set_vf_guid ) ( struct ib_device * device , int vf , u8 port , u64 guid ,
int type ) ;
struct ib_wq * ( * create_wq ) ( struct ib_pd * pd ,
struct ib_wq_init_attr * init_attr ,
struct ib_udata * udata ) ;
2019-06-12 15:27:41 +03:00
void ( * destroy_wq ) ( struct ib_wq * wq , struct ib_udata * udata ) ;
2018-12-10 22:09:30 +03:00
int ( * modify_wq ) ( struct ib_wq * wq , struct ib_wq_attr * attr ,
u32 wq_attr_mask , struct ib_udata * udata ) ;
struct ib_rwq_ind_table * ( * create_rwq_ind_table ) (
struct ib_device * device ,
struct ib_rwq_ind_table_init_attr * init_attr ,
struct ib_udata * udata ) ;
int ( * destroy_rwq_ind_table ) ( struct ib_rwq_ind_table * wq_ind_table ) ;
struct ib_dm * ( * alloc_dm ) ( struct ib_device * device ,
struct ib_ucontext * context ,
struct ib_dm_alloc_attr * attr ,
struct uverbs_attr_bundle * attrs ) ;
2019-03-31 19:10:05 +03:00
int ( * dealloc_dm ) ( struct ib_dm * dm , struct uverbs_attr_bundle * attrs ) ;
2018-12-10 22:09:30 +03:00
struct ib_mr * ( * reg_dm_mr ) ( struct ib_pd * pd , struct ib_dm * dm ,
struct ib_dm_mr_attr * attr ,
struct uverbs_attr_bundle * attrs ) ;
struct ib_counters * ( * create_counters ) (
struct ib_device * device , struct uverbs_attr_bundle * attrs ) ;
int ( * destroy_counters ) ( struct ib_counters * counters ) ;
int ( * read_counters ) ( struct ib_counters * counters ,
struct ib_counters_read_attr * counters_read_attr ,
struct uverbs_attr_bundle * attrs ) ;
/**
* alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
* driver initialized data . The struct is kfree ( ) ' ed by the sysfs
* core when the device is removed . A lifespan of - 1 in the return
* struct tells the core to set a default lifespan .
*/
struct rdma_hw_stats * ( * alloc_hw_stats ) ( struct ib_device * device ,
u8 port_num ) ;
/**
* get_hw_stats - Fill in the counter value ( s ) in the stats struct .
* @ index - The index in the value array we wish to have updated , or
* num_counters if we want all stats updated
* Return codes -
* < 0 - Error , no counters updated
* index - Updated the single counter pointed to by index
* num_counters - Updated all counters ( will reset the timestamp
* and prevent further calls for lifespan milliseconds )
* Drivers are allowed to update all counters in lieu of just the
* one given in index at their option
*/
int ( * get_hw_stats ) ( struct ib_device * device ,
struct rdma_hw_stats * stats , u8 port , int index ) ;
2018-12-18 15:28:30 +03:00
/*
* This function is called once for each port when a ib device is
* registered .
*/
int ( * init_port ) ( struct ib_device * device , u8 port_num ,
struct kobject * port_sysfs ) ;
2019-01-30 13:49:02 +03:00
/**
* Allows rdma drivers to add their own restrack attributes .
*/
int ( * fill_res_entry ) ( struct sk_buff * msg ,
struct rdma_restrack_entry * entry ) ;
2019-02-03 15:55:51 +03:00
2019-02-13 07:12:53 +03:00
/* Device lifecycle callbacks */
2019-02-13 07:12:56 +03:00
/*
* Called after the device becomes registered , before clients are
* attached
*/
int ( * enable_driver ) ( struct ib_device * dev ) ;
2019-02-13 07:12:53 +03:00
/*
* This is called as part of ib_dealloc_device ( ) .
*/
void ( * dealloc_driver ) ( struct ib_device * dev ) ;
2019-04-29 14:59:06 +03:00
/* iWarp CM callbacks */
void ( * iw_add_ref ) ( struct ib_qp * qp ) ;
void ( * iw_rem_ref ) ( struct ib_qp * qp ) ;
struct ib_qp * ( * iw_get_qp ) ( struct ib_device * device , int qpn ) ;
int ( * iw_connect ) ( struct iw_cm_id * cm_id ,
struct iw_cm_conn_param * conn_param ) ;
int ( * iw_accept ) ( struct iw_cm_id * cm_id ,
struct iw_cm_conn_param * conn_param ) ;
int ( * iw_reject ) ( struct iw_cm_id * cm_id , const void * pdata ,
u8 pdata_len ) ;
int ( * iw_create_listen ) ( struct iw_cm_id * cm_id , int backlog ) ;
int ( * iw_destroy_listen ) ( struct iw_cm_id * cm_id ) ;
2019-04-03 16:42:42 +03:00
DECLARE_RDMA_OBJ_SIZE ( ib_ah ) ;
2019-05-28 14:37:29 +03:00
DECLARE_RDMA_OBJ_SIZE ( ib_cq ) ;
2019-02-03 15:55:51 +03:00
DECLARE_RDMA_OBJ_SIZE ( ib_pd ) ;
2019-04-03 16:42:43 +03:00
DECLARE_RDMA_OBJ_SIZE ( ib_srq ) ;
2019-02-12 21:39:16 +03:00
DECLARE_RDMA_OBJ_SIZE ( ib_ucontext ) ;
2018-12-10 22:09:30 +03:00
} ;
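A hedged sketch of how a provider would typically populate this structure and attach it during probe follows. The example_* symbols are hypothetical (example_pd is the structure sketched after the object-size macros above), and ib_set_device_ops() is assumed to be the core helper that copies the non-NULL callbacks into the device.

static int example_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	/* pd was already allocated by the core using size_ib_pd */
	return 0;
}

static void example_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
}

static const struct ib_device_ops example_dev_ops = {
	.owner          = THIS_MODULE,
	.driver_id      = RDMA_DRIVER_UNKNOWN,	/* a real driver uses its own id */
	.uverbs_abi_ver = 1,

	.alloc_pd   = example_alloc_pd,
	.dealloc_pd = example_dealloc_pd,

	INIT_RDMA_OBJ_SIZE(ib_pd, example_pd, ibpd),
};

static int example_register(struct ib_device *ibdev)
{
	ib_set_device_ops(ibdev, &example_dev_ops);
	return ib_register_device(ibdev, "example%d");
}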
2019-02-26 14:56:11 +03:00
struct ib_core_device {
/* device must be the first element in structure until,
* union of ib_core_device and device exists in ib_device .
*/
struct device dev ;
2019-02-26 14:56:13 +03:00
possible_net_t rdma_net ;
2019-02-26 14:56:11 +03:00
struct kobject * ports_kobj ;
struct list_head port_list ;
struct ib_device * owner ; /* reach back to owner ib_device */
} ;
2019-02-18 23:25:47 +03:00
2019-02-26 14:56:11 +03:00
struct rdma_restrack_root ;
2005-04-17 02:20:36 +04:00
struct ib_device {
2017-03-08 01:56:53 +03:00
/* Do not access @dma_device directly from ULP nor from HW drivers. */
struct device * dma_device ;
2018-12-10 22:09:48 +03:00
struct ib_device_ops ops ;
2005-04-17 02:20:36 +04:00
char name [ IB_DEVICE_NAME_MAX ] ;
2019-02-13 07:12:51 +03:00
struct rcu_head rcu_head ;
2005-04-17 02:20:36 +04:00
struct list_head event_handler_list ;
spinlock_t event_handler_lock ;
2019-02-07 08:41:54 +03:00
struct rw_semaphore client_data_rwsem ;
2019-02-07 08:41:53 +03:00
struct xarray client_data ;
2019-02-13 07:12:53 +03:00
struct mutex unregistration_lock ;
2005-04-17 02:20:36 +04:00
struct ib_cache cache ;
2015-05-14 03:02:58 +03:00
/**
2019-02-13 07:12:48 +03:00
* port_data is indexed by port number
2015-05-14 03:02:58 +03:00
*/
2019-02-13 07:12:48 +03:00
struct ib_port_data * port_data ;
2005-04-17 02:20:36 +04:00
2007-05-03 14:48:47 +04:00
int num_comp_vectors ;
2019-02-26 14:56:11 +03:00
union {
struct device dev ;
struct ib_core_device coredev ;
} ;
2018-10-11 22:31:53 +03:00
/* First group is for device attributes,
 * second group is for driver-provided attributes (optional).
 * groups[] is a NULL-terminated array.
 */
const struct attribute_group * groups [ 3 ] ;
2018-09-05 09:47:58 +03:00
2010-02-02 22:09:16 +03:00
u64 uverbs_cmd_mask ;
IB/core: extended command: an improved infrastructure for uverbs commands
Commit 400dbc96583f ("IB/core: Infrastructure for extensible uverbs
commands") added an infrastructure for extensible uverbs commands
while later commit 436f2ad05a0b ("IB/core: Export ib_create/destroy_flow
through uverbs") exported ib_create_flow()/ib_destroy_flow() functions
using this new infrastructure.
According to the commit 400dbc96583f, the purpose of this
infrastructure is to support passing around provider (eg. hardware)
specific buffers when userspace issue commands to the kernel, so that
it would be possible to extend uverbs (eg. core) buffers independently
from the provider buffers.
But the new kernel command function prototypes were not modified to
take advantage of this extension. This issue was exposed by Roland
Dreier in a previous review[1].
So the following patch is an attempt to a revised extensible command
infrastructure.
This improved extensible command infrastructure distinguish between
core (eg. legacy)'s command/response buffers from provider
(eg. hardware)'s command/response buffers: each extended command
implementing function is given a struct ib_udata to hold core
(eg. uverbs) input and output buffers, and another struct ib_udata to
hold the hw (eg. provider) input and output buffers.
Having those buffers identified separately make it easier to increase
one buffer to support extension without having to add some code to
guess the exact size of each command/response parts: This should make
the extended functions more reliable.
Additionally, instead of relying on command identifier being greater
than IB_USER_VERBS_CMD_THRESHOLD, the proposed infrastructure rely on
unused bits in command field: on the 32 bits provided by command
field, only 6 bits are really needed to encode the identifier of
commands currently supported by the kernel. (Even using only 6 bits
leaves room for about 23 new commands).
So this patch makes use of some high order bits in command field to
store flags, leaving enough room for more command identifiers than one
will ever need (eg. 256).
The new flags are used to specify if the command should be processed
as an extended one or a legacy one. While designing the new command
format, care was taken to make usage of flags itself extensible.
Using high order bits of the commands field ensure that newer
libibverbs on older kernel will properly fail when trying to call
extended commands. On the other hand, older libibverbs on newer kernel
will never be able to issue calls to extended commands.
The extended command header includes the optional response pointer so
that output buffer length and output buffer pointer are located
together in the command, allowing proper parameters checking. This
should make implementing functions easier and safer.
Additionally the extended header ensure 64bits alignment, while making
all sizes multiple of 8 bytes, extending the maximum buffer size:
legacy extended
Maximum command buffer: 256KBytes 1024KBytes (512KBytes + 512KBytes)
Maximum response buffer: 256KBytes 1024KBytes (512KBytes + 512KBytes)
For the purpose of doing proper buffer size accounting, the headers
size are no more taken in account in "in_words".
One of the odds of the current extensible infrastructure, reading
twice the "legacy" command header, is fixed by removing the "legacy"
command header from the extended command header: they are processed as
two different parts of the command: memory is read once and
information are not duplicated: it's making clear that's an extended
command scheme and not a different command scheme.
The proposed scheme will format input (command) and output (response)
buffers this way:
- command:
legacy header +
extended header +
command data (core + hw):
+----------------------------------------+
| flags | 00 00 | command |
| in_words | out_words |
+----------------------------------------+
| response |
| response |
| provider_in_words | provider_out_words |
| padding |
+----------------------------------------+
| |
. <uverbs input> .
. (in_words * 8) .
| |
+----------------------------------------+
| |
. <provider input> .
. (provider_in_words * 8) .
| |
+----------------------------------------+
- response, if present:
+----------------------------------------+
| |
. <uverbs output space> .
. (out_words * 8) .
| |
+----------------------------------------+
| |
. <provider output space> .
. (provider_out_words * 8) .
| |
+----------------------------------------+
The overall design is to ensure that the extensible infrastructure is
itself extensible while begin more reliable with more input and bound
checking.
Note:
The unused field in the extended header would be perfect candidate to
hold the command "comp_mask" (eg. bit field used to handle
compatibility). This was suggested by Roland Dreier in a previous
review[2]. But "comp_mask" field is likely to be present in the uverb
input and/or provider input, likewise for the response, as noted by
Matan Barak[3], so it doesn't make sense to put "comp_mask" in the
header.
[1]:
http://marc.info/?i=CAL1RGDWxmM17W2o_era24A-TTDeKyoL6u3NRu_=t_dhV_ZA9MA@mail.gmail.com
[2]:
http://marc.info/?i=CAL1RGDXJtrc849M6_XNZT5xO1+ybKtLWGq6yg6LhoSsKpsmkYA@mail.gmail.com
[3]:
http://marc.info/?i=525C1149.6000701@mellanox.com
Signed-off-by: Yann Droneaud <ydroneaud@opteya.com>
Link: http://marc.info/?i=cover.1383773832.git.ydroneaud@opteya.com
[ Convert "ret ? ret : 0" to the equivalent "ret". - Roland ]
Signed-off-by: Roland Dreier <roland@purestorage.com>
2013-11-07 02:21:49 +04:00
u64 uverbs_ex_cmd_mask ;
2005-09-30 01:17:48 +04:00
2016-08-25 20:57:07 +03:00
char node_desc [ IB_DEVICE_NODE_DESC_MAX ] ;
2006-01-10 18:39:34 +03:00
__be64 node_guid ;
2008-07-15 10:48:53 +04:00
u32 local_dma_lkey ;
2015-06-29 16:57:00 +03:00
u16 is_switch : 1 ;
2019-01-22 11:08:22 +03:00
/* Indicates kernel verbs support, should not be used in drivers */
u16 kverbs_provider : 1 ;
2005-04-17 02:20:36 +04:00
u8 node_type ;
u8 phys_port_cnt ;
2015-12-18 11:59:44 +03:00
struct ib_device_attr attrs ;
IB/core: Make device counter infrastructure dynamic
In practice, each RDMA device has a unique set of counters that the
hardware implements. Having a central set of counters that they must
all adhere to is limiting and causes many useful counters to not be
available.
Therefore we create a dynamic counter registration infrastructure.
The driver must implement a stats structure allocation routine, in
which the driver must place the directory name it wants, a list of
names for all of the counters, an array of u64 counters themselves,
plus a few generic configuration options.
We then implement a core routine to create a sysfs file for each
of the named stats elements, and a core routine to retrieve the
stats when any of the sysfs attribute files are read.
To avoid excessive beating on the stats generation routine in the
drivers, the core code also caches the stats for a short period of
time so that someone attempting to read all of the stats in a
given device's directory will not result in a stats generation
call per file read.
Future work will attempt to standardize just the shared stats
elements, and possibly add a method to get the stats via netlink
in addition to sysfs.
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Mark Bloch <markb@mellanox.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
[ Add caching, make structure names more informative, add i40iw support,
other significant rewrites from the original patch ]
2016-05-16 20:49:33 +03:00
struct attribute_group * hw_stats_ag ;
struct rdma_hw_stats * hw_stats ;
2015-05-14 03:02:58 +03:00
2017-01-10 03:02:14 +03:00
# ifdef CONFIG_CGROUP_RDMA
struct rdmacg_device cg_device ;
# endif
2017-06-18 14:39:59 +03:00
u32 index ;
2019-02-18 23:25:47 +03:00
struct rdma_restrack_root * res ;
2017-06-18 14:39:59 +03:00
2018-11-12 23:59:50 +03:00
const struct uapi_definition * driver_def ;
2019-01-11 00:02:24 +03:00
RDMA/core: Sync unregistration with netlink commands
When the rdma device is getting removed, get resource info can race with
device removal, as below:
CPU-0 CPU-1
-------- --------
rdma_nl_rcv_msg()
nldev_res_get_cq_dumpit()
mutex_lock(device_lock);
get device reference
mutex_unlock(device_lock); [..]
ib_unregister_device()
/* Valid reference to
* device->dev exists.
*/
ib_dealloc_device()
[..]
provider->fill_res_entry();
Even though device object is not freed, fill_res_entry() can get called on
device which doesn't have a driver anymore. Kernel core device reference
count is not sufficient, as this only keeps the structure valid, and
doesn't guarantee the driver is still loaded.
Similar race can occur with device renaming and device removal, where
device_rename() tries to rename an unregistered device. This is fine
for devices of a class which is not net namespace aware, but it is
incorrect for the net namespace aware class coming in a subsequent series. If a
class is net namespace aware, then the below [1] call trace is observed in
above situation.
Therefore, to avoid the race, keep a reference count and let device
unregistration wait until all netlink users drop the reference.
[1] Call trace:
kernfs: ns required in 'infiniband' for 'mlx5_0'
WARNING: CPU: 18 PID: 44270 at fs/kernfs/dir.c:842 kernfs_find_ns+0x104/0x120
libahci i2c_core mlxfw libata dca [last unloaded: devlink]
RIP: 0010:kernfs_find_ns+0x104/0x120
Call Trace:
kernfs_find_and_get_ns+0x2e/0x50
sysfs_rename_link_ns+0x40/0xb0
device_rename+0xb2/0xf0
ib_device_rename+0xb3/0x100 [ib_core]
nldev_set_doit+0x165/0x190 [ib_core]
rdma_nl_rcv_msg+0x249/0x250 [ib_core]
? netlink_deliver_tap+0x8f/0x3e0
rdma_nl_rcv+0xd6/0x120 [ib_core]
netlink_unicast+0x17c/0x230
netlink_sendmsg+0x2f0/0x3e0
sock_sendmsg+0x30/0x40
__sys_sendto+0xdc/0x160
Fixes: da5c85078215 ("RDMA/nldev: add driver-specific resource tracking")
Signed-off-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
2018-11-16 04:50:57 +03:00
/*
2019-01-11 00:02:24 +03:00
* Positive refcount indicates that the device is currently
* registered and cannot be unregistered .
2018-11-16 04:50:57 +03:00
*/
refcount_t refcount ;
struct completion unreg_completion ;
2019-02-13 07:12:53 +03:00
struct work_struct unregistration_work ;
2019-02-15 22:03:53 +03:00
const struct rdma_link_ops * link_ops ;
2019-02-26 14:56:13 +03:00
/* Protects compat_devs xarray modifications */
struct mutex compat_devs_mutex ;
/* Maintains compat devices for each net namespace */
struct xarray compat_devs ;
2019-04-29 14:59:06 +03:00
/* Used by iWarp CM */
char iw_ifname [ IFNAMSIZ ] ;
u32 iw_driver_flags ;
2005-04-17 02:20:36 +04:00
} ;
2019-06-14 03:38:18 +03:00
struct ib_client_nl_info ;
2005-04-17 02:20:36 +04:00
struct ib_client {
2019-02-07 08:41:52 +03:00
const char * name ;
2005-04-17 02:20:36 +04:00
void ( * add ) ( struct ib_device * ) ;
2015-07-30 17:50:14 +03:00
void ( * remove ) ( struct ib_device * , void * client_data ) ;
2019-06-14 03:38:18 +03:00
int ( * get_nl_info ) ( struct ib_device * ibdev , void * client_data ,
struct ib_client_nl_info * res ) ;
int ( * get_global_nl_info ) ( struct ib_client_nl_info * res ) ;
2005-04-17 02:20:36 +04:00
2015-07-30 17:50:15 +03:00
/* Returns the net_dev belonging to this ib_client and matching the
* given parameters .
* @ dev : An RDMA device that the net_dev uses for communication .
* @ port : A physical port number on the RDMA device .
* @ pkey : P_Key that the net_dev uses if applicable .
* @ gid : A GID that the net_dev uses to communicate .
* @ addr : An IP address the net_dev is configured with .
* @ client_data : The device ' s client data set by ib_set_client_data ( ) .
*
* An ib_client that implements a net_dev on top of RDMA devices
* ( such as IP over IB ) should implement this callback , allowing the
* rdma_cm module to find the right net_dev for a given request .
*
* The caller is responsible for calling dev_put on the returned
* netdev . */
struct net_device * ( * get_net_dev_by_params ) (
struct ib_device * dev ,
u8 port ,
u16 pkey ,
const union ib_gid * gid ,
const struct sockaddr * addr ,
void * client_data ) ;
2005-04-17 02:20:36 +04:00
struct list_head list ;
2019-02-07 08:41:52 +03:00
u32 client_id ;
2019-01-22 11:08:22 +03:00
/* kverbs are not required by the client */
u8 no_kverbs_req : 1 ;
2005-04-17 02:20:36 +04:00
} ;
2019-05-06 16:53:33 +03:00
/*
* IB block DMA iterator
*
* Iterates the DMA - mapped SGL in contiguous memory blocks aligned
* to a HW supported page size .
*/
struct ib_block_iter {
/* internal states */
struct scatterlist * __sg ; /* sg holding the current aligned block */
dma_addr_t __dma_addr ; /* unaligned DMA address of this block */
unsigned int __sg_nents ; /* number of SG entries */
unsigned int __sg_advance ; /* number of bytes to advance in sg in next step */
unsigned int __pg_bit ; /* alignment of current block */
} ;
2019-01-30 13:49:11 +03:00
struct ib_device * _ib_alloc_device ( size_t size ) ;
# define ib_alloc_device(drv_struct, member) \
container_of ( _ib_alloc_device ( sizeof ( struct drv_struct ) + \
BUILD_BUG_ON_ZERO ( offsetof ( \
struct drv_struct , member ) ) ) , \
struct drv_struct , member )
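As an illustration only (not part of this header), a minimal sketch of how a driver might allocate, register and release its device with ib_alloc_device(); the foo_ib_dev structure and foo_probe_one() are hypothetical:
static int foo_probe_one(void)
{
        struct foo_ib_dev {
                struct ib_device ibdev;         /* must be the first member (offset 0) */
                void __iomem *db_base;          /* hypothetical driver state */
        } *dev;
        int ret;

        dev = ib_alloc_device(foo_ib_dev, ibdev);
        if (!dev)
                return -ENOMEM;

        /* ... set device ops, port data, etc. before registering ... */
        ret = ib_register_device(&dev->ibdev, "foo%d");
        if (ret)
                ib_dealloc_device(&dev->ibdev);
        return ret;
}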
2005-04-17 02:20:36 +04:00
void ib_dealloc_device ( struct ib_device * device ) ;
2017-06-27 16:49:53 +03:00
void ib_get_device_fw_str ( struct ib_device * device , char * str ) ;
2016-06-15 09:21:56 +03:00
2018-12-18 15:28:30 +03:00
int ib_register_device ( struct ib_device * device , const char * name ) ;
2005-04-17 02:20:36 +04:00
void ib_unregister_device ( struct ib_device * device ) ;
2019-02-13 07:12:53 +03:00
void ib_unregister_driver ( enum rdma_driver_id driver_id ) ;
void ib_unregister_device_and_put ( struct ib_device * device ) ;
void ib_unregister_device_queued ( struct ib_device * ib_dev ) ;
2005-04-17 02:20:36 +04:00
int ib_register_client ( struct ib_client * client ) ;
void ib_unregister_client ( struct ib_client * client ) ;
2019-05-06 16:53:33 +03:00
void __rdma_block_iter_start ( struct ib_block_iter * biter ,
struct scatterlist * sglist ,
unsigned int nents ,
unsigned long pgsz ) ;
bool __rdma_block_iter_next ( struct ib_block_iter * biter ) ;
/**
* rdma_block_iter_dma_address - get the aligned dma address of the current
* block held by the block iterator .
* @ biter : block iterator holding the memory block
*/
static inline dma_addr_t
rdma_block_iter_dma_address ( struct ib_block_iter * biter )
{
return biter - > __dma_addr & ~ ( BIT_ULL ( biter - > __pg_bit ) - 1 ) ;
}
/**
* rdma_for_each_block - iterate over contiguous memory blocks of the sg list
* @ sglist : sglist to iterate over
* @ biter : block iterator holding the memory block
* @ nents : maximum number of sg entries to iterate over
* @ pgsz : best HW supported page size to use
*
* Callers may use rdma_block_iter_dma_address ( ) to get each
* block's aligned DMA address .
*/
# define rdma_for_each_block(sglist, biter, nents, pgsz) \
for ( __rdma_block_iter_start ( biter , sglist , nents , \
pgsz ) ; \
__rdma_block_iter_next ( biter ) ; )
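As an illustration only, a minimal sketch of filling a page-address array from a DMA-mapped SG list using the block iterator; foo_fill_page_list() and its arguments are hypothetical:
static void foo_fill_page_list(struct scatterlist *sglist, unsigned int nents,
                               unsigned long pg_sz, u64 *page_list)
{
        struct ib_block_iter biter;
        unsigned int i = 0;

        /* walk the SG list in pg_sz-aligned contiguous blocks */
        rdma_for_each_block(sglist, &biter, nents, pg_sz)
                page_list[i++] = rdma_block_iter_dma_address(&biter);
}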
2019-02-07 08:41:53 +03:00
/**
* ib_get_client_data - Get IB client context
* @ device : Device to get context for
* @ client : Client to get context for
*
* ib_get_client_data ( ) returns the client context data set with
* ib_set_client_data ( ) . This can only be called while the client is
* registered to the device , once the ib_client remove ( ) callback returns this
* cannot be called .
*/
static inline void * ib_get_client_data ( struct ib_device * device ,
struct ib_client * client )
{
return xa_load ( & device - > client_data , client - > client_id ) ;
}
2005-04-17 02:20:36 +04:00
void ib_set_client_data ( struct ib_device * device , struct ib_client * client ,
void * data ) ;
2018-12-10 22:09:30 +03:00
void ib_set_device_ops ( struct ib_device * device ,
const struct ib_device_ops * ops ) ;
2005-04-17 02:20:36 +04:00
2018-09-16 20:43:08 +03:00
# if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int rdma_user_mmap_io ( struct ib_ucontext * ucontext , struct vm_area_struct * vma ,
unsigned long pfn , unsigned long size , pgprot_t prot ) ;
# else
static inline int rdma_user_mmap_io ( struct ib_ucontext * ucontext ,
struct vm_area_struct * vma ,
unsigned long pfn , unsigned long size ,
pgprot_t prot )
{
return - EINVAL ;
}
# endif
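As an illustration only, a minimal sketch of a driver mmap handler exposing one hypothetical doorbell page to userspace via rdma_user_mmap_io(); foo_mmap() and the pfn value are hypothetical:
static int foo_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
{
        unsigned long pfn = 0x1000;     /* hypothetical BAR page frame */

        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;

        /* insert a non-cached mapping of the device page into the VMA */
        return rdma_user_mmap_io(ucontext, vma, pfn, PAGE_SIZE,
                                 pgprot_noncached(vma->vm_page_prot));
}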
2005-07-08 04:57:10 +04:00
static inline int ib_copy_from_udata ( void * dest , struct ib_udata * udata , size_t len )
{
return copy_from_user ( dest , udata - > inbuf , len ) ? - EFAULT : 0 ;
}
static inline int ib_copy_to_udata ( struct ib_udata * udata , void * src , size_t len )
{
2015-02-06 00:10:18 +03:00
return copy_to_user ( udata - > outbuf , src , len ) ? - EFAULT : 0 ;
2005-07-08 04:57:10 +04:00
}
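As an illustration only, a minimal sketch of a verb handler exchanging a request/response with userspace through ib_copy_from_udata() and ib_copy_to_udata(); the foo_* structures and foo_create() are hypothetical:
struct foo_create_req {
        __u32 flags;
};

struct foo_create_resp {
        __u32 handle;
};

static int foo_create(struct ib_udata *udata)
{
        struct foo_create_req req = {};
        struct foo_create_resp resp = {};
        int err;

        err = ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen));
        if (err)
                return err;

        resp.handle = 1;        /* hypothetical result */
        return ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
}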
2018-03-19 16:02:36 +03:00
static inline bool ib_is_buffer_cleared ( const void __user * p ,
size_t len )
2015-12-15 21:30:10 +03:00
{
2016-08-22 19:23:24 +03:00
bool ret ;
2015-12-15 21:30:10 +03:00
u8 * buf ;
if ( len > USHRT_MAX )
return false ;
2016-08-22 19:23:24 +03:00
buf = memdup_user ( p , len ) ;
if ( IS_ERR ( buf ) )
2015-12-15 21:30:10 +03:00
return false ;
ret = ! memchr_inv ( buf , 0 , len ) ;
kfree ( buf ) ;
return ret ;
}
2018-03-19 16:02:36 +03:00
static inline bool ib_is_udata_cleared ( struct ib_udata * udata ,
size_t offset ,
size_t len )
{
return ib_is_buffer_cleared ( udata - > inbuf + offset , len ) ;
}
2018-06-20 17:11:39 +03:00
/**
* ib_is_destroy_retryable - Check whether the uobject destruction
* is retryable .
* @ ret : The initial destruction return code
* @ why : remove reason
* @ uobj : The uobject that is destroyed
*
* This function is a helper function that IB layer and low - level drivers
* can use to consider whether the destruction of the given uobject is
* retry - able .
* It checks the original return code ; if it was not success , the destruction
* is retryable according to the ucontext state ( i . e . cleanup_retryable ) and
* the remove reason ( i . e . why ) .
* Must be called with the object locked for destroy .
*/
static inline bool ib_is_destroy_retryable ( int ret , enum rdma_remove_reason why ,
struct ib_uobject * uobj )
{
return ret & & ( why = = RDMA_REMOVE_DESTROY | |
uobj - > context - > cleanup_retryable ) ;
}
/**
* ib_destroy_usecnt - Called during destruction to check the usecnt
* @ usecnt : The usecnt atomic
* @ why : remove reason
* @ uobj : The uobject that is destroyed
*
* Non - zero usecnts will block destruction unless destruction was triggered by
* a ucontext cleanup .
*/
static inline int ib_destroy_usecnt ( atomic_t * usecnt ,
enum rdma_remove_reason why ,
struct ib_uobject * uobj )
{
if ( atomic_read ( usecnt ) & & ib_is_destroy_retryable ( - EBUSY , why , uobj ) )
return - EBUSY ;
return 0 ;
}
2006-02-13 23:48:12 +03:00
/**
* ib_modify_qp_is_ok - Check that the supplied attribute mask
* contains all required attributes and no attributes not allowed for
* the given QP state transition .
* @ cur_state : Current QP state
* @ next_state : Next QP state
* @ type : QP type
* @ mask : Mask of supplied QP attributes
*
* This function is a helper function that a low - level driver ' s
* modify_qp method can use to validate the consumer ' s input . It
* checks that cur_state and next_state are valid QP states , that a
* transition from cur_state to next_state is allowed by the IB spec ,
* and that the attribute mask supplied is allowed for the transition .
*/
2018-03-11 14:51:35 +03:00
bool ib_modify_qp_is_ok ( enum ib_qp_state cur_state , enum ib_qp_state next_state ,
2018-10-02 16:11:21 +03:00
enum ib_qp_type type , enum ib_qp_attr_mask mask ) ;
2006-02-13 23:48:12 +03:00
2017-08-17 15:50:36 +03:00
void ib_register_event_handler ( struct ib_event_handler * event_handler ) ;
void ib_unregister_event_handler ( struct ib_event_handler * event_handler ) ;
2005-04-17 02:20:36 +04:00
void ib_dispatch_event ( struct ib_event * event ) ;
int ib_query_port ( struct ib_device * device ,
u8 port_num , struct ib_port_attr * port_attr ) ;
2010-09-28 04:51:10 +04:00
enum rdma_link_layer rdma_port_get_link_layer ( struct ib_device * device ,
u8 port_num ) ;
2015-06-29 16:57:00 +03:00
/**
* rdma_cap_ib_switch - Check if the device is IB switch
* @ device : Device to check
*
* The device driver is responsible for setting the is_switch bit
* in the ib_device structure at init time .
*
* Return : true if the device is IB switch .
*/
static inline bool rdma_cap_ib_switch ( const struct ib_device * device )
{
return device - > is_switch ;
}
2015-05-14 03:02:55 +03:00
/**
* rdma_start_port - Return the first valid port number for the device
* specified
*
* @ device : Device to be checked
*
* Return start port number
*/
static inline u8 rdma_start_port ( const struct ib_device * device )
{
2015-06-29 16:57:00 +03:00
return rdma_cap_ib_switch ( device ) ? 0 : 1 ;
2015-05-14 03:02:55 +03:00
}
2019-02-13 07:12:47 +03:00
/**
* rdma_for_each_port - Iterate over all valid port numbers of the IB device
* @ device - The struct ib_device * to iterate over
* @ iter - The unsigned int to store the port number
*/
# define rdma_for_each_port(device, iter) \
for ( iter = rdma_start_port ( device + BUILD_BUG_ON_ZERO ( ! __same_type ( \
unsigned int , iter ) ) ) ; \
iter < = rdma_end_port ( device ) ; ( iter ) + + )
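As an illustration only, a minimal sketch of walking every valid port with rdma_for_each_port(); foo_port_count() is hypothetical, and the iterator must be an unsigned int as enforced by the macro:
static unsigned int foo_port_count(struct ib_device *device)
{
        unsigned int port;
        unsigned int cnt = 0;

        /* visits rdma_start_port(device) .. rdma_end_port(device) inclusive */
        rdma_for_each_port(device, port)
                cnt++;
        return cnt;
}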
2015-05-14 03:02:55 +03:00
/**
* rdma_end_port - Return the last valid port number for the device
* specified
*
* @ device : Device to be checked
*
* Return last port number
*/
static inline u8 rdma_end_port ( const struct ib_device * device )
{
2015-06-29 16:57:00 +03:00
return rdma_cap_ib_switch ( device ) ? 0 : device - > phys_port_cnt ;
2015-05-14 03:02:55 +03:00
}
2017-01-25 19:41:37 +03:00
static inline int rdma_is_port_valid ( const struct ib_device * device ,
unsigned int port )
{
return ( port > = rdma_start_port ( device ) & &
port < = rdma_end_port ( device ) ) ;
}
2018-07-04 15:57:50 +03:00
static inline bool rdma_is_grh_required ( const struct ib_device * device ,
u8 port_num )
{
2019-02-13 07:12:48 +03:00
return device - > port_data [ port_num ] . immutable . core_cap_flags &
RDMA_CORE_PORT_IB_GRH_REQUIRED ;
2018-07-04 15:57:50 +03:00
}
2015-06-01 00:15:29 +03:00
static inline bool rdma_protocol_ib ( const struct ib_device * device , u8 port_num )
2015-05-05 15:50:19 +03:00
{
2019-02-13 07:12:48 +03:00
return device - > port_data [ port_num ] . immutable . core_cap_flags &
RDMA_CORE_CAP_PROT_IB ;
2015-05-05 15:50:19 +03:00
}
2015-06-01 00:15:29 +03:00
static inline bool rdma_protocol_roce ( const struct ib_device * device , u8 port_num )
2015-12-23 15:56:50 +03:00
{
2019-02-13 07:12:48 +03:00
return device - > port_data [ port_num ] . immutable . core_cap_flags &
( RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP ) ;
2015-12-23 15:56:50 +03:00
}
static inline bool rdma_protocol_roce_udp_encap ( const struct ib_device * device , u8 port_num )
{
2019-02-13 07:12:48 +03:00
return device - > port_data [ port_num ] . immutable . core_cap_flags &
RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP ;
2015-12-23 15:56:50 +03:00
}
static inline bool rdma_protocol_roce_eth_encap ( const struct ib_device * device , u8 port_num )
2015-05-05 15:50:19 +03:00
{
2019-02-13 07:12:48 +03:00
return device - > port_data [ port_num ] . immutable . core_cap_flags &
RDMA_CORE_CAP_PROT_ROCE ;
2015-05-05 15:50:19 +03:00
}
2015-06-01 00:15:29 +03:00
static inline bool rdma_protocol_iwarp ( const struct ib_device * device , u8 port_num )
2015-05-05 15:50:19 +03:00
{
2019-02-13 07:12:48 +03:00
return device - > port_data [ port_num ] . immutable . core_cap_flags &
RDMA_CORE_CAP_PROT_IWARP ;
2015-05-05 15:50:19 +03:00
}
2015-06-01 00:15:29 +03:00
static inline bool rdma_ib_or_roce ( const struct ib_device * device , u8 port_num )
2015-05-05 15:50:19 +03:00
{
2015-12-23 15:56:50 +03:00
return rdma_protocol_ib ( device , port_num ) | |
rdma_protocol_roce ( device , port_num ) ;
2015-05-05 15:50:19 +03:00
}
2017-01-24 14:02:35 +03:00
static inline bool rdma_protocol_raw_packet ( const struct ib_device * device , u8 port_num )
{
2019-02-13 07:12:48 +03:00
return device - > port_data [ port_num ] . immutable . core_cap_flags &
RDMA_CORE_CAP_PROT_RAW_PACKET ;
2017-01-24 14:02:35 +03:00
}
2017-01-24 14:02:38 +03:00
static inline bool rdma_protocol_usnic ( const struct ib_device * device , u8 port_num )
{
2019-02-13 07:12:48 +03:00
return device - > port_data [ port_num ] . immutable . core_cap_flags &
RDMA_CORE_CAP_PROT_USNIC ;
2017-01-24 14:02:38 +03:00
}
2015-05-05 15:50:32 +03:00
/**
2015-05-18 11:41:45 +03:00
* rdma_cap_ib_mad - Check if the port of a device supports Infiniband
2015-05-05 15:50:32 +03:00
* Management Datagrams .
2015-05-18 11:41:45 +03:00
* @ device : Device to check
* @ port_num : Port number to check
2015-05-05 15:50:32 +03:00
*
2015-05-18 11:41:45 +03:00
* Management Datagrams ( MAD ) are a required part of the InfiniBand
* specification and are supported on all InfiniBand devices . A slightly
* extended version is also supported on OPA interfaces .
2015-05-05 15:50:32 +03:00
*
2015-05-18 11:41:45 +03:00
* Return : true if the port supports sending / receiving of MAD packets .
2015-05-05 15:50:32 +03:00
*/
2015-06-01 00:15:29 +03:00
static inline bool rdma_cap_ib_mad ( const struct ib_device * device , u8 port_num )
2015-05-05 15:50:32 +03:00
{
2019-02-13 07:12:48 +03:00
return device - > port_data [ port_num ] . immutable . core_cap_flags &
RDMA_CORE_CAP_IB_MAD ;
2015-05-05 15:50:32 +03:00
}
2015-06-06 21:38:32 +03:00
/**
* rdma_cap_opa_mad - Check if the port of device provides support for OPA
* Management Datagrams .
* @ device : Device to check
* @ port_num : Port number to check
*
* Intel OmniPath devices extend and / or replace the InfiniBand Management
* datagrams with their own versions . These OPA MADs share many but not all of
* the characteristics of InfiniBand MADs .
*
* OPA MADs differ in the following ways :
*
* 1 ) MADs are variable size up to 2 K
* IBTA defined MADs remain fixed at 256 bytes
* 2 ) OPA SMPs must carry valid PKeys
* 3 ) OPA SMP packets are a different format
*
* Return : true if the port supports OPA MAD packet formats .
*/
static inline bool rdma_cap_opa_mad ( struct ib_device * device , u8 port_num )
{
2019-03-10 18:27:46 +03:00
return device - > port_data [ port_num ] . immutable . core_cap_flags &
RDMA_CORE_CAP_OPA_MAD ;
2015-06-06 21:38:32 +03:00
}
2015-05-05 15:50:33 +03:00
/**
2015-05-18 11:41:45 +03:00
* rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
* Subnet Management Agent ( SMA ) on the Subnet Management Interface ( SMI ) .
* @ device : Device to check
* @ port_num : Port number to check
2015-05-05 15:50:33 +03:00
*
2015-05-18 11:41:45 +03:00
* Each InfiniBand node is required to provide a Subnet Management Agent
* that the subnet manager can access . Prior to the fabric being fully
* configured by the subnet manager , the SMA is accessed via a well known
* interface called the Subnet Management Interface ( SMI ) . This interface
* uses directed route packets to communicate with the SM to get around the
* chicken and egg problem of the SM needing to know what ' s on the fabric
* in order to configure the fabric , and needing to configure the fabric in
* order to send packets to the devices on the fabric . These directed
* route packets do not need the fabric fully configured in order to reach
* their destination . The SMI is the only method allowed to send
* directed route packets on an InfiniBand fabric .
2015-05-05 15:50:33 +03:00
*
2015-05-18 11:41:45 +03:00
* Return : true if the port provides an SMI .
2015-05-05 15:50:33 +03:00
*/
2015-06-01 00:15:29 +03:00
static inline bool rdma_cap_ib_smi ( const struct ib_device * device , u8 port_num )
2015-05-05 15:50:33 +03:00
{
2019-02-13 07:12:48 +03:00
return device - > port_data [ port_num ] . immutable . core_cap_flags &
RDMA_CORE_CAP_IB_SMI ;
2015-05-05 15:50:33 +03:00
}
2015-05-05 15:50:34 +03:00
/**
* rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
* Communication Manager .
2015-05-18 11:41:45 +03:00
* @ device : Device to check
* @ port_num : Port number to check
2015-05-05 15:50:34 +03:00
*
2015-05-18 11:41:45 +03:00
* The InfiniBand Communication Manager is one of many pre - defined General
* Service Agents ( GSA ) that are accessed via the General Service
* Interface ( GSI ) . Its role is to facilitate establishment of connections
* between nodes as well as other management related tasks for established
* connections .
2015-05-05 15:50:34 +03:00
*
2015-05-18 11:41:45 +03:00
* Return : true if the port supports an IB CM ( this does not guarantee that
* a CM is actually running however ) .
2015-05-05 15:50:34 +03:00
*/
2015-06-01 00:15:29 +03:00
static inline bool rdma_cap_ib_cm ( const struct ib_device * device , u8 port_num )
2015-05-05 15:50:34 +03:00
{
2019-02-13 07:12:48 +03:00
return device - > port_data [ port_num ] . immutable . core_cap_flags &
RDMA_CORE_CAP_IB_CM ;
2015-05-05 15:50:34 +03:00
}
2015-05-05 15:50:35 +03:00
/**
* rdma_cap_iw_cm - Check if the port of device has the capability IWARP
* Communication Manager .
2015-05-18 11:41:45 +03:00
* @ device : Device to check
* @ port_num : Port number to check
2015-05-05 15:50:35 +03:00
*
2015-05-18 11:41:45 +03:00
* Similar to above , but specific to iWARP connections which have a different
* management protocol than InfiniBand .
2015-05-05 15:50:35 +03:00
*
2015-05-18 11:41:45 +03:00
* Return : true if the port supports an iWARP CM ( this does not guarantee that
* a CM is actually running however ) .
2015-05-05 15:50:35 +03:00
*/
2015-06-01 00:15:29 +03:00
static inline bool rdma_cap_iw_cm ( const struct ib_device * device , u8 port_num )
2015-05-05 15:50:35 +03:00
{
2019-02-13 07:12:48 +03:00
return device - > port_data [ port_num ] . immutable . core_cap_flags &
RDMA_CORE_CAP_IW_CM ;
2015-05-05 15:50:35 +03:00
}
2015-05-05 15:50:36 +03:00
/**
* rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
* Subnet Administration .
2015-05-18 11:41:45 +03:00
* @ device : Device to check
* @ port_num : Port number to check
2015-05-05 15:50:36 +03:00
*
2015-05-18 11:41:45 +03:00
* An InfiniBand Subnet Administration ( SA ) service is a pre - defined General
* Service Agent ( GSA ) provided by the Subnet Manager ( SM ) . On InfiniBand
* fabrics , devices should resolve routes to other hosts by contacting the
* SA to query the proper route .
2015-05-05 15:50:36 +03:00
*
2015-05-18 11:41:45 +03:00
* Return : true if the port should act as a client to the fabric Subnet
* Administration interface . This does not imply that the SA service is
* running locally .
2015-05-05 15:50:36 +03:00
*/
2015-06-01 00:15:29 +03:00
static inline bool rdma_cap_ib_sa ( const struct ib_device * device , u8 port_num )
2015-05-05 15:50:36 +03:00
{
2019-02-13 07:12:48 +03:00
return device - > port_data [ port_num ] . immutable . core_cap_flags &
RDMA_CORE_CAP_IB_SA ;
2015-05-05 15:50:36 +03:00
}
2015-05-05 15:50:37 +03:00
/**
* rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
* Multicast .
2015-05-18 11:41:45 +03:00
* @ device : Device to check
* @ port_num : Port number to check
2015-05-05 15:50:37 +03:00
*
2015-05-18 11:41:45 +03:00
* InfiniBand multicast registration is more complex than normal IPv4 or
* IPv6 multicast registration . Each Host Channel Adapter must register
* with the Subnet Manager when it wishes to join a multicast group . It
* should do so only once regardless of how many queue pairs it subscribes
* to this group . And it should leave the group only after all queue pairs
* attached to the group have been detached .
2015-05-05 15:50:37 +03:00
*
2015-05-18 11:41:45 +03:00
* Return : true if the port must undertake the additional administrative
* overhead of registering / unregistering with the SM and tracking of the
* total number of queue pairs attached to the multicast group .
2015-05-05 15:50:37 +03:00
*/
2015-06-01 00:15:29 +03:00
static inline bool rdma_cap_ib_mcast ( const struct ib_device * device , u8 port_num )
2015-05-05 15:50:37 +03:00
{
return rdma_cap_ib_sa ( device , port_num ) ;
}
2015-05-05 15:50:39 +03:00
/**
* rdma_cap_af_ib - Check if the port of device has the capability
* Native Infiniband Address .
2015-05-18 11:41:45 +03:00
* @ device : Device to check
* @ port_num : Port number to check
2015-05-05 15:50:39 +03:00
*
2015-05-18 11:41:45 +03:00
* InfiniBand addressing uses a port ' s GUID + Subnet Prefix to make a default
* GID . RoCE uses a different mechanism , but still generates a GID via
* a prescribed mechanism and port specific data .
2015-05-05 15:50:39 +03:00
*
2015-05-18 11:41:45 +03:00
* Return : true if the port uses a GID address to identify devices on the
* network .
2015-05-05 15:50:39 +03:00
*/
2015-06-01 00:15:29 +03:00
static inline bool rdma_cap_af_ib ( const struct ib_device * device , u8 port_num )
2015-05-05 15:50:39 +03:00
{
2019-02-13 07:12:48 +03:00
return device - > port_data [ port_num ] . immutable . core_cap_flags &
RDMA_CORE_CAP_AF_IB ;
2015-05-05 15:50:39 +03:00
}
2015-05-05 15:50:40 +03:00
/**
* rdma_cap_eth_ah - Check if the port of device has the capability
2015-05-18 11:41:45 +03:00
* Ethernet Address Handle .
* @ device : Device to check
* @ port_num : Port number to check
2015-05-05 15:50:40 +03:00
*
2015-05-18 11:41:45 +03:00
* RoCE is InfiniBand over Ethernet , and it uses a well defined technique
* to fabricate GIDs over Ethernet / IP specific addresses native to the
* port . Normally , packet headers are generated by the sending host
* adapter , but when sending connectionless datagrams , we must manually
* inject the proper headers for the fabric we are communicating over .
2015-05-05 15:50:40 +03:00
*
2015-05-18 11:41:45 +03:00
* Return : true if we are running as a RoCE port and must force the
* addition of a Global Route Header built from our Ethernet Address
* Handle into our header list for connectionless packets .
2015-05-05 15:50:40 +03:00
*/
2015-06-01 00:15:29 +03:00
static inline bool rdma_cap_eth_ah ( const struct ib_device * device , u8 port_num )
2015-05-05 15:50:40 +03:00
{
2019-02-13 07:12:48 +03:00
return device - > port_data [ port_num ] . immutable . core_cap_flags &
RDMA_CORE_CAP_ETH_AH ;
2015-05-05 15:50:40 +03:00
}
2017-03-21 02:38:09 +03:00
/**
* rdma_cap_opa_ah - Check if the port of device supports
* OPA Address handles
* @ device : Device to check
* @ port_num : Port number to check
*
* Return : true if we are running on an OPA device which supports
* the extended OPA addressing .
*/
static inline bool rdma_cap_opa_ah ( struct ib_device * device , u8 port_num )
{
2019-02-13 07:12:48 +03:00
return ( device - > port_data [ port_num ] . immutable . core_cap_flags &
2017-03-21 02:38:09 +03:00
RDMA_CORE_CAP_OPA_AH ) = = RDMA_CORE_CAP_OPA_AH ;
}
2015-06-06 21:38:29 +03:00
/**
* rdma_max_mad_size - Return the max MAD size required by this RDMA Port .
*
* @ device : Device
* @ port_num : Port number
*
* This MAD size includes the MAD headers and MAD payload . No other headers
* are included .
*
* Return the max MAD size required by the Port . Will return 0 if the port
* does not support MADs
*/
static inline size_t rdma_max_mad_size ( const struct ib_device * device , u8 port_num )
{
2019-02-13 07:12:48 +03:00
return device - > port_data [ port_num ] . immutable . max_mad_size ;
2015-06-06 21:38:29 +03:00
}
IB/core: Add RoCE GID table management
RoCE GIDs are based on IP addresses configured on Ethernet net-devices
which relate to the RDMA (RoCE) device port.
Currently, each of the low-level drivers that support RoCE (ocrdma,
mlx4) manages its own RoCE port GID table. As there's nothing which is
essentially vendor specific, we generalize that, and enhance the RDMA
core GID cache to do this job.
In order to populate the GID table, we listen for events:
(a) netdev up/down/change_addr events - if a netdev is built onto
our RoCE device, we need to add/delete its IPs. This involves
adding all GIDs related to this ndev, add default GIDs, etc.
(b) inet events - add new GIDs (according to the IP addresses)
to the table.
For programming the port RoCE GID table, providers must implement
the add_gid and del_gid callbacks.
RoCE GID management requires us to state the associated net_device
alongside the GID. This information is necessary in order to manage
the GID table. For example, when a net_device is removed, its
associated GIDs need to be removed as well.
RoCE mandates generating a default GID for each port, based on the
related net-device's IPv6 link local. In contrast to the GID based on
the regular IPv6 link-local (as we generate GID per IP address),
the default GID is also available when the net device is down (in
order to support loopback).
Locking is done as follows:
The patch modifies the GID table code both for new RoCE drivers
implementing the add_gid/del_gid callbacks and for current RoCE and
IB drivers that do not. The flows for updating the table are
different, so the locking requirements are too.
While updating RoCE GID table, protection against multiple writers is
achieved via mutex_lock(&table->lock). Since writing to a table
requires us to find an entry (possibly a free entry) in the table and
then modify it, this mutex protects both the find_gid and write_gid
ensuring the atomicity of the action.
Each entry in the GID cache is protected by rwlock. In RoCE, writing
(usually results from netdev notifier) involves invoking the vendor's
add_gid and del_gid callbacks, which could sleep.
Therefore, an invalid flag is added for each entry. Updates for RoCE are
done via a workqueue, thus sleeping is permitted.
In IB, updates are done in write_lock_irq(&device->cache.lock), thus
write_gid isn't allowed to sleep and add_gid/del_gid are not called.
When passing net-device into/out-of the GID cache, the device
is always passed held (dev_hold).
The code uses a single work item for updating all RDMA devices,
following a netdev or inet notifier.
The patch moves the cache from being a client (which was incorrect,
as the cache is part of the IB infrastructure) to being explicitly
initialized/freed when a device is registered/removed.
Signed-off-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
2015-07-30 18:33:26 +03:00
/**
* rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
* @ device : Device to check
* @ port_num : Port number to check
*
* RoCE GID table mechanism manages the various GIDs for a device .
*
* NOTE : if allocating the port ' s GID table has failed , this call will still
* return true , but any RoCE GID table API will fail .
*
* Return : true if the port uses RoCE GID table mechanism in order to manage
* its GIDs .
*/
static inline bool rdma_cap_roce_gid_table ( const struct ib_device * device ,
u8 port_num )
{
return rdma_protocol_roce ( device , port_num ) & &
2018-12-10 22:09:48 +03:00
device - > ops . add_gid & & device - > ops . del_gid ;
2015-07-30 18:33:26 +03:00
}
2016-05-03 19:01:05 +03:00
/*
* Check if the device supports READ W / INVALIDATE .
*/
static inline bool rdma_cap_read_inv ( struct ib_device * dev , u32 port_num )
{
/*
* iWarp drivers must support READ W / INVALIDATE . No other protocol
* has support for it yet .
*/
return rdma_protocol_iwarp ( dev , port_num ) ;
}
2019-05-06 16:53:32 +03:00
/**
* rdma_find_pg_bit - Find page bit given address and HW supported page sizes
*
* @ addr : address
* @ pgsz_bitmap : bitmap of HW supported page sizes
*/
static inline unsigned int rdma_find_pg_bit ( unsigned long addr ,
unsigned long pgsz_bitmap )
{
unsigned long align ;
unsigned long pgsz ;
align = addr & - addr ;
/* Find page bit such that addr is aligned to the highest supported
* HW page size
*/
pgsz = pgsz_bitmap & ~ ( - align < < 1 ) ;
if ( ! pgsz )
return __ffs ( pgsz_bitmap ) ;
return __fls ( pgsz ) ;
}
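As an illustration only, a sketch of how a caller might pick a page bit; the bitmap below is a hypothetical HW capability of 4 KiB and 2 MiB pages (for example, an address of 0x200000 yields 21, while 0x203000 falls back to 12):
static unsigned int foo_pick_pg_bit(unsigned long iova)
{
        /* hypothetical HW page-size support: 4 KiB and 2 MiB */
        unsigned long pgsz_bitmap = SZ_4K | SZ_2M;

        return rdma_find_pg_bit(iova, pgsz_bitmap);
}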
2016-03-11 23:58:38 +03:00
int ib_set_vf_link_state ( struct ib_device * device , int vf , u8 port ,
int state ) ;
int ib_get_vf_config ( struct ib_device * device , int vf , u8 port ,
struct ifla_vf_info * info ) ;
int ib_get_vf_stats ( struct ib_device * device , int vf , u8 port ,
struct ifla_vf_stats * stats ) ;
int ib_set_vf_guid ( struct ib_device * device , int vf , u8 port , u64 guid ,
int type ) ;
2005-04-17 02:20:36 +04:00
int ib_query_pkey ( struct ib_device * device ,
u8 port_num , u16 index , u16 * pkey ) ;
int ib_modify_device ( struct ib_device * device ,
int device_modify_mask ,
struct ib_device_modify * device_modify ) ;
int ib_modify_port ( struct ib_device * device ,
u8 port_num , int port_modify_mask ,
struct ib_port_modify * port_modify ) ;
2007-05-14 08:26:51 +04:00
int ib_find_gid ( struct ib_device * device , union ib_gid * gid ,
2018-03-13 17:06:12 +03:00
u8 * port_num , u16 * index ) ;
2007-05-14 08:26:51 +04:00
int ib_find_pkey ( struct ib_device * device ,
u8 port_num , u16 pkey , u16 * index ) ;
2016-09-05 13:56:17 +03:00
enum ib_pd_flags {
/*
* Create a memory registration for all memory in the system and place
* the rkey for it into pd - > unsafe_global_rkey . This can be used by
* ULPs to avoid the overhead of dynamic MRs .
*
* This flag is generally considered unsafe and must only be used in
* extremely trusted environments . Every use of it will log a warning
* in the kernel log .
*/
IB_PD_UNSAFE_GLOBAL_RKEY = 0x01 ,
} ;
2005-04-17 02:20:36 +04:00
2016-09-05 13:56:17 +03:00
struct ib_pd * __ib_alloc_pd ( struct ib_device * device , unsigned int flags ,
const char * caller ) ;
2019-03-31 19:10:05 +03:00
2016-09-05 13:56:17 +03:00
# define ib_alloc_pd(device, flags) \
2018-01-28 12:17:18 +03:00
__ib_alloc_pd ( ( device ) , ( flags ) , KBUILD_MODNAME )
2019-03-31 19:10:05 +03:00
/**
* ib_dealloc_pd_user - Deallocate kernel / user PD
* @ pd : The protection domain
* @ udata : Valid user data or NULL for kernel objects
*/
void ib_dealloc_pd_user ( struct ib_pd * pd , struct ib_udata * udata ) ;
/**
* ib_dealloc_pd - Deallocate kernel PD
* @ pd : The protection domain
*
* NOTE : for user PD use ib_dealloc_pd_user with valid udata !
*/
static inline void ib_dealloc_pd ( struct ib_pd * pd )
{
ib_dealloc_pd_user ( pd , NULL ) ;
}
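As an illustration only, a minimal sketch of a kernel consumer allocating and releasing a PD; foo_setup_pd() is hypothetical:
static int foo_setup_pd(struct ib_device *device)
{
        struct ib_pd *pd;

        pd = ib_alloc_pd(device, 0);
        if (IS_ERR(pd))
                return PTR_ERR(pd);

        /* ... create QPs/MRs against pd, use pd->local_dma_lkey for kernel DMA ... */

        ib_dealloc_pd(pd);
        return 0;
}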
2005-04-17 02:20:36 +04:00
2018-12-12 12:09:05 +03:00
enum rdma_create_ah_flags {
/* In a sleepable context */
RDMA_CREATE_AH_SLEEPABLE = BIT ( 0 ) ,
} ;
2005-04-17 02:20:36 +04:00
/**
2017-04-29 21:41:19 +03:00
* rdma_create_ah - Creates an address handle for the given address vector .
2005-04-17 02:20:36 +04:00
* @ pd : The protection domain associated with the address handle .
* @ ah_attr : The attributes of the address vector .
2018-12-12 12:09:05 +03:00
* @ flags : Create address handle flags ( see enum rdma_create_ah_flags ) .
2005-04-17 02:20:36 +04:00
*
* The address handle is used to reference a local or global destination
* in all UD QP post sends .
*/
2018-12-12 12:09:05 +03:00
struct ib_ah * rdma_create_ah ( struct ib_pd * pd , struct rdma_ah_attr * ah_attr ,
u32 flags ) ;
2005-04-17 02:20:36 +04:00
2017-10-16 08:45:12 +03:00
/**
* rdma_create_user_ah - Creates an address handle for the given address vector .
* It resolves the destination MAC address for an AH attribute of RoCE type .
* @ pd : The protection domain associated with the address handle .
* @ ah_attr : The attributes of the address vector .
* @ udata : pointer to user ' s input / output buffer information needed by the
* provider driver .
*
* It returns 0 on success and returns appropriate error code on error .
* The address handle is used to reference a local or global destination
* in all UD QP post sends .
*/
struct ib_ah * rdma_create_user_ah ( struct ib_pd * pd ,
struct rdma_ah_attr * ah_attr ,
struct ib_udata * udata ) ;
2016-11-10 12:30:56 +03:00
/**
* ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
* work completion .
* @ hdr : the L3 header to parse
* @ net_type : type of header to parse
* @ sgid : place to store source gid
* @ dgid : place to store destination gid
*/
int ib_get_gids_from_rdma_hdr ( const union rdma_network_hdr * hdr ,
enum rdma_network_type net_type ,
union ib_gid * sgid , union ib_gid * dgid ) ;
/**
* ib_get_rdma_header_version - Get the header version
* @ hdr : the L3 header to parse
*/
int ib_get_rdma_header_version ( const union rdma_network_hdr * hdr ) ;
2006-06-18 07:37:39 +04:00
/**
2017-11-14 15:52:17 +03:00
* ib_init_ah_attr_from_wc - Initializes address handle attributes from a
2006-06-18 07:37:39 +04:00
* work completion .
* @ device : Device on which the received message arrived .
* @ port_num : Port on which the received message arrived .
* @ wc : Work completion associated with the received message .
* @ grh : References the received global route header . This parameter is
* ignored unless the work completion indicates that the GRH is valid .
* @ ah_attr : Returned attributes that can be used when creating an address
* handle for replying to the message .
2018-06-19 10:59:14 +03:00
* When ib_init_ah_attr_from_wc ( ) returns success ,
* ( a ) for the IB link layer it optionally contains a reference to the SGID
* attribute when a GRH is present , and
* ( b ) for the RoCE link layer it contains a reference to the SGID attribute .
* The user must invoke rdma_cleanup_ah_attr_gid_attr ( ) to release the reference
* to SGID attributes which were initialized using ib_init_ah_attr_from_wc ( ) .
*
2006-06-18 07:37:39 +04:00
*/
2017-11-14 15:52:17 +03:00
int ib_init_ah_attr_from_wc ( struct ib_device * device , u8 port_num ,
const struct ib_wc * wc , const struct ib_grh * grh ,
struct rdma_ah_attr * ah_attr ) ;
2006-06-18 07:37:39 +04:00
2005-07-27 22:45:34 +04:00
/**
* ib_create_ah_from_wc - Creates an address handle associated with the
* sender of the specified work completion .
* @ pd : The protection domain associated with the address handle .
* @ wc : Work completion information associated with a received message .
* @ grh : References the received global route header . This parameter is
* ignored unless the work completion indicates that the GRH is valid .
* @ port_num : The outbound port number to associate with the address .
*
* The address handle is used to reference a local or global destination
* in all UD QP post sends .
*/
2015-06-01 00:15:31 +03:00
struct ib_ah * ib_create_ah_from_wc ( struct ib_pd * pd , const struct ib_wc * wc ,
const struct ib_grh * grh , u8 port_num ) ;
2005-07-27 22:45:34 +04:00
2005-04-17 02:20:36 +04:00
/**
2017-04-29 21:41:20 +03:00
* rdma_modify_ah - Modifies the address vector associated with an address
2005-04-17 02:20:36 +04:00
* handle .
* @ ah : The address handle to modify .
* @ ah_attr : The new address vector attributes to associate with the
* address handle .
*/
2017-04-29 21:41:20 +03:00
int rdma_modify_ah ( struct ib_ah * ah , struct rdma_ah_attr * ah_attr ) ;
2005-04-17 02:20:36 +04:00
/**
2017-04-29 21:41:21 +03:00
* rdma_query_ah - Queries the address vector associated with an address
2005-04-17 02:20:36 +04:00
* handle .
* @ ah : The address handle to query .
* @ ah_attr : The address vector attributes associated with the address
* handle .
*/
2017-04-29 21:41:21 +03:00
int rdma_query_ah ( struct ib_ah * ah , struct rdma_ah_attr * ah_attr ) ;
2005-04-17 02:20:36 +04:00
2018-12-12 12:09:06 +03:00
enum rdma_destroy_ah_flags {
/* In a sleepable context */
RDMA_DESTROY_AH_SLEEPABLE = BIT ( 0 ) ,
} ;
2005-04-17 02:20:36 +04:00
/**
2019-03-31 19:10:05 +03:00
* rdma_destroy_ah_user - Destroys an address handle .
2005-04-17 02:20:36 +04:00
* @ ah : The address handle to destroy .
2018-12-12 12:09:06 +03:00
* @ flags : Destroy address handle flags ( see enum rdma_destroy_ah_flags ) .
2019-03-31 19:10:05 +03:00
* @ udata : Valid user data or NULL for kernel objects
2005-04-17 02:20:36 +04:00
*/
2019-03-31 19:10:05 +03:00
int rdma_destroy_ah_user ( struct ib_ah * ah , u32 flags , struct ib_udata * udata ) ;
/**
* rdma_destroy_ah - Destroys a kernel address handle .
* @ ah : The address handle to destroy .
* @ flags : Destroy address handle flags ( see enum rdma_destroy_ah_flags ) .
*
* NOTE : for user ah use rdma_destroy_ah_user with valid udata !
*/
static inline int rdma_destroy_ah ( struct ib_ah * ah , u32 flags )
{
return rdma_destroy_ah_user ( ah , flags , NULL ) ;
}
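As an illustration only, a minimal sketch of a UD responder creating an address handle from a received work completion and destroying it after use; foo_reply_ud() is hypothetical:
static int foo_reply_ud(struct ib_pd *pd, const struct ib_wc *wc,
                        const struct ib_grh *grh, u8 port_num)
{
        struct ib_ah *ah;

        ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        /* ... post a UD send referencing ah ... */

        return rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
}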
2005-04-17 02:20:36 +04:00
2005-08-18 23:23:08 +04:00
/**
* ib_create_srq - Creates a SRQ associated with the specified protection
* domain .
* @ pd : The protection domain associated with the SRQ .
2006-02-23 23:13:51 +03:00
* @ srq_init_attr : A list of initial attributes required to create the
* SRQ . If SRQ creation succeeds , then the attributes are updated to
* the actual capabilities of the created SRQ .
2005-08-18 23:23:08 +04:00
*
* srq_attr - > max_wr and srq_attr - > max_sge are read to determine the
* requested size of the SRQ , and set to the actual values allocated
* on return . If ib_create_srq ( ) succeeds , then max_wr and max_sge
* will always be at least as large as the requested values .
*/
struct ib_srq * ib_create_srq ( struct ib_pd * pd ,
struct ib_srq_init_attr * srq_init_attr ) ;
/**
* ib_modify_srq - Modifies the attributes for the specified SRQ .
* @ srq : The SRQ to modify .
* @ srq_attr : On input , specifies the SRQ attributes to modify . On output ,
* the current values of selected SRQ attributes are returned .
* @ srq_attr_mask : A bit - mask used to specify which attributes of the SRQ
* are being modified .
*
* The mask may contain IB_SRQ_MAX_WR to resize the SRQ and / or
* IB_SRQ_LIMIT to set the SRQ ' s limit and request notification when
* the number of receives queued drops below the limit .
*/
int ib_modify_srq ( struct ib_srq * srq ,
struct ib_srq_attr * srq_attr ,
enum ib_srq_attr_mask srq_attr_mask ) ;
/**
* ib_query_srq - Returns the attribute list and current values for the
* specified SRQ .
* @ srq : The SRQ to query .
* @ srq_attr : The attributes of the specified SRQ .
*/
int ib_query_srq ( struct ib_srq * srq ,
struct ib_srq_attr * srq_attr ) ;
/**
2019-03-31 19:10:05 +03:00
* ib_destroy_srq_user - Destroys the specified SRQ .
* @ srq : The SRQ to destroy .
* @ udata : Valid user data or NULL for kernel objects
*/
int ib_destroy_srq_user ( struct ib_srq * srq , struct ib_udata * udata ) ;
/**
* ib_destroy_srq - Destroys the specified kernel SRQ .
2005-08-18 23:23:08 +04:00
* @ srq : The SRQ to destroy .
2019-03-31 19:10:05 +03:00
*
* NOTE : for user srq use ib_destroy_srq_user with valid udata !
2005-08-18 23:23:08 +04:00
*/
2019-03-31 19:10:05 +03:00
static inline int ib_destroy_srq ( struct ib_srq * srq )
{
return ib_destroy_srq_user ( srq , NULL ) ;
}
2005-08-18 23:23:08 +04:00
/**
* ib_post_srq_recv - Posts a list of work requests to the specified SRQ .
* @ srq : The SRQ to post the work request on .
* @ recv_wr : A list of work requests to post on the receive queue .
* @ bad_recv_wr : On an immediate failure , this parameter will reference
* the work request that failed to be posted on the QP .
*/
static inline int ib_post_srq_recv ( struct ib_srq * srq ,
2018-07-18 19:25:32 +03:00
const struct ib_recv_wr * recv_wr ,
const struct ib_recv_wr * * bad_recv_wr )
2005-08-18 23:23:08 +04:00
{
2018-07-18 19:25:32 +03:00
const struct ib_recv_wr * dummy ;
2018-07-18 19:25:16 +03:00
2018-12-10 22:09:48 +03:00
return srq - > device - > ops . post_srq_recv ( srq , recv_wr ,
bad_recv_wr ? : & dummy ) ;
2005-08-18 23:23:08 +04:00
}
2005-04-17 02:20:36 +04:00
/**
2019-03-31 19:10:05 +03:00
* ib_create_qp_user - Creates a QP associated with the specified protection
2005-04-17 02:20:36 +04:00
* domain .
* @ pd : The protection domain associated with the QP .
2006-02-23 23:13:51 +03:00
* @ qp_init_attr : A list of initial attributes required to create the
* QP . If QP creation succeeds , then the attributes are updated to
* the actual capabilities of the created QP .
2019-03-31 19:10:05 +03:00
* @ udata : Valid user data or NULL for kernel objects
2005-04-17 02:20:36 +04:00
*/
2019-03-31 19:10:05 +03:00
struct ib_qp * ib_create_qp_user ( struct ib_pd * pd ,
struct ib_qp_init_attr * qp_init_attr ,
struct ib_udata * udata ) ;
/**
* ib_create_qp - Creates a kernel QP associated with the specified protection
* domain .
* @ pd : The protection domain associated with the QP .
* @ qp_init_attr : A list of initial attributes required to create the
* QP . If QP creation succeeds , then the attributes are updated to
* the actual capabilities of the created QP .
* @ udata : Valid user data or NULL for kernel objects
*
* NOTE : for user qp use ib_create_qp_user with valid udata !
*/
static inline struct ib_qp * ib_create_qp ( struct ib_pd * pd ,
struct ib_qp_init_attr * qp_init_attr )
{
return ib_create_qp_user ( pd , qp_init_attr , NULL ) ;
}
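As an illustration only, a minimal sketch of a kernel ULP creating an RC QP with hypothetical queue depths; foo_create_rc_qp() is hypothetical:
static struct ib_qp *foo_create_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
{
        struct ib_qp_init_attr attr = {
                .qp_type = IB_QPT_RC,
                .send_cq = cq,
                .recv_cq = cq,
                .cap = {
                        .max_send_wr  = 16,
                        .max_recv_wr  = 16,
                        .max_send_sge = 1,
                        .max_recv_sge = 1,
                },
        };

        /* returns an ERR_PTR on failure */
        return ib_create_qp(pd, &attr);
}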
2005-04-17 02:20:36 +04:00
2017-05-23 11:26:08 +03:00
/**
* ib_modify_qp_with_udata - Modifies the attributes for the specified QP .
* @ qp : The QP to modify .
* @ attr : On input , specifies the QP attributes to modify . On output ,
* the current values of selected QP attributes are returned .
* @ attr_mask : A bit - mask used to specify which attributes of the QP
* are being modified .
* @ udata : pointer to user ' s input / output buffer information .
* It returns 0 on success and returns appropriate error code on error .
*/
int ib_modify_qp_with_udata ( struct ib_qp * qp ,
struct ib_qp_attr * attr ,
int attr_mask ,
struct ib_udata * udata ) ;
2005-04-17 02:20:36 +04:00
/**
* ib_modify_qp - Modifies the attributes for the specified QP and then
* transitions the QP to the given state .
* @ qp : The QP to modify .
* @ qp_attr : On input , specifies the QP attributes to modify . On output ,
* the current values of selected QP attributes are returned .
* @ qp_attr_mask : A bit - mask used to specify which attributes of the QP
* are being modified .
*/
int ib_modify_qp ( struct ib_qp * qp ,
struct ib_qp_attr * qp_attr ,
int qp_attr_mask ) ;
/**
* ib_query_qp - Returns the attribute list and current values for the
* specified QP .
* @ qp : The QP to query .
* @ qp_attr : The attributes of the specified QP .
* @ qp_attr_mask : A bit - mask used to select specific attributes to query .
* @ qp_init_attr : Additional attributes of the selected QP .
*
* The qp_attr_mask may be used to limit the query to gathering only the
* selected attributes .
*/
int ib_query_qp ( struct ib_qp * qp ,
struct ib_qp_attr * qp_attr ,
int qp_attr_mask ,
struct ib_qp_init_attr * qp_init_attr ) ;
/**
* ib_destroy_qp_user - Destroys the specified QP .
* @ qp : The QP to destroy .
2019-03-31 19:10:05 +03:00
* @ udata : Valid udata or NULL for kernel objects
2005-04-17 02:20:36 +04:00
*/
2019-03-31 19:10:05 +03:00
int ib_destroy_qp_user ( struct ib_qp * qp , struct ib_udata * udata ) ;
/**
* ib_destroy_qp - Destroys the specified kernel QP .
* @ qp : The QP to destroy .
*
* NOTE : for user qp use ib_destroy_qp_user with valid udata !
*/
static inline int ib_destroy_qp ( struct ib_qp * qp )
{
return ib_destroy_qp_user ( qp , NULL ) ;
}
2005-04-17 02:20:36 +04:00
2011-05-27 10:06:44 +04:00
/**
2011-08-09 02:31:51 +04:00
* ib_open_qp - Obtain a reference to an existing sharable QP .
* @ xrcd - XRC domain
* @ qp_open_attr : Attributes identifying the QP to open .
*
* Returns a reference to a sharable QP .
*/
struct ib_qp * ib_open_qp ( struct ib_xrcd * xrcd ,
struct ib_qp_open_attr * qp_open_attr ) ;
/**
* ib_close_qp - Release an external reference to a QP .
2011-05-27 10:06:44 +04:00
* @ qp : The QP handle to release
*
2011-08-09 02:31:51 +04:00
* The opened QP handle is released by the caller . The underlying
* shared QP is not destroyed until all internal references are released .
2011-05-27 10:06:44 +04:00
*/
2011-08-09 02:31:51 +04:00
int ib_close_qp ( struct ib_qp * qp ) ;
2011-05-27 10:06:44 +04:00
2005-04-17 02:20:36 +04:00
/**
* ib_post_send - Posts a list of work requests to the send queue of
* the specified QP .
* @ qp : The QP to post the work request on .
* @ send_wr : A list of work requests to post on the send queue .
* @ bad_send_wr : On an immediate failure , this parameter will reference
* the work request that failed to be posted on the QP .
2009-12-10 01:20:04 +03:00
*
* While IBA Vol . 1 section 11.4 .1 .1 specifies that if an immediate
* error is returned , the QP state shall not be affected ,
* ib_post_send ( ) will return an immediate error after queueing any
* earlier work requests in the list .
2005-04-17 02:20:36 +04:00
*/
static inline int ib_post_send ( struct ib_qp * qp ,
2018-07-18 19:25:32 +03:00
const struct ib_send_wr * send_wr ,
const struct ib_send_wr * * bad_send_wr )
2005-04-17 02:20:36 +04:00
{
2018-07-18 19:25:32 +03:00
const struct ib_send_wr * dummy ;
2018-07-18 19:25:16 +03:00
2018-12-10 22:09:48 +03:00
return qp - > device - > ops . post_send ( qp , send_wr , bad_send_wr ? : & dummy ) ;
2005-04-17 02:20:36 +04:00
}
/**
* ib_post_recv - Posts a list of work requests to the receive queue of
* the specified QP .
* @ qp : The QP to post the work request on .
* @ recv_wr : A list of work requests to post on the receive queue .
* @ bad_recv_wr : On an immediate failure , this parameter will reference
* the work request that failed to be posted on the QP .
*/
static inline int ib_post_recv ( struct ib_qp * qp ,
2018-07-18 19:25:32 +03:00
const struct ib_recv_wr * recv_wr ,
const struct ib_recv_wr * * bad_recv_wr )
2005-04-17 02:20:36 +04:00
{
2018-07-18 19:25:32 +03:00
const struct ib_recv_wr * dummy ;
2018-07-18 19:25:16 +03:00
2018-12-10 22:09:48 +03:00
return qp - > device - > ops . post_recv ( qp , recv_wr , bad_recv_wr ? : & dummy ) ;
2005-04-17 02:20:36 +04:00
}
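As an illustration only, a minimal sketch of posting a single receive work request to a QP; foo_post_one_recv() and its arguments (a pre-registered, DMA-mapped buffer) are hypothetical:
static int foo_post_one_recv(struct ib_qp *qp, u64 dma_addr, u32 len, u32 lkey)
{
        struct ib_sge sge = {
                .addr   = dma_addr,
                .length = len,
                .lkey   = lkey,
        };
        struct ib_recv_wr wr = {
                .wr_id   = 1,           /* hypothetical completion cookie */
                .sg_list = &sge,
                .num_sge = 1,
        };

        /* bad_recv_wr may be NULL; the core substitutes a dummy pointer */
        return ib_post_recv(qp, &wr, NULL);
}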
2019-03-31 19:10:05 +03:00
struct ib_cq * __ib_alloc_cq_user ( struct ib_device * dev , void * private ,
int nr_cqe , int comp_vector ,
enum ib_poll_context poll_ctx ,
const char * caller , struct ib_udata * udata ) ;
/**
* ib_alloc_cq_user : Allocate kernel / user CQ
* @ dev : The IB device
* @ private : Private data attached to the CQE
* @ nr_cqe : Number of CQEs in the CQ
* @ comp_vector : Completion vector used for the IRQs
* @ poll_ctx : Context used for polling the CQ
* @ udata : Valid user data or NULL for kernel objects
*/
static inline struct ib_cq * ib_alloc_cq_user ( struct ib_device * dev ,
void * private , int nr_cqe ,
int comp_vector ,
enum ib_poll_context poll_ctx ,
struct ib_udata * udata )
{
return __ib_alloc_cq_user ( dev , private , nr_cqe , comp_vector , poll_ctx ,
KBUILD_MODNAME , udata ) ;
}
/**
* ib_alloc_cq : Allocate kernel CQ
* @ dev : The IB device
* @ private : Private data attached to the CQE
* @ nr_cqe : Number of CQEs in the CQ
* @ comp_vector : Completion vector used for the IRQs
* @ poll_ctx : Context used for polling the CQ
*
* NOTE : for user cq use ib_alloc_cq_user with valid udata !
*/
static inline struct ib_cq * ib_alloc_cq ( struct ib_device * dev , void * private ,
int nr_cqe , int comp_vector ,
enum ib_poll_context poll_ctx )
{
return ib_alloc_cq_user ( dev , private , nr_cqe , comp_vector , poll_ctx ,
NULL ) ;
}
/**
* ib_free_cq_user - Free kernel / user CQ
* @ cq : The CQ to free
* @ udata : Valid user data or NULL for kernel objects
*/
void ib_free_cq_user ( struct ib_cq * cq , struct ib_udata * udata ) ;
/**
* ib_free_cq - Free kernel CQ
* @ cq : The CQ to free
*
* NOTE : for user cq use ib_free_cq_user with valid udata !
*/
static inline void ib_free_cq ( struct ib_cq * cq )
{
ib_free_cq_user ( cq , NULL ) ;
}
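As an illustration only, a minimal sketch of a kernel ULP allocating and freeing a CQ polled from soft-IRQ context; foo_setup_cq() and the sizes are hypothetical:
static int foo_setup_cq(struct ib_device *dev)
{
        struct ib_cq *cq;

        /* 256 CQEs on completion vector 0, polled via IB_POLL_SOFTIRQ */
        cq = ib_alloc_cq(dev, NULL, 256, 0, IB_POLL_SOFTIRQ);
        if (IS_ERR(cq))
                return PTR_ERR(cq);

        /* ... attach QPs, run I/O ... */

        ib_free_cq(cq);
        return 0;
}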
2018-01-28 12:17:19 +03:00
2015-12-11 22:53:03 +03:00
int ib_process_cq_direct ( struct ib_cq * cq , int budget ) ;
2005-04-17 02:20:36 +04:00
/**
* ib_create_cq - Creates a CQ on the specified device .
* @ device : The device on which to create the CQ .
* @ comp_handler : A user - specified callback that is invoked when a
* completion event occurs on the CQ .
* @ event_handler : A user - specified callback that is invoked when an
* asynchronous event not associated with a completion occurs on the CQ .
* @ cq_context : Context associated with the CQ returned to the user via
* the associated completion and event handlers .
2015-06-11 16:35:21 +03:00
* @ cq_attr : The attributes the CQ should be created upon .
2005-04-17 02:20:36 +04:00
*
* Users can examine the cq structure to determine the actual CQ size .
*/
2018-06-15 18:22:33 +03:00
struct ib_cq * __ib_create_cq ( struct ib_device * device ,
ib_comp_handler comp_handler ,
void ( * event_handler ) ( struct ib_event * , void * ) ,
void * cq_context ,
const struct ib_cq_init_attr * cq_attr ,
const char * caller ) ;
# define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
__ib_create_cq ( ( device ) , ( cmp_hndlr ) , ( evt_hndlr ) , ( cq_ctxt ) , ( cq_attr ) , KBUILD_MODNAME )
2005-04-17 02:20:36 +04:00
/**
* ib_resize_cq - Modifies the capacity of the CQ .
* @ cq : The CQ to resize .
* @ cqe : The minimum size of the CQ .
*
* Users can examine the cq structure to determine the actual CQ size .
*/
int ib_resize_cq ( struct ib_cq * cq , int cqe ) ;
2008-04-17 08:09:33 +04:00
/**
2017-11-13 11:51:19 +03:00
* rdma_set_cq_moderation - Modifies moderation params of the CQ
2008-04-17 08:09:33 +04:00
* @ cq : The CQ to modify .
* @ cq_count : number of CQEs that will trigger an event
* @ cq_period : max period of time in usec before triggering an event
*
*/
2017-11-13 11:51:19 +03:00
int rdma_set_cq_moderation ( struct ib_cq * cq , u16 cq_count , u16 cq_period ) ;
2008-04-17 08:09:33 +04:00
2005-04-17 02:20:36 +04:00
/**
2019-03-31 19:10:05 +03:00
* ib_destroy_cq_user - Destroys the specified CQ .
2005-04-17 02:20:36 +04:00
* @ cq : The CQ to destroy .
2019-03-31 19:10:05 +03:00
* @ udata : Valid user data or NULL for kernel objects
2005-04-17 02:20:36 +04:00
*/
2019-03-31 19:10:05 +03:00
int ib_destroy_cq_user ( struct ib_cq * cq , struct ib_udata * udata ) ;
/**
* ib_destroy_cq - Destroys the specified kernel CQ .
* @ cq : The CQ to destroy .
*
* NOTE : for user cq use ib_destroy_cq_user with valid udata !
*/
2019-05-20 09:54:21 +03:00
static inline void ib_destroy_cq ( struct ib_cq * cq )
2019-03-31 19:10:05 +03:00
{
2019-05-20 09:54:21 +03:00
ib_destroy_cq_user ( cq , NULL ) ;
2019-03-31 19:10:05 +03:00
}
2005-04-17 02:20:36 +04:00
/**
* ib_poll_cq - poll a CQ for completion ( s )
* @ cq : the CQ being polled
* @ num_entries : maximum number of completions to return
* @ wc : array of at least @ num_entries & struct ib_wc where completions
* will be returned
*
* Poll a CQ for ( possibly multiple ) completions . If the return value
* is < 0 , an error occurred . If the return value is > = 0 , it is the
* number of completions returned . If the return value is
* non - negative and < num_entries , then the CQ was emptied .
*/
static inline int ib_poll_cq ( struct ib_cq * cq , int num_entries ,
struct ib_wc * wc )
{
2018-12-10 22:09:48 +03:00
return cq - > device - > ops . poll_cq ( cq , num_entries , wc ) ;
2005-04-17 02:20:36 +04:00
}
/**
* ib_req_notify_cq - Request completion notification on a CQ .
* @ cq : The CQ to generate an event for .
IB: Return "maybe missed event" hint from ib_req_notify_cq()
The semantics defined by the InfiniBand specification say that
completion events are only generated when a completions is added to a
completion queue (CQ) after completion notification is requested. In
other words, this means that the following race is possible:
while (CQ is not empty)
ib_poll_cq(CQ);
// new completion is added after while loop is exited
ib_req_notify_cq(CQ);
// no event is generated for the existing completion
To close this race, the IB spec recommends doing another poll of the
CQ after requesting notification.
However, it is not always possible to arrange code this way (for
example, we have found that NAPI for IPoIB cannot poll after
requesting notification). Also, some hardware (eg Mellanox HCAs)
actually will generate an event for completions added before the call
to ib_req_notify_cq() -- which is allowed by the spec, since there's
no way for any upper-layer consumer to know exactly when a completion
was really added -- so the extra poll of the CQ is just a waste.
Motivated by this, we add a new flag "IB_CQ_REPORT_MISSED_EVENTS" for
ib_req_notify_cq() so that it can return a hint about whether a
completion may have been added before the request for notification.
The return value of ib_req_notify_cq() is extended so:
< 0 means an error occurred while requesting notification
== 0 means notification was requested successfully, and if
IB_CQ_REPORT_MISSED_EVENTS was passed in, then no
events were missed and it is safe to wait for another
event.
> 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was
passed in. It means that the consumer must poll the
CQ again to make sure it is empty to avoid the race
described above.
We add a flag to enable this behavior rather than turning it on
unconditionally, because checking for missed events may incur
significant overhead for some low-level drivers, and consumers that
don't care about the results of this test shouldn't be forced to pay
for the test.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2007-05-07 08:02:48 +04:00
* @ flags :
* Must contain exactly one of % IB_CQ_SOLICITED or % IB_CQ_NEXT_COMP
* to request an event on the next solicited event or next work
* completion of any type , respectively . % IB_CQ_REPORT_MISSED_EVENTS
* may also be | ed in to request a hint about missed events , as
* described below .
*
* Return Value :
* < 0 means an error occurred while requesting notification
* = = 0 means notification was requested successfully , and if
* IB_CQ_REPORT_MISSED_EVENTS was passed in , then no events
* were missed and it is safe to wait for another event . In
* this case it is guaranteed that any work completions added
* to the CQ since the last CQ poll will trigger a completion
* notification event .
* > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
* in . It means that the consumer must poll the CQ again to
* make sure it is empty to avoid missing an event because of a
* race between requesting notification and an entry being
* added to the CQ . This return value means it is possible
* ( but not guaranteed ) that a work completion has been added
* to the CQ since the last poll without triggering a
* completion notification event .
2005-04-17 02:20:36 +04:00
*/
static inline int ib_req_notify_cq ( struct ib_cq * cq ,
IB: Return "maybe missed event" hint from ib_req_notify_cq()
The semantics defined by the InfiniBand specification say that
completion events are only generated when a completions is added to a
completion queue (CQ) after completion notification is requested. In
other words, this means that the following race is possible:
while (CQ is not empty)
ib_poll_cq(CQ);
// new completion is added after while loop is exited
ib_req_notify_cq(CQ);
// no event is generated for the existing completion
To close this race, the IB spec recommends doing another poll of the
CQ after requesting notification.
However, it is not always possible to arrange code this way (for
example, we have found that NAPI for IPoIB cannot poll after
requesting notification). Also, some hardware (eg Mellanox HCAs)
actually will generate an event for completions added before the call
to ib_req_notify_cq() -- which is allowed by the spec, since there's
no way for any upper-layer consumer to know exactly when a completion
was really added -- so the extra poll of the CQ is just a waste.
Motivated by this, we add a new flag "IB_CQ_REPORT_MISSED_EVENTS" for
ib_req_notify_cq() so that it can return a hint about whether the a
completion may have been added before the request for notification.
The return value of ib_req_notify_cq() is extended so:
< 0 means an error occurred while requesting notification
== 0 means notification was requested successfully, and if
IB_CQ_REPORT_MISSED_EVENTS was passed in, then no
events were missed and it is safe to wait for another
event.
> 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was
passed in. It means that the consumer must poll the
CQ again to make sure it is empty to avoid the race
described above.
We add a flag to enable this behavior rather than turning it on
unconditionally, because checking for missed events may incur
significant overhead for some low-level drivers, and consumers that
don't care about the results of this test shouldn't be forced to pay
for the test.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2007-05-07 08:02:48 +04:00
enum ib_cq_notify_flags flags )
2005-04-17 02:20:36 +04:00
{
2018-12-10 22:09:48 +03:00
return cq - > device - > ops . req_notify_cq ( cq , flags ) ;
2005-04-17 02:20:36 +04:00
}
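A minimal sketch of the re-arm pattern implied by the return values above, using a hypothetical consumer helper example_rearm_cq(); a false return tells the caller to poll once more before sleeping.

/* Sketch only: example_rearm_cq() is a hypothetical consumer helper. */
static bool example_rearm_cq(struct ib_cq *cq)
{
	int ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				       IB_CQ_REPORT_MISSED_EVENTS);

	if (ret < 0) {
		pr_err("ib_req_notify_cq failed: %d\n", ret);
		return true;
	}
	/* > 0: a completion may already be queued, poll again before waiting */
	return ret == 0;
}

A caller would typically poll until the CQ is empty, call example_rearm_cq(), and go back to polling whenever it returns false.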
/**
* ib_req_ncomp_notif - Request completion notification when there are
* at least the specified number of unreaped completions on the CQ .
* @ cq : The CQ to generate an event for .
* @ wc_cnt : The number of unreaped completions that should be on the
* CQ before an event is generated .
*/
static inline int ib_req_ncomp_notif ( struct ib_cq * cq , int wc_cnt )
{
2018-12-10 22:09:48 +03:00
return cq - > device - > ops . req_ncomp_notif ?
cq - > device - > ops . req_ncomp_notif ( cq , wc_cnt ) :
2005-04-17 02:20:36 +04:00
- ENOSYS ;
}
2006-12-13 01:27:41 +03:00
/**
* ib_dma_mapping_error - check a DMA addr for error
* @ dev : The device for which the dma_addr was created
* @ dma_addr : The DMA address to check
*/
static inline int ib_dma_mapping_error ( struct ib_device * dev , u64 dma_addr )
{
2017-03-08 01:56:53 +03:00
return dma_mapping_error ( dev - > dma_device , dma_addr ) ;
2006-12-13 01:27:41 +03:00
}
/**
* ib_dma_map_single - Map a kernel virtual address to DMA address
* @ dev : The device for which the dma_addr is to be created
* @ cpu_addr : The kernel virtual address
* @ size : The size of the region in bytes
* @ direction : The direction of the DMA
*/
static inline u64 ib_dma_map_single ( struct ib_device * dev ,
void * cpu_addr , size_t size ,
enum dma_data_direction direction )
{
2017-03-08 01:56:53 +03:00
return dma_map_single ( dev - > dma_device , cpu_addr , size , direction ) ;
2006-12-13 01:27:41 +03:00
}
/**
* ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single ( )
* @ dev : The device for which the DMA address was created
* @ addr : The DMA address
* @ size : The size of the region in bytes
* @ direction : The direction of the DMA
*/
static inline void ib_dma_unmap_single ( struct ib_device * dev ,
u64 addr , size_t size ,
enum dma_data_direction direction )
{
2017-03-08 01:56:53 +03:00
dma_unmap_single ( dev - > dma_device , addr , size , direction ) ;
2008-04-29 12:00:34 +04:00
}
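A minimal sketch of the map / check / unmap cycle for a single kernel buffer; example_map_buffer(), the buf and len parameters, and the DMA direction are assumptions of this example.

/* Sketch only: the buffer and its use between map and unmap are placeholders. */
static int example_map_buffer(struct ib_device *dev, void *buf, size_t len)
{
	u64 dma_addr;

	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	/* ... post a send work request whose SGE points at dma_addr ... */

	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
	return 0;
}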
2006-12-13 01:27:41 +03:00
/**
* ib_dma_map_page - Map a physical page to DMA address
* @ dev : The device for which the dma_addr is to be created
* @ page : The page to be mapped
* @ offset : The offset within the page
* @ size : The size of the region in bytes
* @ direction : The direction of the DMA
*/
static inline u64 ib_dma_map_page ( struct ib_device * dev ,
struct page * page ,
unsigned long offset ,
size_t size ,
enum dma_data_direction direction )
{
2017-03-08 01:56:53 +03:00
return dma_map_page ( dev - > dma_device , page , offset , size , direction ) ;
2006-12-13 01:27:41 +03:00
}
/**
* ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page ( )
* @ dev : The device for which the DMA address was created
* @ addr : The DMA address
* @ size : The size of the region in bytes
* @ direction : The direction of the DMA
*/
static inline void ib_dma_unmap_page ( struct ib_device * dev ,
u64 addr , size_t size ,
enum dma_data_direction direction )
{
2017-03-08 01:56:53 +03:00
dma_unmap_page ( dev - > dma_device , addr , size , direction ) ;
2006-12-13 01:27:41 +03:00
}
/**
* ib_dma_map_sg - Map a scatter / gather list to DMA addresses
* @ dev : The device for which the DMA addresses are to be created
* @ sg : The array of scatter / gather entries
* @ nents : The number of scatter / gather entries
* @ direction : The direction of the DMA
*/
static inline int ib_dma_map_sg ( struct ib_device * dev ,
struct scatterlist * sg , int nents ,
enum dma_data_direction direction )
{
2017-03-08 01:56:53 +03:00
return dma_map_sg ( dev - > dma_device , sg , nents , direction ) ;
2006-12-13 01:27:41 +03:00
}
/**
* ib_dma_unmap_sg - Unmap a scatter / gather list of DMA addresses
* @ dev : The device for which the DMA addresses were created
* @ sg : The array of scatter / gather entries
* @ nents : The number of scatter / gather entries
* @ direction : The direction of the DMA
*/
static inline void ib_dma_unmap_sg ( struct ib_device * dev ,
struct scatterlist * sg , int nents ,
enum dma_data_direction direction )
{
2017-03-08 01:56:53 +03:00
dma_unmap_sg ( dev - > dma_device , sg , nents , direction ) ;
2006-12-13 01:27:41 +03:00
}
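A minimal sketch of mapping and unmapping a scatterlist; example_map_sg() is hypothetical. Note that unmapping uses the original nents, not the mapped count.

/* Sketch only: ib_dma_map_sg() returns 0 on failure, and the mapped count may
 * be smaller than nents if entries were coalesced. */
static int example_map_sg(struct ib_device *dev, struct scatterlist *sg,
			  int nents)
{
	int mapped = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);

	if (!mapped)
		return -ENOMEM;

	/* ... hand the mapped entries to ib_map_mr_sg() or build SGEs ... */

	ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
	return 0;
}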
2008-04-29 12:00:34 +04:00
static inline int ib_dma_map_sg_attrs ( struct ib_device * dev ,
struct scatterlist * sg , int nents ,
enum dma_data_direction direction ,
2016-08-03 23:46:00 +03:00
unsigned long dma_attrs )
2008-04-29 12:00:34 +04:00
{
2017-03-08 01:56:53 +03:00
return dma_map_sg_attrs ( dev - > dma_device , sg , nents , direction ,
dma_attrs ) ;
2008-04-29 12:00:34 +04:00
}
static inline void ib_dma_unmap_sg_attrs ( struct ib_device * dev ,
struct scatterlist * sg , int nents ,
enum dma_data_direction direction ,
2016-08-03 23:46:00 +03:00
unsigned long dma_attrs )
2008-04-29 12:00:34 +04:00
{
2017-03-08 01:56:53 +03:00
dma_unmap_sg_attrs ( dev - > dma_device , sg , nents , direction , dma_attrs ) ;
2008-04-29 12:00:34 +04:00
}
2006-12-13 01:27:41 +03:00
2019-01-22 21:25:20 +03:00
/**
* ib_dma_max_seg_size - Return the size limit of a single DMA transfer
* @ dev : The device to query
*
* The returned value represents a size in bytes .
*/
static inline unsigned int ib_dma_max_seg_size ( struct ib_device * dev )
{
struct device_dma_parameters * p = dev - > dma_device - > dma_parms ;
return p ? p - > max_segment_size : UINT_MAX ;
}
2006-12-13 01:27:41 +03:00
/**
* ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
* @ dev : The device for which the DMA address was created
* @ addr : The DMA address
* @ size : The size of the region in bytes
* @ dir : The direction of the DMA
*/
static inline void ib_dma_sync_single_for_cpu ( struct ib_device * dev ,
u64 addr ,
size_t size ,
enum dma_data_direction dir )
{
2017-03-08 01:56:53 +03:00
dma_sync_single_for_cpu ( dev - > dma_device , addr , size , dir ) ;
2006-12-13 01:27:41 +03:00
}
/**
* ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
* @ dev : The device for which the DMA address was created
* @ addr : The DMA address
* @ size : The size of the region in bytes
* @ dir : The direction of the DMA
*/
static inline void ib_dma_sync_single_for_device ( struct ib_device * dev ,
u64 addr ,
size_t size ,
enum dma_data_direction dir )
{
2017-03-08 01:56:53 +03:00
dma_sync_single_for_device ( dev - > dma_device , addr , size , dir ) ;
2006-12-13 01:27:41 +03:00
}
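A minimal sketch of bracketing CPU access to a streaming DMA buffer with the two sync calls above; example_peek_rx_byte() and its parameters are hypothetical.

/* Sketch only: sync for the CPU before reading, then hand the buffer back. */
static u8 example_peek_rx_byte(struct ib_device *dev, u64 dma_addr,
			       const u8 *cpu_addr, size_t len)
{
	u8 first;

	ib_dma_sync_single_for_cpu(dev, dma_addr, len, DMA_FROM_DEVICE);
	first = cpu_addr[0];		/* CPU may now safely read the buffer */
	ib_dma_sync_single_for_device(dev, dma_addr, len, DMA_FROM_DEVICE);
	return first;
}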
/**
* ib_dma_alloc_coherent - Allocate memory and map it for DMA
* @ dev : The device for which the DMA address is requested
* @ size : The size of the region to allocate in bytes
* @ dma_handle : A pointer for returning the DMA address of the region
* @ flag : memory allocator flags
*/
static inline void * ib_dma_alloc_coherent ( struct ib_device * dev ,
size_t size ,
2017-01-21 00:04:10 +03:00
dma_addr_t * dma_handle ,
2006-12-13 01:27:41 +03:00
gfp_t flag )
{
2017-03-08 01:56:53 +03:00
return dma_alloc_coherent ( dev - > dma_device , size , dma_handle , flag ) ;
2006-12-13 01:27:41 +03:00
}
/**
* ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent ( )
* @ dev : The device for which the DMA addresses were allocated
* @ size : The size of the region
* @ cpu_addr : the address returned by ib_dma_alloc_coherent ( )
* @ dma_handle : the DMA address returned by ib_dma_alloc_coherent ( )
*/
static inline void ib_dma_free_coherent ( struct ib_device * dev ,
size_t size , void * cpu_addr ,
2017-01-21 00:04:10 +03:00
dma_addr_t dma_handle )
2006-12-13 01:27:41 +03:00
{
2017-03-08 01:56:53 +03:00
dma_free_coherent ( dev - > dma_device , size , cpu_addr , dma_handle ) ;
2006-12-13 01:27:41 +03:00
}
2005-04-17 02:20:36 +04:00
/**
2019-03-31 19:10:05 +03:00
* ib_dereg_mr_user - Deregisters a memory region and removes it from the
* HCA translation table .
* @ mr : The memory region to deregister .
* @ udata : Valid user data or NULL for kernel object
*
* This function can fail , if the memory region has memory windows bound to it .
*/
int ib_dereg_mr_user ( struct ib_mr * mr , struct ib_udata * udata ) ;
/**
* ib_dereg_mr - Deregisters a kernel memory region and removes it from the
2005-04-17 02:20:36 +04:00
* HCA translation table .
* @ mr : The memory region to deregister .
2013-02-06 20:19:12 +04:00
*
* This function can fail , if the memory region has memory windows bound to it .
2019-03-31 19:10:05 +03:00
*
* NOTE : for user mr use ib_dereg_mr_user with valid udata !
2005-04-17 02:20:36 +04:00
*/
2019-03-31 19:10:05 +03:00
static inline int ib_dereg_mr ( struct ib_mr * mr )
{
return ib_dereg_mr_user ( mr , NULL ) ;
}
struct ib_mr * ib_alloc_mr_user ( struct ib_pd * pd , enum ib_mr_type mr_type ,
u32 max_num_sg , struct ib_udata * udata ) ;
2005-04-17 02:20:36 +04:00
2019-03-31 19:10:05 +03:00
static inline struct ib_mr * ib_alloc_mr ( struct ib_pd * pd ,
enum ib_mr_type mr_type , u32 max_num_sg )
{
return ib_alloc_mr_user ( pd , mr_type , max_num_sg , NULL ) ;
}
2008-07-15 10:48:45 +04:00
2019-06-11 18:52:39 +03:00
struct ib_mr * ib_alloc_mr_integrity ( struct ib_pd * pd ,
u32 max_num_data_sg ,
u32 max_num_meta_sg ) ;
2008-07-15 10:48:45 +04:00
/**
* ib_update_fast_reg_key - updates the key portion of the fast_reg MR
* R_Key and L_Key .
* @ mr - struct ib_mr pointer to be updated .
* @ newkey - new key to be used .
*/
static inline void ib_update_fast_reg_key ( struct ib_mr * mr , u8 newkey )
{
mr - > lkey = ( mr - > lkey & 0xffffff00 ) | newkey ;
mr - > rkey = ( mr - > rkey & 0xffffff00 ) | newkey ;
}
2013-02-06 20:19:12 +04:00
/**
* ib_inc_rkey - increments the key portion of the given rkey . Can be used
* for calculating a new rkey for type 2 memory windows .
* @ rkey - the rkey to increment .
*/
static inline u32 ib_inc_rkey ( u32 rkey )
{
const u32 mask = 0x000000ff ;
return ( ( rkey + 1 ) & mask ) | ( rkey & ~ mask ) ;
}
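A minimal sketch of how a consumer might combine the two helpers above to refresh a fast-registration MR key between registrations; example_refresh_mr_key() is hypothetical.

/* Sketch only: derive the next key value and fold it into both lkey and rkey. */
static void example_refresh_mr_key(struct ib_mr *mr)
{
	u32 next_rkey = ib_inc_rkey(mr->rkey);

	ib_update_fast_reg_key(mr, (u8)(next_rkey & 0xff));
}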
2005-04-17 02:20:36 +04:00
/**
* ib_alloc_fmr - Allocates an unmapped fast memory region .
* @ pd : The protection domain associated with the unmapped region .
* @ mr_access_flags : Specifies the memory access rights .
* @ fmr_attr : Attributes of the unmapped region .
*
* A fast memory region must be mapped before it can be used as part of
* a work request .
*/
struct ib_fmr * ib_alloc_fmr ( struct ib_pd * pd ,
int mr_access_flags ,
struct ib_fmr_attr * fmr_attr ) ;
/**
* ib_map_phys_fmr - Maps a list of physical pages to a fast memory region .
* @ fmr : The fast memory region to associate with the pages .
* @ page_list : An array of physical pages to map to the fast memory region .
* @ list_len : The number of pages in page_list .
* @ iova : The I / O virtual address to use with the mapped region .
*/
static inline int ib_map_phys_fmr ( struct ib_fmr * fmr ,
u64 * page_list , int list_len ,
u64 iova )
{
2018-12-10 22:09:48 +03:00
return fmr - > device - > ops . map_phys_fmr ( fmr , page_list , list_len , iova ) ;
2005-04-17 02:20:36 +04:00
}
/**
* ib_unmap_fmr - Removes the mapping from a list of fast memory regions .
* @ fmr_list : A linked list of fast memory regions to unmap .
*/
int ib_unmap_fmr ( struct list_head * fmr_list ) ;
/**
* ib_dealloc_fmr - Deallocates a fast memory region .
* @ fmr : The fast memory region to deallocate .
*/
int ib_dealloc_fmr ( struct ib_fmr * fmr ) ;
/**
* ib_attach_mcast - Attaches the specified QP to a multicast group .
* @ qp : QP to attach to the multicast group . The QP must be type
* IB_QPT_UD .
* @ gid : Multicast group GID .
* @ lid : Multicast group LID in host byte order .
*
* In order to send and receive multicast packets , subnet
* administration must have created the multicast group and configured
* the fabric appropriately . The port associated with the specified
* QP must also be a member of the multicast group .
*/
int ib_attach_mcast ( struct ib_qp * qp , union ib_gid * gid , u16 lid ) ;
/**
* ib_detach_mcast - Detaches the specified QP from a multicast group .
* @ qp : QP to detach from the multicast group .
* @ gid : Multicast group GID .
* @ lid : Multicast group LID in host byte order .
*/
int ib_detach_mcast ( struct ib_qp * qp , union ib_gid * gid , u16 lid ) ;
2011-05-24 04:52:46 +04:00
/**
* ib_alloc_xrcd - Allocates an XRC domain .
* @ device : The device on which to allocate the XRC domain .
2018-01-28 12:17:19 +03:00
* @ caller : Module name for kernel consumers
2011-05-24 04:52:46 +04:00
*/
2018-01-28 12:17:19 +03:00
struct ib_xrcd * __ib_alloc_xrcd ( struct ib_device * device , const char * caller ) ;
# define ib_alloc_xrcd(device) \
__ib_alloc_xrcd ( ( device ) , KBUILD_MODNAME )
2011-05-24 04:52:46 +04:00
/**
* ib_dealloc_xrcd - Deallocates an XRC domain .
* @ xrcd : The XRC domain to deallocate .
2019-03-31 19:10:05 +03:00
* @ udata : Valid user data or NULL for kernel object
2011-05-24 04:52:46 +04:00
*/
2019-03-31 19:10:05 +03:00
int ib_dealloc_xrcd ( struct ib_xrcd * xrcd , struct ib_udata * udata ) ;
2011-05-24 04:52:46 +04:00
2013-10-31 17:26:32 +04:00
static inline int ib_check_mr_access ( int flags )
{
/*
* Local write permission is required if remote write or
* remote atomic permission is also requested .
*/
if ( flags & ( IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE ) & &
! ( flags & IB_ACCESS_LOCAL_WRITE ) )
return - EINVAL ;
return 0 ;
}
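A minimal sketch of using ib_check_mr_access() on a registration path; example_validate_access() is a hypothetical caller.

/* Sketch only: reject flag combinations the helper above deems invalid. */
static int example_validate_access(int access_flags)
{
	int ret = ib_check_mr_access(access_flags);

	if (ret)
		pr_warn("remote write/atomic access requires IB_ACCESS_LOCAL_WRITE\n");
	return ret;
}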
2018-05-23 15:30:30 +03:00
static inline bool ib_access_writable ( int access_flags )
{
/*
* We have writable memory backing the MR if any of the following
* access flags are set . " Local write " and " remote write " obviously
* require write access . " Remote atomic " can do things like fetch and
* add , which will modify memory , and " MW bind " can change permissions
* by binding a window .
*/
return access_flags &
( IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND ) ;
}
2014-02-23 16:19:05 +04:00
/**
* ib_check_mr_status : lightweight check of MR status .
* This routine may provide status checks on a selected
* ib_mr . The first use is for signature status checks .
*
* @ mr : A memory region .
* @ check_mask : Bitmask of which checks to perform from
* ib_mr_status_check enumeration .
* @ mr_status : The container of relevant status checks .
* failed checks will be indicated in the status bitmask
* and the relevant info shall be in the error item .
*/
int ib_check_mr_status ( struct ib_mr * mr , u32 check_mask ,
struct ib_mr_status * mr_status ) ;
2019-01-11 00:02:24 +03:00
/**
* ib_device_try_get : Hold a registration lock
* device : The device to lock
*
* A device under an active registration lock cannot become unregistered . It
* is only possible to obtain a registration lock on a device that is fully
* registered , otherwise this function returns false .
*
* The registration lock is only necessary for actions which require the
* device to still be registered . Uses that only require the device pointer to
* be valid should use get_device ( & ibdev - > dev ) to hold the memory .
*
*/
static inline bool ib_device_try_get ( struct ib_device * dev )
{
return refcount_inc_not_zero ( & dev - > refcount ) ;
}
void ib_device_put ( struct ib_device * device ) ;
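A minimal sketch of the registration-lock pattern described above; example_with_device() is hypothetical.

/* Sketch only: act on the device only while holding the registration lock. */
static void example_with_device(struct ib_device *dev)
{
	if (!ib_device_try_get(dev))
		return;		/* device is not (or no longer) registered */

	/* ... operations that require the device to stay registered ... */

	ib_device_put(dev);
}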
2019-02-13 07:12:51 +03:00
struct ib_device * ib_device_get_by_netdev ( struct net_device * ndev ,
enum rdma_driver_id driver_id ) ;
struct ib_device * ib_device_get_by_name ( const char * name ,
enum rdma_driver_id driver_id ) ;
2015-07-30 17:50:15 +03:00
struct net_device * ib_get_net_dev_by_params ( struct ib_device * dev , u8 port ,
u16 pkey , const union ib_gid * gid ,
const struct sockaddr * addr ) ;
2019-02-13 07:12:50 +03:00
int ib_device_set_netdev ( struct ib_device * ib_dev , struct net_device * ndev ,
unsigned int port ) ;
struct net_device * ib_device_netdev ( struct ib_device * dev , u8 port ) ;
IB/core: Introduce Work Queue object and its verbs
Introduce Work Queue object and its create/destroy/modify verbs.
A QP can be created without internal WQs "packaged" inside it;
such a QP can be configured to use an "external" WQ object as its
receive/send queue.
A WQ is a necessary component for RSS technology, since the RSS
mechanism is supposed to distribute the traffic between multiple
Receive Work Queues.
A WQ is associated (many to one) with a Completion Queue and it owns
its own properties (PD, WQ size, etc.).
A WQ has a type; this patch introduces IB_WQT_RQ (i.e. receive queue),
which may be extended to others such as IB_WQT_SQ (send queue).
A WQ of type IB_WQT_RQ contains receive work requests.
PD is an attribute of a work queue (i.e. send/receive queue); it is used
by the hardware for security validation before scattering to a memory
region that is pointed to by the WQ. For that, an external WQ object
needs a PD, letting the hardware make that validation.
When accessing a memory region that is pointed to by the WQ, its PD
is used and not the QP's PD; this behavior is similar
to that of an SRQ and a QP.
The WQ context is subject to well-defined state transitions done by
the modify_wq verb.
When a WQ is created its initial state becomes IB_WQS_RESET.
From IB_WQS_RESET it can be modified to itself or to IB_WQS_RDY.
From IB_WQS_RDY it can be modified to itself, to IB_WQS_RESET
or to IB_WQS_ERR.
From IB_WQS_ERR it can be modified to IB_WQS_RESET.
Note: a transition to IB_WQS_ERR might occur implicitly in case there
was some HW error.
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Matan Barak <matanb@mellanox.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Doug Ledford <dledford@redhat.com>
2016-05-23 15:20:48 +03:00
struct ib_wq * ib_create_wq ( struct ib_pd * pd ,
struct ib_wq_init_attr * init_attr ) ;
2019-03-31 19:10:05 +03:00
int ib_destroy_wq ( struct ib_wq * wq , struct ib_udata * udata ) ;
2016-05-23 15:20:48 +03:00
int ib_modify_wq ( struct ib_wq * wq , struct ib_wq_attr * attr ,
u32 wq_attr_mask ) ;
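A minimal sketch of driving the state machine described in the commit message above, moving a freshly created WQ from IB_WQS_RESET to IB_WQS_RDY; example_activate_wq() is hypothetical.

/* Sketch only: request the RESET -> RDY transition via ib_modify_wq(). */
static int example_activate_wq(struct ib_wq *wq)
{
	struct ib_wq_attr wq_attr = {};

	wq_attr.wq_state = IB_WQS_RDY;
	return ib_modify_wq(wq, &wq_attr, IB_WQ_STATE);
}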
2016-05-23 15:20:51 +03:00
struct ib_rwq_ind_table * ib_create_rwq_ind_table ( struct ib_device * device ,
struct ib_rwq_ind_table_init_attr *
wq_ind_table_init_attr ) ;
int ib_destroy_rwq_ind_table ( struct ib_rwq_ind_table * wq_ind_table ) ;
2015-07-30 17:50:15 +03:00
2016-05-03 19:01:04 +03:00
int ib_map_mr_sg ( struct ib_mr * mr , struct scatterlist * sg , int sg_nents ,
2016-05-12 20:49:15 +03:00
unsigned int * sg_offset , unsigned int page_size ) ;
2015-10-13 19:11:24 +03:00
static inline int
2016-05-03 19:01:04 +03:00
ib_map_mr_sg_zbva ( struct ib_mr * mr , struct scatterlist * sg , int sg_nents ,
2016-05-12 20:49:15 +03:00
unsigned int * sg_offset , unsigned int page_size )
2015-10-13 19:11:24 +03:00
{
int n ;
2016-05-03 19:01:04 +03:00
n = ib_map_mr_sg ( mr , sg , sg_nents , sg_offset , page_size ) ;
2015-10-13 19:11:24 +03:00
mr - > iova = 0 ;
return n ;
}
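A minimal sketch of registering an already DMA-mapped scatterlist with a fast-registration MR; example_map_mr(), the PAGE_SIZE choice and the strict full-mapping check are assumptions of this example.

/* Sketch only: require that every mapped entry fit into the MR's page list. */
static int example_map_mr(struct ib_mr *mr, struct scatterlist *sg, int nents)
{
	int n = ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);

	if (n < 0)
		return n;
	if (n < nents)
		return -EINVAL;	/* not all entries could be mapped */
	return 0;
}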
2016-05-03 19:01:04 +03:00
int ib_sg_to_pages ( struct ib_mr * mr , struct scatterlist * sgl , int sg_nents ,
2016-05-12 20:49:15 +03:00
unsigned int * sg_offset , int ( * set_page ) ( struct ib_mr * , u64 ) ) ;
2015-10-13 19:11:24 +03:00
2016-02-17 19:15:41 +03:00
void ib_drain_rq ( struct ib_qp * qp ) ;
void ib_drain_sq ( struct ib_qp * qp ) ;
void ib_drain_qp ( struct ib_qp * qp ) ;
2016-11-10 12:30:56 +03:00
2017-06-14 23:13:34 +03:00
int ib_get_eth_speed ( struct ib_device * dev , u8 port_num , u8 * speed , u8 * width ) ;
2017-04-29 21:41:27 +03:00
static inline u8 * rdma_ah_retrieve_dmac ( struct rdma_ah_attr * attr )
{
2017-04-29 21:41:29 +03:00
if ( attr - > type = = RDMA_AH_ATTR_TYPE_ROCE )
return attr - > roce . dmac ;
return NULL ;
2017-04-29 21:41:27 +03:00
}
2017-04-29 21:41:30 +03:00
static inline void rdma_ah_set_dlid ( struct rdma_ah_attr * attr , u32 dlid )
2017-04-29 21:41:27 +03:00
{
2017-04-29 21:41:29 +03:00
if ( attr - > type = = RDMA_AH_ATTR_TYPE_IB )
2017-04-29 21:41:30 +03:00
attr - > ib . dlid = ( u16 ) dlid ;
else if ( attr - > type = = RDMA_AH_ATTR_TYPE_OPA )
attr - > opa . dlid = dlid ;
2017-04-29 21:41:27 +03:00
}
2017-04-29 21:41:30 +03:00
static inline u32 rdma_ah_get_dlid ( const struct rdma_ah_attr * attr )
2017-04-29 21:41:27 +03:00
{
2017-04-29 21:41:29 +03:00
if ( attr - > type = = RDMA_AH_ATTR_TYPE_IB )
return attr - > ib . dlid ;
2017-04-29 21:41:30 +03:00
else if ( attr - > type = = RDMA_AH_ATTR_TYPE_OPA )
return attr - > opa . dlid ;
2017-04-29 21:41:29 +03:00
return 0 ;
2017-04-29 21:41:27 +03:00
}
static inline void rdma_ah_set_sl ( struct rdma_ah_attr * attr , u8 sl )
{
attr - > sl = sl ;
}
static inline u8 rdma_ah_get_sl ( const struct rdma_ah_attr * attr )
{
return attr - > sl ;
}
static inline void rdma_ah_set_path_bits ( struct rdma_ah_attr * attr ,
u8 src_path_bits )
{
2017-04-29 21:41:29 +03:00
if ( attr - > type = = RDMA_AH_ATTR_TYPE_IB )
attr - > ib . src_path_bits = src_path_bits ;
2017-04-29 21:41:30 +03:00
else if ( attr - > type = = RDMA_AH_ATTR_TYPE_OPA )
attr - > opa . src_path_bits = src_path_bits ;
2017-04-29 21:41:27 +03:00
}
static inline u8 rdma_ah_get_path_bits ( const struct rdma_ah_attr * attr )
{
2017-04-29 21:41:29 +03:00
if ( attr - > type = = RDMA_AH_ATTR_TYPE_IB )
return attr - > ib . src_path_bits ;
2017-04-29 21:41:30 +03:00
else if ( attr - > type = = RDMA_AH_ATTR_TYPE_OPA )
return attr - > opa . src_path_bits ;
2017-04-29 21:41:29 +03:00
return 0 ;
2017-04-29 21:41:27 +03:00
}
2017-08-04 23:54:16 +03:00
static inline void rdma_ah_set_make_grd ( struct rdma_ah_attr * attr ,
bool make_grd )
{
if ( attr - > type = = RDMA_AH_ATTR_TYPE_OPA )
attr - > opa . make_grd = make_grd ;
}
static inline bool rdma_ah_get_make_grd ( const struct rdma_ah_attr * attr )
{
if ( attr - > type = = RDMA_AH_ATTR_TYPE_OPA )
return attr - > opa . make_grd ;
return false ;
}
2017-04-29 21:41:27 +03:00
static inline void rdma_ah_set_port_num ( struct rdma_ah_attr * attr , u8 port_num )
{
attr - > port_num = port_num ;
}
static inline u8 rdma_ah_get_port_num ( const struct rdma_ah_attr * attr )
{
return attr - > port_num ;
}
static inline void rdma_ah_set_static_rate ( struct rdma_ah_attr * attr ,
u8 static_rate )
{
attr - > static_rate = static_rate ;
}
static inline u8 rdma_ah_get_static_rate ( const struct rdma_ah_attr * attr )
{
return attr - > static_rate ;
}
static inline void rdma_ah_set_ah_flags ( struct rdma_ah_attr * attr ,
enum ib_ah_flags flag )
{
attr - > ah_flags = flag ;
}
static inline enum ib_ah_flags
rdma_ah_get_ah_flags ( const struct rdma_ah_attr * attr )
{
return attr - > ah_flags ;
}
static inline const struct ib_global_route
* rdma_ah_read_grh ( const struct rdma_ah_attr * attr )
{
return & attr - > grh ;
}
/*To retrieve and modify the grh */
static inline struct ib_global_route
* rdma_ah_retrieve_grh ( struct rdma_ah_attr * attr )
{
return & attr - > grh ;
}
static inline void rdma_ah_set_dgid_raw ( struct rdma_ah_attr * attr , void * dgid )
{
struct ib_global_route * grh = rdma_ah_retrieve_grh ( attr ) ;
memcpy ( grh - > dgid . raw , dgid , sizeof ( grh - > dgid ) ) ;
}
static inline void rdma_ah_set_subnet_prefix ( struct rdma_ah_attr * attr ,
__be64 prefix )
{
struct ib_global_route * grh = rdma_ah_retrieve_grh ( attr ) ;
grh - > dgid . global . subnet_prefix = prefix ;
}
static inline void rdma_ah_set_interface_id ( struct rdma_ah_attr * attr ,
__be64 if_id )
{
struct ib_global_route * grh = rdma_ah_retrieve_grh ( attr ) ;
grh - > dgid . global . interface_id = if_id ;
}
static inline void rdma_ah_set_grh ( struct rdma_ah_attr * attr ,
union ib_gid * dgid , u32 flow_label ,
u8 sgid_index , u8 hop_limit ,
u8 traffic_class )
{
struct ib_global_route * grh = rdma_ah_retrieve_grh ( attr ) ;
attr - > ah_flags = IB_AH_GRH ;
if ( dgid )
grh - > dgid = * dgid ;
grh - > flow_label = flow_label ;
grh - > sgid_index = sgid_index ;
grh - > hop_limit = hop_limit ;
grh - > traffic_class = traffic_class ;
2018-06-13 10:22:03 +03:00
grh - > sgid_attr = NULL ;
2017-04-29 21:41:27 +03:00
}
2017-04-29 21:41:29 +03:00
2018-06-13 10:22:03 +03:00
void rdma_destroy_ah_attr ( struct rdma_ah_attr * ah_attr ) ;
void rdma_move_grh_sgid_attr ( struct rdma_ah_attr * attr , union ib_gid * dgid ,
u32 flow_label , u8 hop_limit , u8 traffic_class ,
const struct ib_gid_attr * sgid_attr ) ;
2018-06-13 10:22:05 +03:00
void rdma_copy_ah_attr ( struct rdma_ah_attr * dest ,
const struct rdma_ah_attr * src ) ;
void rdma_replace_ah_attr ( struct rdma_ah_attr * old ,
const struct rdma_ah_attr * new ) ;
void rdma_move_ah_attr ( struct rdma_ah_attr * dest , struct rdma_ah_attr * src ) ;
2018-06-13 10:22:03 +03:00
2018-02-01 21:57:03 +03:00
/**
* rdma_ah_find_type - Return address handle type .
*
* @ dev : Device to be checked
* @ port_num : Port number
*/
2017-04-29 21:41:29 +03:00
static inline enum rdma_ah_attr_type rdma_ah_find_type ( struct ib_device * dev ,
2018-02-01 21:57:03 +03:00
u8 port_num )
2017-04-29 21:41:29 +03:00
{
2018-01-12 08:58:42 +03:00
if ( rdma_protocol_roce ( dev , port_num ) )
2017-04-29 21:41:29 +03:00
return RDMA_AH_ATTR_TYPE_ROCE ;
2018-02-01 21:57:03 +03:00
if ( rdma_protocol_ib ( dev , port_num ) ) {
if ( rdma_cap_opa_ah ( dev , port_num ) )
return RDMA_AH_ATTR_TYPE_OPA ;
2017-04-29 21:41:29 +03:00
return RDMA_AH_ATTR_TYPE_IB ;
2018-02-01 21:57:03 +03:00
}
return RDMA_AH_ATTR_TYPE_UNDEFINED ;
2017-04-29 21:41:29 +03:00
}
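A minimal sketch of building an address handle attribute with the accessors above; example_fill_ah_attr() and all of the literal GRH values are placeholders.

/* Sketch only: pick the attribute type from the port and fill a GRH. */
static void example_fill_ah_attr(struct ib_device *dev, u8 port_num,
				 struct rdma_ah_attr *attr,
				 union ib_gid *dgid)
{
	memset(attr, 0, sizeof(*attr));
	attr->type = rdma_ah_find_type(dev, port_num);
	rdma_ah_set_port_num(attr, port_num);
	rdma_ah_set_sl(attr, 0);
	rdma_ah_set_grh(attr, dgid, 0 /* flow_label */, 0 /* sgid_index */,
			64 /* hop_limit */, 0 /* traffic_class */);
}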
2017-06-08 20:37:49 +03:00
2017-08-14 21:17:43 +03:00
/**
* ib_lid_cpu16 - Return lid in 16 bit CPU encoding .
* In the current implementation the only way to get
* the 32 bit lid is from other sources for OPA .
* For IB , lids will always be 16 bits so cast the
* value accordingly .
*
* @ lid : A 32 bit LID
*/
static inline u16 ib_lid_cpu16 ( u32 lid )
2017-06-08 20:37:49 +03:00
{
2017-08-14 21:17:43 +03:00
WARN_ON_ONCE ( lid & 0xFFFF0000 ) ;
return ( u16 ) lid ;
2017-06-08 20:37:49 +03:00
}
2017-08-14 21:17:43 +03:00
/**
* ib_lid_be16 - Return lid in 16 bit BE encoding .
*
* @ lid : A 32 bit LID
*/
static inline __be16 ib_lid_be16 ( u32 lid )
2017-06-08 20:37:49 +03:00
{
2017-08-14 21:17:43 +03:00
WARN_ON_ONCE ( lid & 0xFFFF0000 ) ;
return cpu_to_be16 ( ( u16 ) lid ) ;
2017-06-08 20:37:49 +03:00
}
2017-08-10 21:31:29 +03:00
2017-07-13 11:09:41 +03:00
/**
* ib_get_vector_affinity - Get the affinity mappings of a given completion
* vector
* @ device : the rdma device
* @ comp_vector : index of completion vector
*
* Returns NULL on failure , otherwise a corresponding cpu map of the
* completion vector ( returns all - cpus map if the device driver doesn ' t
* implement get_vector_affinity ) .
*/
static inline const struct cpumask *
ib_get_vector_affinity ( struct ib_device * device , int comp_vector )
{
if ( comp_vector < 0 | | comp_vector > = device - > num_comp_vectors | |
2018-12-10 22:09:48 +03:00
! device - > ops . get_vector_affinity )
2017-07-13 11:09:41 +03:00
return NULL ;
2018-12-10 22:09:48 +03:00
return device - > ops . get_vector_affinity ( device , comp_vector ) ;
2017-07-13 11:09:41 +03:00
}
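A minimal sketch of using the affinity map to pick a completion vector close to a given CPU; example_pick_comp_vector() is hypothetical and falls back to vector 0.

/* Sketch only: scan the vectors and return the first one covering @cpu. */
static int example_pick_comp_vector(struct ib_device *dev, int cpu)
{
	int vec;

	for (vec = 0; vec < dev->num_comp_vectors; vec++) {
		const struct cpumask *mask = ib_get_vector_affinity(dev, vec);

		if (mask && cpumask_test_cpu(cpu, mask))
			return vec;
	}
	return 0;
}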
{net, IB}/mlx5: Manage port association for multiport RoCE
When mlx5_ib_add is called determine if the mlx5 core device being
added is capable of dual port RoCE operation. If it is, determine
whether it is a master device or a slave device using the
num_vhca_ports and affiliate_nic_vport_criteria capabilities.
If the device is a slave, attempt to find a master device to affiliate it
with. Devices that can be affiliated will share a system image guid. If
none are found place it on a list of unaffiliated ports. If a master is
found bind the port to it by configuring the port affiliation in the NIC
vport context.
Similarly when mlx5_ib_remove is called determine the port type. If it's
a slave port, unaffiliate it from the master device, otherwise just
remove it from the unaffiliated port list.
The IB device is registered as a multiport device, even if a 2nd port is
not available for affiliation. When the 2nd port is affiliated later the
GID cache must be refreshed in order to get the default GIDs for the 2nd
port in the cache. Export roce_rescan_device to provide a mechanism to
refresh the cache after a new port is bound.
In a multiport configuration all IB object (QP, MR, PD, etc) related
commands should flow through the master mlx5_core_dev, other commands
must be sent to the slave port mlx5_core_mdev; an interface is provided
to get the correct mdev for non IB object commands.
Signed-off-by: Daniel Jurgens <danielj@mellanox.com>
Reviewed-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
2018-01-04 18:25:36 +03:00
/**
* rdma_roce_rescan_device - Rescan all of the network devices in the system
* and add their gids , as needed , to the relevant RoCE devices .
*
* @ device : the rdma device
*/
void rdma_roce_rescan_device ( struct ib_device * ibdev ) ;
2018-11-25 21:51:13 +03:00
struct ib_ucontext * ib_uverbs_get_ucontext_file ( struct ib_uverbs_file * ufile ) ;
2018-06-17 12:59:59 +03:00
2018-11-25 21:51:15 +03:00
int uverbs_destroy_def_handler ( struct uverbs_attr_bundle * attrs ) ;
2018-08-14 14:08:51 +03:00
struct net_device * rdma_alloc_netdev ( struct ib_device * device , u8 port_num ,
enum rdma_netdev_t type , const char * name ,
unsigned char name_assign_type ,
void ( * setup ) ( struct net_device * ) ) ;
2018-08-14 14:22:35 +03:00
int rdma_init_netdev ( struct ib_device * device , u8 port_num ,
enum rdma_netdev_t type , const char * name ,
unsigned char name_assign_type ,
void ( * setup ) ( struct net_device * ) ,
struct net_device * netdev ) ;
2018-10-11 22:31:53 +03:00
/**
* rdma_set_device_sysfs_group - Set device attributes group to have
* driver specific sysfs entries
* for the infiniband class .
*
* @ device : device pointer for which attributes to be created
* @ group : Pointer to group which should be added when device
* is registered with sysfs .
* rdma_set_device_sysfs_group ( ) allows existing drivers to expose one
* group per device to have sysfs attributes .
*
* NOTE : New drivers should not make use of this API ; instead new device
* parameter should be exposed via netlink command . This API and mechanism
* exist only for existing drivers .
*/
static inline void
rdma_set_device_sysfs_group ( struct ib_device * dev ,
const struct attribute_group * group )
{
dev - > groups [ 1 ] = group ;
}
2018-12-18 15:15:56 +03:00
/**
* rdma_device_to_ibdev - Get ib_device pointer from device pointer
*
* @ device : device pointer for which ib_device pointer to retrieve
*
* rdma_device_to_ibdev ( ) retrieves ib_device pointer from device .
*
*/
static inline struct ib_device * rdma_device_to_ibdev ( struct device * device )
{
2019-02-26 14:56:11 +03:00
struct ib_core_device * coredev =
container_of ( device , struct ib_core_device , dev ) ;
return coredev - > owner ;
2018-12-18 15:15:56 +03:00
}
/**
* rdma_device_to_drv_device - Helper macro to reach back to driver ' s
* ib_device holder structure from device pointer .
*
* NOTE : New drivers should not make use of this API ; This API is only for
* existing drivers who have exposed sysfs entries using
* rdma_set_device_sysfs_group ( ) .
*/
# define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \
container_of ( rdma_device_to_ibdev ( dev ) , drv_dev_struct , ibdev_member )
2019-02-26 15:01:46 +03:00
bool rdma_dev_access_netns ( const struct ib_device * device ,
const struct net * net ) ;
2005-04-17 02:20:36 +04:00
# endif /* IB_VERBS_H */