First set of updates for 4.11 kernel merge window
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull rdma updates from Doug Ledford:
 "First set of updates for 4.11 kernel merge window

  - Add new Broadcom bnxt_re RoCE driver
  - rxe driver updates
  - ioctl cleanups
  - ETH_P_IBOE declaration cleanup
  - IPoIB changes
  - Add port state cache
  - Allow srpt driver to accept guids as port names in config
  - Update to hfi1 driver
  - Update to srp driver
  - Lots of misc minor changes all over"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (114 commits)
  RDMA/bnxt_re: fix for "bnxt_en: Update to firmware interface spec 1.7.0."
  rdma_cm: fail iwarp accepts w/o connection params
  IB/srp: Drain the send queue before destroying a QP
  IB/core: Add support for draining IB_POLL_DIRECT completion queues
  IB/srp: Improve an error path
  IB/srp: Make a diagnostic message more informative
  IB/srp: Document locking conventions
  IB/srp: Fix race conditions related to task management
  IB/srp: Avoid that duplicate responses trigger a kernel bug
  IB/SRP: Avoid using IB_MR_TYPE_SG_GAPS
  RDMA/qedr: Fix some error handling
  RDMA/bnxt_re: add DCB dependency
  IB/hns: include linux/module.h
  IB/vmw_pvrdma: Expose vendor error to ULPs
  vmw_pvrdma: switch to pci_alloc_irq_vectors
  IB/hfi1: use size_t for passing array length
  IB/ipoib: Remove redudant label
  IB/ipoib: remove the unnecessary memory free
  IB/mthca: switch to pci_alloc_irq_vectors
  IB/hfi1: Code reuse with memdup_copy
  ...
commit 4cc4b9323f
@@ -20,3 +20,11 @@ Description:	RDMA-CM based connections from HCA <hca> at port <port-num>
		will be initiated with this RoCE type as default.
		The possible RoCE types are either "IB/RoCE v1" or "RoCE v2".
		This parameter has RW access.

What:		/config/rdma_cm/<hca>/ports/<port-num>/default_roce_tos
Date:		February 7, 2017
KernelVersion:	4.11.0
Description:	RDMA-CM QPs from HCA <hca> at port <port-num>
		will be created with this TOS as default.
		This can be overridden by using the rdma_set_option API.
		The possible RoCE TOS values are 0-255.
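For illustration, a minimal userspace sketch (not part of the patch) of writing this per-port default from C. It assumes the configfs layout documented above; the HCA name, port number, and the /config mount point are placeholders, and configfs may be mounted elsewhere on a given system:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Write a default RoCE TOS (0-255) for one HCA port via the rdma_cm
 * configfs attribute described above.  Returns 0 on success, -1 on error. */
static int set_default_roce_tos(const char *hca, int port, unsigned int tos)
{
	char path[256];
	char val[8];
	int fd, n;

	snprintf(path, sizeof(path),
		 "/config/rdma_cm/%s/ports/%d/default_roce_tos", hca, port);
	n = snprintf(val, sizeof(val), "%u\n", tos & 0xff);

	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, val, n) != n) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

Calling set_default_roce_tos("<hca>", 1, 106) is equivalent to echoing "106" into the attribute from a shell; the stored default can still be overridden per connection with rdma_set_option().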
MAINTAINERS (11 additions)
@@ -2826,6 +2826,17 @@ L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	arch/arm64/boot/dts/broadcom/vulcan*

BROADCOM NETXTREME-E ROCE DRIVER
M:	Selvin Xavier <selvin.xavier@broadcom.com>
M:	Devesh Sharma <devesh.sharma@broadcom.com>
M:	Somnath Kotur <somnath.kotur@broadcom.com>
M:	Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
L:	linux-rdma@vger.kernel.org
W:	http://www.broadcom.com
S:	Supported
F:	drivers/infiniband/hw/bnxt_re/
F:	include/uapi/rdma/bnxt_re-abi.h

BROCADE BFA FC SCSI DRIVER
M:	Anil Gurumurthy <anil.gurumurthy@qlogic.com>
M:	Sudarsana Kalluru <sudarsana.kalluru@qlogic.com>
@@ -92,4 +92,6 @@ source "drivers/infiniband/hw/hfi1/Kconfig"

source "drivers/infiniband/hw/qedr/Kconfig"

source "drivers/infiniband/hw/bnxt_re/Kconfig"

endif # INFINIBAND
@ -314,14 +314,13 @@ static void make_default_gid(struct net_device *dev, union ib_gid *gid)
|
||||
int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
|
||||
union ib_gid *gid, struct ib_gid_attr *attr)
|
||||
{
|
||||
struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
|
||||
struct ib_gid_table *table;
|
||||
int ix;
|
||||
int ret = 0;
|
||||
struct net_device *idev;
|
||||
int empty;
|
||||
|
||||
table = ports_table[port - rdma_start_port(ib_dev)];
|
||||
table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
|
||||
|
||||
if (!memcmp(gid, &zgid, sizeof(*gid)))
|
||||
return -EINVAL;
|
||||
@ -369,11 +368,10 @@ out_unlock:
|
||||
int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
|
||||
union ib_gid *gid, struct ib_gid_attr *attr)
|
||||
{
|
||||
struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
|
||||
struct ib_gid_table *table;
|
||||
int ix;
|
||||
|
||||
table = ports_table[port - rdma_start_port(ib_dev)];
|
||||
table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
|
||||
|
||||
mutex_lock(&table->lock);
|
||||
write_lock_irq(&table->rwlock);
|
||||
@ -399,12 +397,11 @@ out_unlock:
|
||||
int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
|
||||
struct net_device *ndev)
|
||||
{
|
||||
struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
|
||||
struct ib_gid_table *table;
|
||||
int ix;
|
||||
bool deleted = false;
|
||||
|
||||
table = ports_table[port - rdma_start_port(ib_dev)];
|
||||
table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
|
||||
|
||||
mutex_lock(&table->lock);
|
||||
write_lock_irq(&table->rwlock);
|
||||
@ -428,10 +425,9 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
|
||||
static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
|
||||
union ib_gid *gid, struct ib_gid_attr *attr)
|
||||
{
|
||||
struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
|
||||
struct ib_gid_table *table;
|
||||
|
||||
table = ports_table[port - rdma_start_port(ib_dev)];
|
||||
table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
|
||||
|
||||
if (index < 0 || index >= table->sz)
|
||||
return -EINVAL;
|
||||
@ -455,14 +451,13 @@ static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
|
||||
unsigned long mask,
|
||||
u8 *port, u16 *index)
|
||||
{
|
||||
struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
|
||||
struct ib_gid_table *table;
|
||||
u8 p;
|
||||
int local_index;
|
||||
unsigned long flags;
|
||||
|
||||
for (p = 0; p < ib_dev->phys_port_cnt; p++) {
|
||||
table = ports_table[p];
|
||||
table = ib_dev->cache.ports[p].gid;
|
||||
read_lock_irqsave(&table->rwlock, flags);
|
||||
local_index = find_gid(table, gid, val, false, mask, NULL);
|
||||
if (local_index >= 0) {
|
||||
@ -503,18 +498,16 @@ int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
|
||||
u16 *index)
|
||||
{
|
||||
int local_index;
|
||||
struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
|
||||
struct ib_gid_table *table;
|
||||
unsigned long mask = GID_ATTR_FIND_MASK_GID |
|
||||
GID_ATTR_FIND_MASK_GID_TYPE;
|
||||
struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
|
||||
unsigned long flags;
|
||||
|
||||
if (port < rdma_start_port(ib_dev) ||
|
||||
port > rdma_end_port(ib_dev))
|
||||
if (!rdma_is_port_valid(ib_dev, port))
|
||||
return -ENOENT;
|
||||
|
||||
table = ports_table[port - rdma_start_port(ib_dev)];
|
||||
table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
|
||||
|
||||
if (ndev)
|
||||
mask |= GID_ATTR_FIND_MASK_NETDEV;
|
||||
@ -562,21 +555,17 @@ static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
|
||||
void *context,
|
||||
u16 *index)
|
||||
{
|
||||
struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
|
||||
struct ib_gid_table *table;
|
||||
unsigned int i;
|
||||
unsigned long flags;
|
||||
bool found = false;
|
||||
|
||||
if (!ports_table)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (port < rdma_start_port(ib_dev) ||
|
||||
port > rdma_end_port(ib_dev) ||
|
||||
if (!rdma_is_port_valid(ib_dev, port) ||
|
||||
!rdma_protocol_roce(ib_dev, port))
|
||||
return -EPROTONOSUPPORT;
|
||||
|
||||
table = ports_table[port - rdma_start_port(ib_dev)];
|
||||
table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
|
||||
|
||||
read_lock_irqsave(&table->rwlock, flags);
|
||||
for (i = 0; i < table->sz; i++) {
|
||||
@ -668,14 +657,13 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
|
||||
unsigned long gid_type_mask,
|
||||
enum ib_cache_gid_default_mode mode)
|
||||
{
|
||||
struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
|
||||
union ib_gid gid;
|
||||
struct ib_gid_attr gid_attr;
|
||||
struct ib_gid_attr zattr_type = zattr;
|
||||
struct ib_gid_table *table;
|
||||
unsigned int gid_type;
|
||||
|
||||
table = ports_table[port - rdma_start_port(ib_dev)];
|
||||
table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
|
||||
|
||||
make_default_gid(ndev, &gid);
|
||||
memset(&gid_attr, 0, sizeof(gid_attr));
|
||||
@ -766,71 +754,64 @@ static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
|
||||
static int _gid_table_setup_one(struct ib_device *ib_dev)
|
||||
{
|
||||
u8 port;
|
||||
struct ib_gid_table **table;
|
||||
struct ib_gid_table *table;
|
||||
int err = 0;
|
||||
|
||||
table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);
|
||||
if (!table)
|
||||
return -ENOMEM;
|
||||
|
||||
for (port = 0; port < ib_dev->phys_port_cnt; port++) {
|
||||
u8 rdma_port = port + rdma_start_port(ib_dev);
|
||||
|
||||
table[port] =
|
||||
table =
|
||||
alloc_gid_table(
|
||||
ib_dev->port_immutable[rdma_port].gid_tbl_len);
|
||||
if (!table[port]) {
|
||||
if (!table) {
|
||||
err = -ENOMEM;
|
||||
goto rollback_table_setup;
|
||||
}
|
||||
|
||||
err = gid_table_reserve_default(ib_dev,
|
||||
port + rdma_start_port(ib_dev),
|
||||
table[port]);
|
||||
table);
|
||||
if (err)
|
||||
goto rollback_table_setup;
|
||||
ib_dev->cache.ports[port].gid = table;
|
||||
}
|
||||
|
||||
ib_dev->cache.gid_cache = table;
|
||||
return 0;
|
||||
|
||||
rollback_table_setup:
|
||||
for (port = 0; port < ib_dev->phys_port_cnt; port++) {
|
||||
table = ib_dev->cache.ports[port].gid;
|
||||
|
||||
cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
|
||||
table[port]);
|
||||
release_gid_table(table[port]);
|
||||
table);
|
||||
release_gid_table(table);
|
||||
}
|
||||
|
||||
kfree(table);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void gid_table_release_one(struct ib_device *ib_dev)
|
||||
{
|
||||
struct ib_gid_table **table = ib_dev->cache.gid_cache;
|
||||
struct ib_gid_table *table;
|
||||
u8 port;
|
||||
|
||||
if (!table)
|
||||
return;
|
||||
|
||||
for (port = 0; port < ib_dev->phys_port_cnt; port++)
|
||||
release_gid_table(table[port]);
|
||||
|
||||
kfree(table);
|
||||
ib_dev->cache.gid_cache = NULL;
|
||||
for (port = 0; port < ib_dev->phys_port_cnt; port++) {
|
||||
table = ib_dev->cache.ports[port].gid;
|
||||
release_gid_table(table);
|
||||
ib_dev->cache.ports[port].gid = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static void gid_table_cleanup_one(struct ib_device *ib_dev)
|
||||
{
|
||||
struct ib_gid_table **table = ib_dev->cache.gid_cache;
|
||||
struct ib_gid_table *table;
|
||||
u8 port;
|
||||
|
||||
if (!table)
|
||||
return;
|
||||
|
||||
for (port = 0; port < ib_dev->phys_port_cnt; port++)
|
||||
for (port = 0; port < ib_dev->phys_port_cnt; port++) {
|
||||
table = ib_dev->cache.ports[port].gid;
|
||||
cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
|
||||
table[port]);
|
||||
table);
|
||||
}
|
||||
}
|
||||
|
||||
static int gid_table_setup_one(struct ib_device *ib_dev)
|
||||
@ -860,12 +841,12 @@ int ib_get_cached_gid(struct ib_device *device,
|
||||
{
|
||||
int res;
|
||||
unsigned long flags;
|
||||
struct ib_gid_table **ports_table = device->cache.gid_cache;
|
||||
struct ib_gid_table *table = ports_table[port_num - rdma_start_port(device)];
|
||||
struct ib_gid_table *table;
|
||||
|
||||
if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
|
||||
if (!rdma_is_port_valid(device, port_num))
|
||||
return -EINVAL;
|
||||
|
||||
table = device->cache.ports[port_num - rdma_start_port(device)].gid;
|
||||
read_lock_irqsave(&table->rwlock, flags);
|
||||
res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
|
||||
read_unlock_irqrestore(&table->rwlock, flags);
|
||||
@ -912,12 +893,12 @@ int ib_get_cached_pkey(struct ib_device *device,
|
||||
unsigned long flags;
|
||||
int ret = 0;
|
||||
|
||||
if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
|
||||
if (!rdma_is_port_valid(device, port_num))
|
||||
return -EINVAL;
|
||||
|
||||
read_lock_irqsave(&device->cache.lock, flags);
|
||||
|
||||
cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
|
||||
cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
|
||||
|
||||
if (index < 0 || index >= cache->table_len)
|
||||
ret = -EINVAL;
|
||||
@ -941,12 +922,12 @@ int ib_find_cached_pkey(struct ib_device *device,
|
||||
int ret = -ENOENT;
|
||||
int partial_ix = -1;
|
||||
|
||||
if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
|
||||
if (!rdma_is_port_valid(device, port_num))
|
||||
return -EINVAL;
|
||||
|
||||
read_lock_irqsave(&device->cache.lock, flags);
|
||||
|
||||
cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
|
||||
cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
|
||||
|
||||
*index = -1;
|
||||
|
||||
@ -981,12 +962,12 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
|
||||
int i;
|
||||
int ret = -ENOENT;
|
||||
|
||||
if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
|
||||
if (!rdma_is_port_valid(device, port_num))
|
||||
return -EINVAL;
|
||||
|
||||
read_lock_irqsave(&device->cache.lock, flags);
|
||||
|
||||
cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
|
||||
cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
|
||||
|
||||
*index = -1;
|
||||
|
||||
@@ -1010,17 +991,36 @@ int ib_get_cached_lmc(struct ib_device *device,
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
	*lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

int ib_get_cached_port_state(struct ib_device *device,
			     u8 port_num,
			     enum ib_port_state *port_state)
{
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*port_state = device->cache.ports[port_num
		- rdma_start_port(device)].port_state;
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_port_state);

static void ib_cache_update(struct ib_device *device,
			    u8 port)
{
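A minimal sketch (not part of the patch) of how a consumer might use the new cached port-state helper; "device" and "port_num" stand in for the caller's own context, and the pattern mirrors the cma_resolve_ib_dev() and cma_bind_loopback() changes later in this series:

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

/* Illustrative helper: treat a port as usable only when the cached
 * state says it is ACTIVE; ib_get_cached_port_state() returns -EINVAL
 * for an invalid port number. */
static bool example_port_is_active(struct ib_device *device, u8 port_num)
{
	enum ib_port_state port_state;

	if (ib_get_cached_port_state(device, port_num, &port_state))
		return false;

	return port_state == IB_PORT_ACTIVE;
}

Reading the state from the cache avoids an ib_query_port() call (and thus a potential MAD exchange) on hot paths such as address resolution.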
@ -1033,14 +1033,13 @@ static void ib_cache_update(struct ib_device *device,
|
||||
int i;
|
||||
int ret;
|
||||
struct ib_gid_table *table;
|
||||
struct ib_gid_table **ports_table = device->cache.gid_cache;
|
||||
bool use_roce_gid_table =
|
||||
rdma_cap_roce_gid_table(device, port);
|
||||
|
||||
if (port < rdma_start_port(device) || port > rdma_end_port(device))
|
||||
if (!rdma_is_port_valid(device, port))
|
||||
return;
|
||||
|
||||
table = ports_table[port - rdma_start_port(device)];
|
||||
table = device->cache.ports[port - rdma_start_port(device)].gid;
|
||||
|
||||
tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
|
||||
if (!tprops)
|
||||
@ -1092,9 +1091,10 @@ static void ib_cache_update(struct ib_device *device,
|
||||
|
||||
write_lock_irq(&device->cache.lock);
|
||||
|
||||
old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];
|
||||
old_pkey_cache = device->cache.ports[port -
|
||||
rdma_start_port(device)].pkey;
|
||||
|
||||
device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
|
||||
device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
|
||||
if (!use_roce_gid_table) {
|
||||
write_lock(&table->rwlock);
|
||||
for (i = 0; i < gid_cache->table_len; i++) {
|
||||
@ -1104,7 +1104,9 @@ static void ib_cache_update(struct ib_device *device,
|
||||
write_unlock(&table->rwlock);
|
||||
}
|
||||
|
||||
device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;
|
||||
device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
|
||||
device->cache.ports[port - rdma_start_port(device)].port_state =
|
||||
tprops->state;
|
||||
|
||||
write_unlock_irq(&device->cache.lock);
|
||||
|
||||
@ -1157,22 +1159,17 @@ int ib_cache_setup_one(struct ib_device *device)
|
||||
|
||||
rwlock_init(&device->cache.lock);
|
||||
|
||||
device->cache.pkey_cache =
|
||||
kzalloc(sizeof *device->cache.pkey_cache *
|
||||
device->cache.ports =
|
||||
kzalloc(sizeof(*device->cache.ports) *
|
||||
(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
|
||||
device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
|
||||
(rdma_end_port(device) -
|
||||
rdma_start_port(device) + 1),
|
||||
GFP_KERNEL);
|
||||
if (!device->cache.pkey_cache ||
|
||||
!device->cache.lmc_cache) {
|
||||
if (!device->cache.ports) {
|
||||
err = -ENOMEM;
|
||||
goto free;
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = gid_table_setup_one(device);
|
||||
if (err)
|
||||
goto free;
|
||||
goto out;
|
||||
|
||||
for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
|
||||
ib_cache_update(device, p + rdma_start_port(device));
|
||||
@ -1187,9 +1184,7 @@ int ib_cache_setup_one(struct ib_device *device)
|
||||
|
||||
err:
|
||||
gid_table_cleanup_one(device);
|
||||
free:
|
||||
kfree(device->cache.pkey_cache);
|
||||
kfree(device->cache.lmc_cache);
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -1203,14 +1198,11 @@ void ib_cache_release_one(struct ib_device *device)
|
||||
* all the device's resources when the cache could no
|
||||
* longer be accessed.
|
||||
*/
|
||||
if (device->cache.pkey_cache)
|
||||
for (p = 0;
|
||||
p <= rdma_end_port(device) - rdma_start_port(device); ++p)
|
||||
kfree(device->cache.pkey_cache[p]);
|
||||
for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
|
||||
kfree(device->cache.ports[p].pkey);
|
||||
|
||||
gid_table_release_one(device);
|
||||
kfree(device->cache.pkey_cache);
|
||||
kfree(device->cache.lmc_cache);
|
||||
kfree(device->cache.ports);
|
||||
}
|
||||
|
||||
void ib_cache_cleanup_one(struct ib_device *device)
|
||||
|
@ -3409,6 +3409,8 @@ static void cm_process_send_error(struct ib_mad_send_buf *msg,
|
||||
if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
|
||||
goto discard;
|
||||
|
||||
pr_debug_ratelimited("CM: failed sending MAD in state %d. (%s)\n",
|
||||
state, ib_wc_status_msg(wc_status));
|
||||
switch (state) {
|
||||
case IB_CM_REQ_SENT:
|
||||
case IB_CM_MRA_REQ_RCVD:
|
||||
|
@ -198,6 +198,7 @@ struct cma_device {
|
||||
atomic_t refcount;
|
||||
struct list_head id_list;
|
||||
enum ib_gid_type *default_gid_type;
|
||||
u8 *default_roce_tos;
|
||||
};
|
||||
|
||||
struct rdma_bind_list {
|
||||
@ -269,8 +270,7 @@ struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
|
||||
int cma_get_default_gid_type(struct cma_device *cma_dev,
|
||||
unsigned int port)
|
||||
{
|
||||
if (port < rdma_start_port(cma_dev->device) ||
|
||||
port > rdma_end_port(cma_dev->device))
|
||||
if (!rdma_is_port_valid(cma_dev->device, port))
|
||||
return -EINVAL;
|
||||
|
||||
return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];
|
||||
@ -282,8 +282,7 @@ int cma_set_default_gid_type(struct cma_device *cma_dev,
|
||||
{
|
||||
unsigned long supported_gids;
|
||||
|
||||
if (port < rdma_start_port(cma_dev->device) ||
|
||||
port > rdma_end_port(cma_dev->device))
|
||||
if (!rdma_is_port_valid(cma_dev->device, port))
|
||||
return -EINVAL;
|
||||
|
||||
supported_gids = roce_gid_type_mask_support(cma_dev->device, port);
|
||||
@ -297,6 +296,25 @@ int cma_set_default_gid_type(struct cma_device *cma_dev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port)
|
||||
{
|
||||
if (!rdma_is_port_valid(cma_dev->device, port))
|
||||
return -EINVAL;
|
||||
|
||||
return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)];
|
||||
}
|
||||
|
||||
int cma_set_default_roce_tos(struct cma_device *cma_dev, unsigned int port,
|
||||
u8 default_roce_tos)
|
||||
{
|
||||
if (!rdma_is_port_valid(cma_dev->device, port))
|
||||
return -EINVAL;
|
||||
|
||||
cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)] =
|
||||
default_roce_tos;
|
||||
|
||||
return 0;
|
||||
}
|
||||
struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
|
||||
{
|
||||
return cma_dev->device;
|
||||
@ -343,6 +361,7 @@ struct rdma_id_private {
|
||||
u32 options;
|
||||
u8 srq;
|
||||
u8 tos;
|
||||
bool tos_set;
|
||||
u8 reuseaddr;
|
||||
u8 afonly;
|
||||
enum ib_gid_type gid_type;
|
||||
@ -709,6 +728,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
|
||||
union ib_gid gid, sgid, *dgid;
|
||||
u16 pkey, index;
|
||||
u8 p;
|
||||
enum ib_port_state port_state;
|
||||
int i;
|
||||
|
||||
cma_dev = NULL;
|
||||
@ -724,6 +744,8 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
|
||||
if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
|
||||
continue;
|
||||
|
||||
if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
|
||||
continue;
|
||||
for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i,
|
||||
&gid, NULL);
|
||||
i++) {
|
||||
@ -735,7 +757,8 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
|
||||
}
|
||||
|
||||
if (!cma_dev && (gid.global.subnet_prefix ==
|
||||
dgid->global.subnet_prefix)) {
|
||||
dgid->global.subnet_prefix) &&
|
||||
port_state == IB_PORT_ACTIVE) {
|
||||
cma_dev = cur_dev;
|
||||
sgid = gid;
|
||||
id_priv->id.port_num = p;
|
||||
@ -778,6 +801,7 @@ struct rdma_cm_id *rdma_create_id(struct net *net,
|
||||
id_priv->id.event_handler = event_handler;
|
||||
id_priv->id.ps = ps;
|
||||
id_priv->id.qp_type = qp_type;
|
||||
id_priv->tos_set = false;
|
||||
spin_lock_init(&id_priv->lock);
|
||||
mutex_init(&id_priv->qp_mutex);
|
||||
init_completion(&id_priv->comp);
|
||||
@ -1689,6 +1713,7 @@ static int cma_rep_recv(struct rdma_id_private *id_priv)
|
||||
|
||||
return 0;
|
||||
reject:
|
||||
pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. status %d\n", ret);
|
||||
cma_modify_qp_err(id_priv);
|
||||
ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
|
||||
NULL, 0, NULL, 0);
|
||||
@ -1760,6 +1785,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
|
||||
/* ignore event */
|
||||
goto out;
|
||||
case IB_CM_REJ_RECEIVED:
|
||||
pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id,
|
||||
ib_event->param.rej_rcvd.reason));
|
||||
cma_modify_qp_err(id_priv);
|
||||
event.status = ib_event->param.rej_rcvd.reason;
|
||||
event.event = RDMA_CM_EVENT_REJECTED;
|
||||
@ -2266,6 +2293,7 @@ void rdma_set_service_type(struct rdma_cm_id *id, int tos)
|
||||
|
||||
id_priv = container_of(id, struct rdma_id_private, id);
|
||||
id_priv->tos = (u8) tos;
|
||||
id_priv->tos_set = true;
|
||||
}
|
||||
EXPORT_SYMBOL(rdma_set_service_type);
|
||||
|
||||
@ -2285,6 +2313,8 @@ static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
|
||||
work->new_state = RDMA_CM_ADDR_RESOLVED;
|
||||
work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
|
||||
work->event.status = status;
|
||||
pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n",
|
||||
status);
|
||||
}
|
||||
|
||||
queue_work(cma_wq, &work->work);
|
||||
@ -2498,6 +2528,9 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
|
||||
struct cma_work *work;
|
||||
int ret;
|
||||
struct net_device *ndev = NULL;
|
||||
u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
|
||||
rdma_start_port(id_priv->cma_dev->device)];
|
||||
u8 tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
|
||||
|
||||
|
||||
work = kzalloc(sizeof *work, GFP_KERNEL);
|
||||
@ -2571,7 +2604,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
|
||||
route->path_rec->reversible = 1;
|
||||
route->path_rec->pkey = cpu_to_be16(0xffff);
|
||||
route->path_rec->mtu_selector = IB_SA_EQ;
|
||||
route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos);
|
||||
route->path_rec->sl = iboe_tos_to_sl(ndev, tos);
|
||||
route->path_rec->traffic_class = tos;
|
||||
route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
|
||||
route->path_rec->rate_selector = IB_SA_EQ;
|
||||
route->path_rec->rate = iboe_get_rate(ndev);
|
||||
@ -2650,8 +2684,8 @@ static void cma_set_loopback(struct sockaddr *addr)
|
||||
static int cma_bind_loopback(struct rdma_id_private *id_priv)
|
||||
{
|
||||
struct cma_device *cma_dev, *cur_dev;
|
||||
struct ib_port_attr port_attr;
|
||||
union ib_gid gid;
|
||||
enum ib_port_state port_state;
|
||||
u16 pkey;
|
||||
int ret;
|
||||
u8 p;
|
||||
@ -2667,8 +2701,8 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
|
||||
cma_dev = cur_dev;
|
||||
|
||||
for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
|
||||
if (!ib_query_port(cur_dev->device, p, &port_attr) &&
|
||||
port_attr.state == IB_PORT_ACTIVE) {
|
||||
if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) &&
|
||||
port_state == IB_PORT_ACTIVE) {
|
||||
cma_dev = cur_dev;
|
||||
goto port_found;
|
||||
}
|
||||
@ -2718,8 +2752,14 @@ static void addr_handler(int status, struct sockaddr *src_addr,
|
||||
goto out;
|
||||
|
||||
memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
|
||||
if (!status && !id_priv->cma_dev)
|
||||
if (!status && !id_priv->cma_dev) {
|
||||
status = cma_acquire_dev(id_priv, NULL);
|
||||
if (status)
|
||||
pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
|
||||
status);
|
||||
} else {
|
||||
pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
|
||||
}
|
||||
|
||||
if (status) {
|
||||
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
|
||||
@ -2831,20 +2871,26 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
|
||||
int ret;
|
||||
|
||||
id_priv = container_of(id, struct rdma_id_private, id);
|
||||
memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
|
||||
if (id_priv->state == RDMA_CM_IDLE) {
|
||||
ret = cma_bind_addr(id, src_addr, dst_addr);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
if (cma_family(id_priv) != dst_addr->sa_family)
|
||||
if (cma_family(id_priv) != dst_addr->sa_family) {
|
||||
memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
|
||||
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
|
||||
memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
atomic_inc(&id_priv->refcount);
|
||||
memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
|
||||
if (cma_any_addr(dst_addr)) {
|
||||
ret = cma_resolve_loopback(id_priv);
|
||||
} else {
|
||||
@ -2960,6 +3006,43 @@ err:
|
||||
return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
|
||||
}
|
||||
|
||||
static int cma_port_is_unique(struct rdma_bind_list *bind_list,
|
||||
struct rdma_id_private *id_priv)
|
||||
{
|
||||
struct rdma_id_private *cur_id;
|
||||
struct sockaddr *daddr = cma_dst_addr(id_priv);
|
||||
struct sockaddr *saddr = cma_src_addr(id_priv);
|
||||
__be16 dport = cma_port(daddr);
|
||||
|
||||
hlist_for_each_entry(cur_id, &bind_list->owners, node) {
|
||||
struct sockaddr *cur_daddr = cma_dst_addr(cur_id);
|
||||
struct sockaddr *cur_saddr = cma_src_addr(cur_id);
|
||||
__be16 cur_dport = cma_port(cur_daddr);
|
||||
|
||||
if (id_priv == cur_id)
|
||||
continue;
|
||||
|
||||
/* different dest port -> unique */
|
||||
if (!cma_any_port(cur_daddr) &&
|
||||
(dport != cur_dport))
|
||||
continue;
|
||||
|
||||
/* different src address -> unique */
|
||||
if (!cma_any_addr(saddr) &&
|
||||
!cma_any_addr(cur_saddr) &&
|
||||
cma_addr_cmp(saddr, cur_saddr))
|
||||
continue;
|
||||
|
||||
/* different dst address -> unique */
|
||||
if (!cma_any_addr(cur_daddr) &&
|
||||
cma_addr_cmp(daddr, cur_daddr))
|
||||
continue;
|
||||
|
||||
return -EADDRNOTAVAIL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cma_alloc_any_port(enum rdma_port_space ps,
|
||||
struct rdma_id_private *id_priv)
|
||||
{
|
||||
@ -2972,9 +3055,19 @@ static int cma_alloc_any_port(enum rdma_port_space ps,
|
||||
remaining = (high - low) + 1;
|
||||
rover = prandom_u32() % remaining + low;
|
||||
retry:
|
||||
if (last_used_port != rover &&
|
||||
!cma_ps_find(net, ps, (unsigned short)rover)) {
|
||||
int ret = cma_alloc_port(ps, id_priv, rover);
|
||||
if (last_used_port != rover) {
|
||||
struct rdma_bind_list *bind_list;
|
||||
int ret;
|
||||
|
||||
bind_list = cma_ps_find(net, ps, (unsigned short)rover);
|
||||
|
||||
if (!bind_list) {
|
||||
ret = cma_alloc_port(ps, id_priv, rover);
|
||||
} else {
|
||||
ret = cma_port_is_unique(bind_list, id_priv);
|
||||
if (!ret)
|
||||
cma_bind_port(bind_list, id_priv);
|
||||
}
|
||||
/*
|
||||
* Remember previously used port number in order to avoid
|
||||
* re-using same port immediately after it is closed.
|
||||
@ -3203,6 +3296,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
|
||||
{
|
||||
struct rdma_id_private *id_priv;
|
||||
int ret;
|
||||
struct sockaddr *daddr;
|
||||
|
||||
if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
|
||||
addr->sa_family != AF_IB)
|
||||
@ -3242,6 +3336,9 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
|
||||
if (ret)
|
||||
goto err2;
|
||||
|
||||
daddr = cma_dst_addr(id_priv);
|
||||
daddr->sa_family = addr->sa_family;
|
||||
|
||||
return 0;
|
||||
err2:
|
||||
if (id_priv->cma_dev)
|
||||
@ -3306,10 +3403,13 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
|
||||
if (rep->status != IB_SIDR_SUCCESS) {
|
||||
event.event = RDMA_CM_EVENT_UNREACHABLE;
|
||||
event.status = ib_event->param.sidr_rep_rcvd.status;
|
||||
pr_debug_ratelimited("RDMA CM: UNREACHABLE: bad SIDR reply. status %d\n",
|
||||
event.status);
|
||||
break;
|
||||
}
|
||||
ret = cma_set_qkey(id_priv, rep->qkey);
|
||||
if (ret) {
|
||||
pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to set qkey. status %d\n", ret);
|
||||
event.event = RDMA_CM_EVENT_ADDR_ERROR;
|
||||
event.status = ret;
|
||||
break;
|
||||
@ -3581,6 +3681,9 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
|
||||
struct iw_cm_conn_param iw_param;
|
||||
int ret;
|
||||
|
||||
if (!conn_param)
|
||||
return -EINVAL;
|
||||
|
||||
ret = cma_modify_qp_rtr(id_priv, conn_param);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -3758,10 +3861,17 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
|
||||
|
||||
if (!status)
|
||||
status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
|
||||
else
|
||||
pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
|
||||
status);
|
||||
mutex_lock(&id_priv->qp_mutex);
|
||||
if (!status && id_priv->id.qp)
|
||||
if (!status && id_priv->id.qp) {
|
||||
status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
|
||||
be16_to_cpu(multicast->rec.mlid));
|
||||
if (status)
|
||||
pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to attach QP. status %d\n",
|
||||
status);
|
||||
}
|
||||
mutex_unlock(&id_priv->qp_mutex);
|
||||
|
||||
memset(&event, 0, sizeof event);
|
||||
@ -4227,15 +4337,21 @@ static void cma_add_one(struct ib_device *device)
|
||||
cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
|
||||
sizeof(*cma_dev->default_gid_type),
|
||||
GFP_KERNEL);
|
||||
if (!cma_dev->default_gid_type) {
|
||||
kfree(cma_dev);
|
||||
return;
|
||||
}
|
||||
if (!cma_dev->default_gid_type)
|
||||
goto free_cma_dev;
|
||||
|
||||
cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt,
|
||||
sizeof(*cma_dev->default_roce_tos),
|
||||
GFP_KERNEL);
|
||||
if (!cma_dev->default_roce_tos)
|
||||
goto free_gid_type;
|
||||
|
||||
for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
|
||||
supported_gids = roce_gid_type_mask_support(device, i);
|
||||
WARN_ON(!supported_gids);
|
||||
cma_dev->default_gid_type[i - rdma_start_port(device)] =
|
||||
find_first_bit(&supported_gids, BITS_PER_LONG);
|
||||
cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0;
|
||||
}
|
||||
|
||||
init_completion(&cma_dev->comp);
|
||||
@ -4248,6 +4364,16 @@ static void cma_add_one(struct ib_device *device)
|
||||
list_for_each_entry(id_priv, &listen_any_list, list)
|
||||
cma_listen_on_dev(id_priv, cma_dev);
|
||||
mutex_unlock(&lock);
|
||||
|
||||
return;
|
||||
|
||||
free_gid_type:
|
||||
kfree(cma_dev->default_gid_type);
|
||||
|
||||
free_cma_dev:
|
||||
kfree(cma_dev);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
|
||||
@ -4316,6 +4442,7 @@ static void cma_remove_one(struct ib_device *device, void *client_data)
|
||||
mutex_unlock(&lock);
|
||||
|
||||
cma_process_remove(cma_dev);
|
||||
kfree(cma_dev->default_roce_tos);
|
||||
kfree(cma_dev->default_gid_type);
|
||||
kfree(cma_dev);
|
||||
}
|
||||
|
@ -139,8 +139,50 @@ static ssize_t default_roce_mode_store(struct config_item *item,
|
||||
|
||||
CONFIGFS_ATTR(, default_roce_mode);
|
||||
|
||||
static ssize_t default_roce_tos_show(struct config_item *item, char *buf)
|
||||
{
|
||||
struct cma_device *cma_dev;
|
||||
struct cma_dev_port_group *group;
|
||||
ssize_t ret;
|
||||
u8 tos;
|
||||
|
||||
ret = cma_configfs_params_get(item, &cma_dev, &group);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
tos = cma_get_default_roce_tos(cma_dev, group->port_num);
|
||||
cma_configfs_params_put(cma_dev);
|
||||
|
||||
return sprintf(buf, "%u\n", tos);
|
||||
}
|
||||
|
||||
static ssize_t default_roce_tos_store(struct config_item *item,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct cma_device *cma_dev;
|
||||
struct cma_dev_port_group *group;
|
||||
ssize_t ret;
|
||||
u8 tos;
|
||||
|
||||
ret = kstrtou8(buf, 0, &tos);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = cma_configfs_params_get(item, &cma_dev, &group);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = cma_set_default_roce_tos(cma_dev, group->port_num, tos);
|
||||
cma_configfs_params_put(cma_dev);
|
||||
|
||||
return ret ? ret : strnlen(buf, count);
|
||||
}
|
||||
|
||||
CONFIGFS_ATTR(, default_roce_tos);
|
||||
|
||||
static struct configfs_attribute *cma_configfs_attributes[] = {
|
||||
&attr_default_roce_mode,
|
||||
&attr_default_roce_tos,
|
||||
NULL,
|
||||
};
|
||||
|
||||
|
@ -62,6 +62,9 @@ int cma_get_default_gid_type(struct cma_device *cma_dev,
|
||||
int cma_set_default_gid_type(struct cma_device *cma_dev,
|
||||
unsigned int port,
|
||||
enum ib_gid_type default_gid_type);
|
||||
int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port);
|
||||
int cma_set_default_roce_tos(struct cma_device *a_dev, unsigned int port,
|
||||
u8 default_roce_tos);
|
||||
struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev);
|
||||
|
||||
int ib_device_register_sysfs(struct ib_device *device,
|
||||
|
@@ -58,8 +58,8 @@ static int __ib_process_cq(struct ib_cq *cq, int budget)
 * %IB_POLL_DIRECT CQ. It does not offload CQ processing to a different
 * context and does not ask for completion interrupts from the HCA.
 *
 * Note: for compatibility reasons -1 can be passed in %budget for unlimited
 * polling. Do not use this feature in new code, it will be removed soon.
 * Note: do not pass -1 as %budget unless it is guaranteed that the number
 * of completions that will be processed is small.
 */
int ib_process_cq_direct(struct ib_cq *cq, int budget)
{

@@ -120,7 +120,7 @@ static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
 *
 * This is the proper interface to allocate a CQ for in-kernel users. A
 * CQ allocated with this interface will automatically be polled from the
 * specified context. The ULP needs must use wr->wr_cqe instead of wr->wr_id
 * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
 * to use this CQ abstraction.
 */
struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
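The ib_alloc_cq() comment fix above concerns the wr_cqe convention: completions for a CQ created with ib_alloc_cq() are dispatched through the ib_cqe->done callback embedded in the work request, not through wr_id. A minimal sketch of that convention (struct my_ctx, my_done() and my_post_send() are illustrative names, not part of the patch):

#include <rdma/ib_verbs.h>

struct my_ctx {
	struct ib_cqe cqe;	/* embedded so the handler can recover the context */
};

static void my_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_ctx *ctx = container_of(wc->wr_cqe, struct my_ctx, cqe);

	/* wc->status and wc->opcode describe the completed work request */
	(void)ctx;
}

/* Posting side: point the WR at the embedded ib_cqe instead of setting wr_id. */
static int my_post_send(struct ib_qp *qp, struct my_ctx *ctx)
{
	struct ib_send_wr wr = {}, *bad_wr;

	ctx->cqe.done = my_done;
	wr.wr_cqe = &ctx->cqe;
	wr.opcode = IB_WR_SEND;

	return ib_post_send(qp, &wr, &bad_wr);
}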
@ -659,7 +659,7 @@ int ib_query_port(struct ib_device *device,
|
||||
union ib_gid gid;
|
||||
int err;
|
||||
|
||||
if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
|
||||
if (!rdma_is_port_valid(device, port_num))
|
||||
return -EINVAL;
|
||||
|
||||
memset(port_attr, 0, sizeof(*port_attr));
|
||||
@ -825,7 +825,7 @@ int ib_modify_port(struct ib_device *device,
|
||||
if (!device->modify_port)
|
||||
return -ENOSYS;
|
||||
|
||||
if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
|
||||
if (!rdma_is_port_valid(device, port_num))
|
||||
return -EINVAL;
|
||||
|
||||
return device->modify_port(device, port_num, port_modify_mask,
|
||||
|
@ -316,7 +316,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
|
||||
/* Validate device and port */
|
||||
port_priv = ib_get_mad_port(device, port_num);
|
||||
if (!port_priv) {
|
||||
dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
|
||||
dev_notice(&device->dev,
|
||||
"ib_register_mad_agent: Invalid port %d\n",
|
||||
port_num);
|
||||
ret = ERR_PTR(-ENODEV);
|
||||
goto error1;
|
||||
}
|
||||
|
@ -144,7 +144,6 @@ static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_de
|
||||
static int is_eth_port_of_netdev(struct ib_device *ib_dev, u8 port,
|
||||
struct net_device *rdma_ndev, void *cookie)
|
||||
{
|
||||
struct net_device *event_ndev = (struct net_device *)cookie;
|
||||
struct net_device *real_dev;
|
||||
int res;
|
||||
|
||||
@ -152,11 +151,11 @@ static int is_eth_port_of_netdev(struct ib_device *ib_dev, u8 port,
|
||||
return 0;
|
||||
|
||||
rcu_read_lock();
|
||||
real_dev = rdma_vlan_dev_real_dev(event_ndev);
|
||||
real_dev = rdma_vlan_dev_real_dev(cookie);
|
||||
if (!real_dev)
|
||||
real_dev = event_ndev;
|
||||
real_dev = cookie;
|
||||
|
||||
res = ((rdma_is_upper_dev_rcu(rdma_ndev, event_ndev) &&
|
||||
res = ((rdma_is_upper_dev_rcu(rdma_ndev, cookie) &&
|
||||
(is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) &
|
||||
REQUIRED_BOND_STATES)) ||
|
||||
real_dev == rdma_ndev);
|
||||
@ -192,17 +191,16 @@ static int pass_all_filter(struct ib_device *ib_dev, u8 port,
|
||||
static int upper_device_filter(struct ib_device *ib_dev, u8 port,
|
||||
struct net_device *rdma_ndev, void *cookie)
|
||||
{
|
||||
struct net_device *event_ndev = (struct net_device *)cookie;
|
||||
int res;
|
||||
|
||||
if (!rdma_ndev)
|
||||
return 0;
|
||||
|
||||
if (rdma_ndev == event_ndev)
|
||||
if (rdma_ndev == cookie)
|
||||
return 1;
|
||||
|
||||
rcu_read_lock();
|
||||
res = rdma_is_upper_dev_rcu(rdma_ndev, event_ndev);
|
||||
res = rdma_is_upper_dev_rcu(rdma_ndev, cookie);
|
||||
rcu_read_unlock();
|
||||
|
||||
return res;
|
||||
@ -379,18 +377,14 @@ static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
|
||||
static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
|
||||
struct net_device *rdma_ndev, void *cookie)
|
||||
{
|
||||
struct net_device *event_ndev = (struct net_device *)cookie;
|
||||
|
||||
enum_netdev_default_gids(ib_dev, port, event_ndev, rdma_ndev);
|
||||
_add_netdev_ips(ib_dev, port, event_ndev);
|
||||
enum_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
|
||||
_add_netdev_ips(ib_dev, port, cookie);
|
||||
}
|
||||
|
||||
static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
|
||||
struct net_device *rdma_ndev, void *cookie)
|
||||
{
|
||||
struct net_device *event_ndev = (struct net_device *)cookie;
|
||||
|
||||
ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
|
||||
ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
|
||||
}
|
||||
|
||||
static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
|
||||
@ -460,7 +454,7 @@ static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
|
||||
u8 port,
|
||||
struct net_device *ndev))
|
||||
{
|
||||
struct net_device *ndev = (struct net_device *)cookie;
|
||||
struct net_device *ndev = cookie;
|
||||
struct upper_list *upper_iter;
|
||||
struct upper_list *upper_temp;
|
||||
LIST_HEAD(upper_list);
|
||||
@ -519,9 +513,7 @@ static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
|
||||
static void del_netdev_default_ips(struct ib_device *ib_dev, u8 port,
|
||||
struct net_device *rdma_ndev, void *cookie)
|
||||
{
|
||||
struct net_device *event_ndev = (struct net_device *)cookie;
|
||||
|
||||
bond_delete_netdev_default_gids(ib_dev, port, event_ndev, rdma_ndev);
|
||||
bond_delete_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
|
||||
}
|
||||
|
||||
/* The following functions operate on all IB devices. netdevice_event and
|
||||
|
@ -1205,8 +1205,7 @@ int ib_resolve_eth_dmac(struct ib_device *device,
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (ah_attr->port_num < rdma_start_port(device) ||
|
||||
ah_attr->port_num > rdma_end_port(device))
|
||||
if (!rdma_is_port_valid(device, ah_attr->port_num))
|
||||
return -EINVAL;
|
||||
|
||||
if (!rdma_cap_eth_ah(device, ah_attr->port_num))
|
||||
@ -1949,17 +1948,12 @@ static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
|
||||
*/
|
||||
static void __ib_drain_sq(struct ib_qp *qp)
|
||||
{
|
||||
struct ib_cq *cq = qp->send_cq;
|
||||
struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
|
||||
struct ib_drain_cqe sdrain;
|
||||
struct ib_send_wr swr = {}, *bad_swr;
|
||||
int ret;
|
||||
|
||||
if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) {
|
||||
WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
|
||||
"IB_POLL_DIRECT poll_ctx not supported for drain\n");
|
||||
return;
|
||||
}
|
||||
|
||||
swr.wr_cqe = &sdrain.cqe;
|
||||
sdrain.cqe.done = ib_drain_qp_done;
|
||||
init_completion(&sdrain.done);
|
||||
@ -1976,7 +1970,11 @@ static void __ib_drain_sq(struct ib_qp *qp)
|
||||
return;
|
||||
}
|
||||
|
||||
wait_for_completion(&sdrain.done);
|
||||
if (cq->poll_ctx == IB_POLL_DIRECT)
|
||||
while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
|
||||
ib_process_cq_direct(cq, -1);
|
||||
else
|
||||
wait_for_completion(&sdrain.done);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1984,17 +1982,12 @@ static void __ib_drain_sq(struct ib_qp *qp)
|
||||
*/
|
||||
static void __ib_drain_rq(struct ib_qp *qp)
|
||||
{
|
||||
struct ib_cq *cq = qp->recv_cq;
|
||||
struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
|
||||
struct ib_drain_cqe rdrain;
|
||||
struct ib_recv_wr rwr = {}, *bad_rwr;
|
||||
int ret;
|
||||
|
||||
if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) {
|
||||
WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
|
||||
"IB_POLL_DIRECT poll_ctx not supported for drain\n");
|
||||
return;
|
||||
}
|
||||
|
||||
rwr.wr_cqe = &rdrain.cqe;
|
||||
rdrain.cqe.done = ib_drain_qp_done;
|
||||
init_completion(&rdrain.done);
|
||||
@ -2011,7 +2004,11 @@ static void __ib_drain_rq(struct ib_qp *qp)
|
||||
return;
|
||||
}
|
||||
|
||||
wait_for_completion(&rdrain.done);
|
||||
if (cq->poll_ctx == IB_POLL_DIRECT)
|
||||
while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
|
||||
ib_process_cq_direct(cq, -1);
|
||||
else
|
||||
wait_for_completion(&rdrain.done);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -2028,8 +2025,7 @@ static void __ib_drain_rq(struct ib_qp *qp)
|
||||
* ensure there is room in the CQ and SQ for the drain work request and
|
||||
* completion.
|
||||
*
|
||||
* allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
|
||||
* IB_POLL_DIRECT.
|
||||
* allocate the CQ using ib_alloc_cq().
|
||||
*
|
||||
* ensure that there are no other contexts that are posting WRs concurrently.
|
||||
* Otherwise the drain is not guaranteed.
|
||||
@ -2057,8 +2053,7 @@ EXPORT_SYMBOL(ib_drain_sq);
|
||||
* ensure there is room in the CQ and RQ for the drain work request and
|
||||
* completion.
|
||||
*
|
||||
* allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
|
||||
* IB_POLL_DIRECT.
|
||||
* allocate the CQ using ib_alloc_cq().
|
||||
*
|
||||
* ensure that there are no other contexts that are posting WRs concurrently.
|
||||
* Otherwise the drain is not guaranteed.
|
||||
@ -2082,8 +2077,7 @@ EXPORT_SYMBOL(ib_drain_rq);
|
||||
* ensure there is room in the CQ(s), SQ, and RQ for drain work requests
|
||||
* and completions.
|
||||
*
|
||||
* allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be
|
||||
* IB_POLL_DIRECT.
|
||||
* allocate the CQs using ib_alloc_cq().
|
||||
*
|
||||
* ensure that there are no other contexts that are posting WRs concurrently.
|
||||
* Otherwise the drain is not guaranteed.
|
||||
|
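The comment updates above reflect the new behaviour added in __ib_drain_sq()/__ib_drain_rq(): draining now also works when the CQ was created with the IB_POLL_DIRECT poll context, because the core polls the CQ itself while waiting for the drain completions. A minimal teardown sketch (the QP is a placeholder; it assumes its CQs were allocated with ib_alloc_cq() and that no other context is still posting WRs):

#include <rdma/ib_verbs.h>

/* Illustrative teardown path: drain both queues, then destroy the QP.
 * The caller must have left room in the SQ/RQ and CQ(s) for the one
 * extra drain work request and its completion. */
static void example_qp_teardown(struct ib_qp *qp)
{
	ib_drain_qp(qp);	/* flushes outstanding send and receive WRs */
	ib_destroy_qp(qp);
}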
@ -12,3 +12,4 @@ obj-$(CONFIG_INFINIBAND_USNIC) += usnic/
|
||||
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
|
||||
obj-$(CONFIG_INFINIBAND_HNS) += hns/
|
||||
obj-$(CONFIG_INFINIBAND_QEDR) += qedr/
|
||||
obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re/
|
||||
|
drivers/infiniband/hw/bnxt_re/Kconfig (new file, 9 lines)
@@ -0,0 +1,9 @@
config INFINIBAND_BNXT_RE
	tristate "Broadcom Netxtreme HCA support"
	depends on ETHERNET && NETDEVICES && PCI && INET && DCB
	select NET_VENDOR_BROADCOM
	select BNXT
	---help---
	  This driver supports Broadcom NetXtreme-E 10/25/40/50 gigabit
	  RoCE HCAs. To compile this driver as a module, choose M here:
	  the module will be called bnxt_re.

drivers/infiniband/hw/bnxt_re/Makefile (new file, 6 lines)
@@ -0,0 +1,6 @@

ccflags-y := -Idrivers/net/ethernet/broadcom/bnxt
obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re.o
bnxt_re-y := main.o ib_verbs.o \
	     qplib_res.o qplib_rcfw.o \
	     qplib_sp.o qplib_fp.o
drivers/infiniband/hw/bnxt_re/bnxt_re.h (new file, 146 lines)
@@ -0,0 +1,146 @@
|
||||
/*
|
||||
* Broadcom NetXtreme-E RoCE driver.
|
||||
*
|
||||
* Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
|
||||
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
|
||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
|
||||
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
|
||||
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
|
||||
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* Description: Slow Path Operators (header)
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __BNXT_RE_H__
|
||||
#define __BNXT_RE_H__
|
||||
#define ROCE_DRV_MODULE_NAME "bnxt_re"
|
||||
#define ROCE_DRV_MODULE_VERSION "1.0.0"
|
||||
|
||||
#define BNXT_RE_DESC "Broadcom NetXtreme-C/E RoCE Driver"
|
||||
|
||||
#define BNXT_RE_PAGE_SIZE_4K BIT(12)
|
||||
#define BNXT_RE_PAGE_SIZE_8K BIT(13)
|
||||
#define BNXT_RE_PAGE_SIZE_64K BIT(16)
|
||||
#define BNXT_RE_PAGE_SIZE_2M BIT(21)
|
||||
#define BNXT_RE_PAGE_SIZE_8M BIT(23)
|
||||
#define BNXT_RE_PAGE_SIZE_1G BIT(30)
|
||||
|
||||
#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
|
||||
#define BNXT_RE_MAX_MRW_COUNT (64 * 1024)
|
||||
#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
|
||||
#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
|
||||
|
||||
struct bnxt_re_work {
|
||||
struct work_struct work;
|
||||
unsigned long event;
|
||||
struct bnxt_re_dev *rdev;
|
||||
struct net_device *vlan_dev;
|
||||
};
|
||||
|
||||
struct bnxt_re_sqp_entries {
|
||||
struct bnxt_qplib_sge sge;
|
||||
u64 wrid;
|
||||
/* For storing the actual qp1 cqe */
|
||||
struct bnxt_qplib_cqe cqe;
|
||||
struct bnxt_re_qp *qp1_qp;
|
||||
};
|
||||
|
||||
#define BNXT_RE_MIN_MSIX 2
|
||||
#define BNXT_RE_MAX_MSIX 16
|
||||
#define BNXT_RE_AEQ_IDX 0
|
||||
#define BNXT_RE_NQ_IDX 1
|
||||
|
||||
struct bnxt_re_dev {
|
||||
struct ib_device ibdev;
|
||||
struct list_head list;
|
||||
unsigned long flags;
|
||||
#define BNXT_RE_FLAG_NETDEV_REGISTERED 0
|
||||
#define BNXT_RE_FLAG_IBDEV_REGISTERED 1
|
||||
#define BNXT_RE_FLAG_GOT_MSIX 2
|
||||
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 8
|
||||
#define BNXT_RE_FLAG_QOS_WORK_REG 16
|
||||
struct net_device *netdev;
|
||||
unsigned int version, major, minor;
|
||||
struct bnxt_en_dev *en_dev;
|
||||
struct bnxt_msix_entry msix_entries[BNXT_RE_MAX_MSIX];
|
||||
int num_msix;
|
||||
|
||||
int id;
|
||||
|
||||
struct delayed_work worker;
|
||||
u8 cur_prio_map;
|
||||
|
||||
/* FP Notification Queue (CQ & SRQ) */
|
||||
struct tasklet_struct nq_task;
|
||||
|
||||
/* RCFW Channel */
|
||||
struct bnxt_qplib_rcfw rcfw;
|
||||
|
||||
/* NQ */
|
||||
struct bnxt_qplib_nq nq;
|
||||
|
||||
/* Device Resources */
|
||||
struct bnxt_qplib_dev_attr dev_attr;
|
||||
struct bnxt_qplib_ctx qplib_ctx;
|
||||
struct bnxt_qplib_res qplib_res;
|
||||
struct bnxt_qplib_dpi dpi_privileged;
|
||||
|
||||
atomic_t qp_count;
|
||||
struct mutex qp_lock; /* protect qp list */
|
||||
struct list_head qp_list;
|
||||
|
||||
atomic_t cq_count;
|
||||
atomic_t srq_count;
|
||||
atomic_t mr_count;
|
||||
atomic_t mw_count;
|
||||
/* Max of 2 lossless traffic class supported per port */
|
||||
u16 cosq[2];
|
||||
|
||||
/* QP for for handling QP1 packets */
|
||||
u32 sqp_id;
|
||||
struct bnxt_re_qp *qp1_sqp;
|
||||
struct bnxt_re_ah *sqp_ah;
|
||||
struct bnxt_re_sqp_entries sqp_tbl[1024];
|
||||
};
|
||||
|
||||
#define to_bnxt_re_dev(ptr, member) \
|
||||
container_of((ptr), struct bnxt_re_dev, member)
|
||||
|
||||
#define BNXT_RE_ROCE_V1_PACKET 0
|
||||
#define BNXT_RE_ROCEV2_IPV4_PACKET 2
|
||||
#define BNXT_RE_ROCEV2_IPV6_PACKET 3
|
||||
|
||||
static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
|
||||
{
|
||||
if (rdev)
|
||||
return &rdev->ibdev.dev;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#endif
|
drivers/infiniband/hw/bnxt_re/ib_verbs.c (new file, 3202 lines; diff suppressed because it is too large)

drivers/infiniband/hw/bnxt_re/ib_verbs.h (new file, 197 lines)
@@ -0,0 +1,197 @@
|
||||
/*
|
||||
* Broadcom NetXtreme-E RoCE driver.
|
||||
*
|
||||
* Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
|
||||
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
|
||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
|
||||
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
|
||||
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
|
||||
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* Description: IB Verbs interpreter (header)
|
||||
*/
|
||||
|
||||
#ifndef __BNXT_RE_IB_VERBS_H__
#define __BNXT_RE_IB_VERBS_H__

struct bnxt_re_gid_ctx {
	u32			idx;
	u32			refcnt;
};

struct bnxt_re_pd {
	struct bnxt_re_dev	*rdev;
	struct ib_pd		ib_pd;
	struct bnxt_qplib_pd	qplib_pd;
	struct bnxt_qplib_dpi	dpi;
};

struct bnxt_re_ah {
	struct bnxt_re_dev	*rdev;
	struct ib_ah		ib_ah;
	struct bnxt_qplib_ah	qplib_ah;
};

struct bnxt_re_qp {
	struct list_head	list;
	struct bnxt_re_dev	*rdev;
	struct ib_qp		ib_qp;
	spinlock_t		sq_lock;	/* protect sq */
	struct bnxt_qplib_qp	qplib_qp;
	struct ib_umem		*sumem;
	struct ib_umem		*rumem;
	/* QP1 */
	u32			send_psn;
	struct ib_ud_header	qp1_hdr;
};

struct bnxt_re_cq {
	struct bnxt_re_dev	*rdev;
	spinlock_t		cq_lock;	/* protect cq */
	u16			cq_count;
	u16			cq_period;
	struct ib_cq		ib_cq;
	struct bnxt_qplib_cq	qplib_cq;
	struct bnxt_qplib_cqe	*cql;
#define MAX_CQL_PER_POLL	1024
	u32			max_cql;
	struct ib_umem		*umem;
};

struct bnxt_re_mr {
	struct bnxt_re_dev	*rdev;
	struct ib_mr		ib_mr;
	struct ib_umem		*ib_umem;
	struct bnxt_qplib_mrw	qplib_mr;
	u32			npages;
	u64			*pages;
	struct bnxt_qplib_frpl	qplib_frpl;
};

struct bnxt_re_frpl {
	struct bnxt_re_dev	*rdev;
	struct bnxt_qplib_frpl	qplib_frpl;
	u64			*page_list;
};

struct bnxt_re_fmr {
	struct bnxt_re_dev	*rdev;
	struct ib_fmr		ib_fmr;
	struct bnxt_qplib_mrw	qplib_fmr;
};

struct bnxt_re_mw {
	struct bnxt_re_dev	*rdev;
	struct ib_mw		ib_mw;
	struct bnxt_qplib_mrw	qplib_mw;
};

struct bnxt_re_ucontext {
	struct bnxt_re_dev	*rdev;
	struct ib_ucontext	ib_uctx;
	struct bnxt_qplib_dpi	*dpi;
	void			*shpg;
	spinlock_t		sh_lock;	/* protect shpg */
};

struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num);

int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata);
int bnxt_re_modify_device(struct ib_device *ibdev,
			  int device_modify_mask,
			  struct ib_device_modify *device_modify);
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
		       struct ib_port_attr *port_attr);
int bnxt_re_modify_port(struct ib_device *ibdev, u8 port_num,
			int port_modify_mask,
			struct ib_port_modify *port_modify);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable);
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
		       u16 index, u16 *pkey);
int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
		    unsigned int index, void **context);
int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
		    unsigned int index, const union ib_gid *gid,
		    const struct ib_gid_attr *attr, void **context);
int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
		      int index, union ib_gid *gid);
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u8 port_num);
struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_udata *udata);
int bnxt_re_dealloc_pd(struct ib_pd *pd);
struct ib_ah *bnxt_re_create_ah(struct ib_pd *pd,
				struct ib_ah_attr *ah_attr,
				struct ib_udata *udata);
int bnxt_re_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
int bnxt_re_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
int bnxt_re_destroy_ah(struct ib_ah *ah);
struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *qp_init_attr,
				struct ib_udata *udata);
int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
		      int qp_attr_mask, struct ib_udata *udata);
int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int bnxt_re_destroy_qp(struct ib_qp *qp);
int bnxt_re_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
		      struct ib_send_wr **bad_send_wr);
int bnxt_re_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
		      struct ib_recv_wr **bad_recv_wr);
struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int bnxt_re_destroy_cq(struct ib_cq *cq);
int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
			       u32 max_num_sg);
int bnxt_re_dereg_mr(struct ib_mr *mr);
struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
				 struct ib_fmr_attr *fmr_attr);
int bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len,
			 u64 iova);
int bnxt_re_unmap_fmr(struct list_head *fmr_list);
int bnxt_re_dealloc_fmr(struct ib_fmr *fmr);
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int mr_access_flags,
				  struct ib_udata *udata);
struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
					   struct ib_udata *udata);
int bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
#endif /* __BNXT_RE_IB_VERBS_H__ */
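
Each bnxt_re_* object above embeds its ib_* counterpart, so the driver can recover its private structure from the pointer the IB core passes into the verbs callbacks declared here. A minimal sketch of that container_of() pattern, shown for the PD only; the helper name is illustrative and not part of this patch:

/* Illustrative only (assumes <linux/kernel.h> and <rdma/ib_verbs.h>):
 * recover the driver's PD from the ib_pd member embedded above.
 */
static inline struct bnxt_re_pd *to_bnxt_re_pd(struct ib_pd *ib_pd)
{
	return container_of(ib_pd, struct bnxt_re_pd, ib_pd);
}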

 1315  drivers/infiniband/hw/bnxt_re/main.c      (new file)
       File diff suppressed because it is too large
 2167  drivers/infiniband/hw/bnxt_re/qplib_fp.c  (new file)
       File diff suppressed because it is too large
  439  drivers/infiniband/hw/bnxt_re/qplib_fp.h  (new file)
@@ -0,0 +1,439 @@
/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators (header)
 */

#ifndef __BNXT_QPLIB_FP_H__
#define __BNXT_QPLIB_FP_H__

struct bnxt_qplib_sge {
	u64				addr;
	u32				lkey;
	u32				size;
};

#define BNXT_QPLIB_MAX_SQE_ENTRY_SIZE	sizeof(struct sq_send)

#define SQE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_SQE_ENTRY_SIZE)
#define SQE_MAX_IDX_PER_PG	(SQE_CNT_PER_PG - 1)

static inline u32 get_sqe_pg(u32 val)
{
	return ((val & ~SQE_MAX_IDX_PER_PG) / SQE_CNT_PER_PG);
}

static inline u32 get_sqe_idx(u32 val)
{
	return (val & SQE_MAX_IDX_PER_PG);
}

#define BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE	sizeof(struct sq_psn_search)

#define PSNE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE)
#define PSNE_MAX_IDX_PER_PG	(PSNE_CNT_PER_PG - 1)

static inline u32 get_psne_pg(u32 val)
{
	return ((val & ~PSNE_MAX_IDX_PER_PG) / PSNE_CNT_PER_PG);
}

static inline u32 get_psne_idx(u32 val)
{
	return (val & PSNE_MAX_IDX_PER_PG);
}

#define BNXT_QPLIB_QP_MAX_SGL	6

struct bnxt_qplib_swq {
	u64				wr_id;
	u8				type;
	u8				flags;
	u32				start_psn;
	u32				next_psn;
	struct sq_psn_search		*psn_search;
};

struct bnxt_qplib_swqe {
	/* General */
	u64				wr_id;
	u8				reqs_type;
	u8				type;
#define BNXT_QPLIB_SWQE_TYPE_SEND			0
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM		1
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV		2
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE			4
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM	5
#define BNXT_QPLIB_SWQE_TYPE_RDMA_READ			6
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP		8
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD	11
#define BNXT_QPLIB_SWQE_TYPE_LOCAL_INV			12
#define BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR		13
#define BNXT_QPLIB_SWQE_TYPE_REG_MR			13
#define BNXT_QPLIB_SWQE_TYPE_BIND_MW			14
#define BNXT_QPLIB_SWQE_TYPE_RECV			128
#define BNXT_QPLIB_SWQE_TYPE_RECV_RDMA_IMM		129
	u8				flags;
#define BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP		BIT(0)
#define BNXT_QPLIB_SWQE_FLAGS_RD_ATOMIC_FENCE		BIT(1)
#define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE			BIT(2)
#define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT		BIT(3)
#define BNXT_QPLIB_SWQE_FLAGS_INLINE			BIT(4)
	struct bnxt_qplib_sge		sg_list[BNXT_QPLIB_QP_MAX_SGL];
	int				num_sge;
	/* Max inline data is 96 bytes */
	u32				inline_len;
#define BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH		96
	u8		inline_data[BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH];

	union {
		/* Send, with imm, inval key */
		struct {
			union {
				__be32		imm_data;
				u32		inv_key;
			};
			u32		q_key;
			u32		dst_qp;
			u16		avid;
		} send;

		/* Send Raw Ethernet and QP1 */
		struct {
			u16		lflags;
			u16		cfa_action;
			u32		cfa_meta;
		} rawqp1;

		/* RDMA write, with imm, read */
		struct {
			union {
				__be32		imm_data;
				u32		inv_key;
			};
			u64		remote_va;
			u32		r_key;
		} rdma;

		/* Atomic cmp/swap, fetch/add */
		struct {
			u64		remote_va;
			u32		r_key;
			u64		swap_data;
			u64		cmp_data;
		} atomic;

		/* Local Invalidate */
		struct {
			u32		inv_l_key;
		} local_inv;

		/* FR-PMR */
		struct {
			u8		access_cntl;
			u8		pg_sz_log;
			bool		zero_based;
			u32		l_key;
			u32		length;
			u8		pbl_pg_sz_log;
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4K			0
#define BNXT_QPLIB_SWQE_PAGE_SIZE_8K			1
#define BNXT_QPLIB_SWQE_PAGE_SIZE_64K			4
#define BNXT_QPLIB_SWQE_PAGE_SIZE_256K			6
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1M			8
#define BNXT_QPLIB_SWQE_PAGE_SIZE_2M			9
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4M			10
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1G			18
			u8		levels;
#define PAGE_SHIFT_4K	12
			__le64		*pbl_ptr;
			dma_addr_t	pbl_dma_ptr;
			u64		*page_list;
			u16		page_list_len;
			u64		va;
		} frmr;

		/* Bind */
		struct {
			u8		access_cntl;
#define BNXT_QPLIB_BIND_SWQE_ACCESS_LOCAL_WRITE		BIT(0)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_READ		BIT(1)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_WRITE	BIT(2)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_ATOMIC	BIT(3)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_WINDOW_BIND		BIT(4)
			bool		zero_based;
			u8		mw_type;
			u32		parent_l_key;
			u32		r_key;
			u64		va;
			u32		length;
		} bind;
	};
};

#define BNXT_QPLIB_MAX_RQE_ENTRY_SIZE sizeof(struct rq_wqe)
|
||||
|
||||
#define RQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_RQE_ENTRY_SIZE)
|
||||
#define RQE_MAX_IDX_PER_PG (RQE_CNT_PER_PG - 1)
|
||||
#define RQE_PG(x) (((x) & ~RQE_MAX_IDX_PER_PG) / RQE_CNT_PER_PG)
|
||||
#define RQE_IDX(x) ((x) & RQE_MAX_IDX_PER_PG)
|
||||
|
||||
struct bnxt_qplib_q {
|
||||
struct bnxt_qplib_hwq hwq;
|
||||
struct bnxt_qplib_swq *swq;
|
||||
struct scatterlist *sglist;
|
||||
u32 nmap;
|
||||
u32 max_wqe;
|
||||
u16 max_sge;
|
||||
u32 psn;
|
||||
bool flush_in_progress;
|
||||
};
|
||||
|
||||
struct bnxt_qplib_qp {
|
||||
struct bnxt_qplib_pd *pd;
|
||||
struct bnxt_qplib_dpi *dpi;
|
||||
u64 qp_handle;
|
||||
u32 id;
|
||||
u8 type;
|
||||
u8 sig_type;
|
||||
u32 modify_flags;
|
||||
u8 state;
|
||||
u8 cur_qp_state;
|
||||
u32 max_inline_data;
|
||||
u32 mtu;
|
||||
u8 path_mtu;
|
||||
bool en_sqd_async_notify;
|
||||
u16 pkey_index;
|
||||
u32 qkey;
|
||||
u32 dest_qp_id;
|
||||
u8 access;
|
||||
u8 timeout;
|
||||
u8 retry_cnt;
|
||||
u8 rnr_retry;
|
||||
u32 min_rnr_timer;
|
||||
u32 max_rd_atomic;
|
||||
u32 max_dest_rd_atomic;
|
||||
u32 dest_qpn;
|
||||
u8 smac[6];
|
||||
u16 vlan_id;
|
||||
u8 nw_type;
|
||||
struct bnxt_qplib_ah ah;
|
||||
|
||||
#define BTH_PSN_MASK ((1 << 24) - 1)
|
||||
/* SQ */
|
||||
struct bnxt_qplib_q sq;
|
||||
/* RQ */
|
||||
struct bnxt_qplib_q rq;
|
||||
/* SRQ */
|
||||
struct bnxt_qplib_srq *srq;
|
||||
/* CQ */
|
||||
struct bnxt_qplib_cq *scq;
|
||||
struct bnxt_qplib_cq *rcq;
|
||||
/* IRRQ and ORRQ */
|
||||
struct bnxt_qplib_hwq irrq;
|
||||
struct bnxt_qplib_hwq orrq;
|
||||
/* Header buffer for QP1 */
|
||||
int sq_hdr_buf_size;
|
||||
int rq_hdr_buf_size;
|
||||
/*
|
||||
* Buffer space for ETH(14), IP or GRH(40), UDP header(8)
|
||||
* and ib_bth + ib_deth (20).
|
||||
* Max required is 82 when RoCE V2 is enabled
|
||||
*/
|
||||
#define BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2 86
|
||||
/* Ethernet header = 14 */
|
||||
/* ib_grh = 40 (provided by MAD) */
|
||||
/* ib_bth + ib_deth = 20 */
|
||||
/* MAD = 256 (provided by MAD) */
|
||||
/* iCRC = 4 */
|
||||
#define BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE 14
|
||||
#define BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2 512
|
||||
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 20
|
||||
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 40
|
||||
#define BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE 20
|
||||
void *sq_hdr_buf;
|
||||
dma_addr_t sq_hdr_buf_map;
|
||||
void *rq_hdr_buf;
|
||||
dma_addr_t rq_hdr_buf_map;
|
||||
};
|
||||
|
||||
#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)
|
||||
|
||||
#define CQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_CQE_ENTRY_SIZE)
|
||||
#define CQE_MAX_IDX_PER_PG (CQE_CNT_PER_PG - 1)
|
||||
#define CQE_PG(x) (((x) & ~CQE_MAX_IDX_PER_PG) / CQE_CNT_PER_PG)
|
||||
#define CQE_IDX(x) ((x) & CQE_MAX_IDX_PER_PG)
|
||||
|
||||
#define ROCE_CQE_CMP_V 0
|
||||
#define CQE_CMP_VALID(hdr, raw_cons, cp_bit) \
|
||||
(!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \
|
||||
!((raw_cons) & (cp_bit)))
|
||||
|
||||
struct bnxt_qplib_cqe {
|
||||
u8 status;
|
||||
u8 type;
|
||||
u8 opcode;
|
||||
u32 length;
|
||||
u64 wr_id;
|
||||
union {
|
||||
__be32 immdata;
|
||||
u32 invrkey;
|
||||
};
|
||||
u64 qp_handle;
|
||||
u64 mr_handle;
|
||||
u16 flags;
|
||||
u8 smac[6];
|
||||
u32 src_qp;
|
||||
u16 raweth_qp1_flags;
|
||||
u16 raweth_qp1_errors;
|
||||
u16 raweth_qp1_cfa_code;
|
||||
u32 raweth_qp1_flags2;
|
||||
u32 raweth_qp1_metadata;
|
||||
u8 raweth_qp1_payload_offset;
|
||||
u16 pkey_index;
|
||||
};
|
||||
|
||||
#define BNXT_QPLIB_QUEUE_START_PERIOD 0x01
|
||||
struct bnxt_qplib_cq {
|
||||
struct bnxt_qplib_dpi *dpi;
|
||||
void __iomem *dbr_base;
|
||||
u32 max_wqe;
|
||||
u32 id;
|
||||
u16 count;
|
||||
u16 period;
|
||||
struct bnxt_qplib_hwq hwq;
|
||||
u32 cnq_hw_ring_id;
|
||||
bool resize_in_progress;
|
||||
struct scatterlist *sghead;
|
||||
u32 nmap;
|
||||
u64 cq_handle;
|
||||
|
||||
#define CQ_RESIZE_WAIT_TIME_MS 500
|
||||
unsigned long flags;
|
||||
#define CQ_FLAGS_RESIZE_IN_PROG 1
|
||||
wait_queue_head_t waitq;
|
||||
};
|
||||
|
||||
#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
|
||||
#define BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE sizeof(struct xrrq_orrq)
|
||||
#define IRD_LIMIT_TO_IRRQ_SLOTS(x) (2 * (x) + 2)
|
||||
#define IRRQ_SLOTS_TO_IRD_LIMIT(s) (((s) >> 1) - 1)
|
||||
#define ORD_LIMIT_TO_ORRQ_SLOTS(x) ((x) + 1)
|
||||
#define ORRQ_SLOTS_TO_ORD_LIMIT(s) ((s) - 1)
|
||||
|
||||
#define BNXT_QPLIB_MAX_NQE_ENTRY_SIZE sizeof(struct nq_base)
|
||||
|
||||
#define NQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_NQE_ENTRY_SIZE)
|
||||
#define NQE_MAX_IDX_PER_PG (NQE_CNT_PER_PG - 1)
|
||||
#define NQE_PG(x) (((x) & ~NQE_MAX_IDX_PER_PG) / NQE_CNT_PER_PG)
|
||||
#define NQE_IDX(x) ((x) & NQE_MAX_IDX_PER_PG)
|
||||
|
||||
#define NQE_CMP_VALID(hdr, raw_cons, cp_bit) \
|
||||
(!!(le32_to_cpu((hdr)->info63_v[0]) & NQ_BASE_V) == \
|
||||
!((raw_cons) & (cp_bit)))
|
||||
|
||||
#define BNXT_QPLIB_NQE_MAX_CNT (128 * 1024)
|
||||
|
||||
#define NQ_CONS_PCI_BAR_REGION 2
|
||||
#define NQ_DB_KEY_CP (0x2 << CMPL_DOORBELL_KEY_SFT)
|
||||
#define NQ_DB_IDX_VALID CMPL_DOORBELL_IDX_VALID
|
||||
#define NQ_DB_IRQ_DIS CMPL_DOORBELL_MASK
|
||||
#define NQ_DB_CP_FLAGS_REARM (NQ_DB_KEY_CP | \
|
||||
NQ_DB_IDX_VALID)
|
||||
#define NQ_DB_CP_FLAGS (NQ_DB_KEY_CP | \
|
||||
NQ_DB_IDX_VALID | \
|
||||
NQ_DB_IRQ_DIS)
|
||||
#define NQ_DB_REARM(db, raw_cons, cp_bit) \
|
||||
writel(NQ_DB_CP_FLAGS_REARM | ((raw_cons) & ((cp_bit) - 1)), db)
|
||||
#define NQ_DB(db, raw_cons, cp_bit) \
|
||||
writel(NQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
|
||||
|
||||
struct bnxt_qplib_nq {
|
||||
struct pci_dev *pdev;
|
||||
|
||||
int vector;
|
||||
int budget;
|
||||
bool requested;
|
||||
struct tasklet_struct worker;
|
||||
struct bnxt_qplib_hwq hwq;
|
||||
|
||||
u16 bar_reg;
|
||||
u16 bar_reg_off;
|
||||
u16 ring_id;
|
||||
void __iomem *bar_reg_iomem;
|
||||
|
||||
int (*cqn_handler)
|
||||
(struct bnxt_qplib_nq *nq,
|
||||
struct bnxt_qplib_cq *cq);
|
||||
int (*srqn_handler)
|
||||
(struct bnxt_qplib_nq *nq,
|
||||
void *srq,
|
||||
u8 event);
|
||||
};
|
||||
|
||||
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
|
||||
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
|
||||
int msix_vector, int bar_reg_offset,
|
||||
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
|
||||
struct bnxt_qplib_cq *cq),
|
||||
int (*srqn_handler)(struct bnxt_qplib_nq *nq,
|
||||
void *srq,
|
||||
u8 event));
|
||||
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
|
||||
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
|
||||
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
|
||||
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
|
||||
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
|
||||
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
|
||||
struct bnxt_qplib_sge *sge);
|
||||
void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
|
||||
struct bnxt_qplib_sge *sge);
|
||||
u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp);
|
||||
dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp,
|
||||
u32 index);
|
||||
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp);
|
||||
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
|
||||
struct bnxt_qplib_swqe *wqe);
|
||||
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp);
|
||||
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
|
||||
struct bnxt_qplib_swqe *wqe);
|
||||
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
|
||||
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
|
||||
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
|
||||
int num);
|
||||
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
|
||||
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
|
||||
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
|
||||
#endif /* __BNXT_QPLIB_FP_H__ */
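
The send path implied by this header is: fill a struct bnxt_qplib_swqe, hand it to bnxt_qplib_post_send(), then ring the SQ doorbell with bnxt_qplib_post_send_db(). A minimal sketch under those assumptions; the function name and buffer values are illustrative, and the post-then-ring ordering is the usual produce/doorbell convention rather than something this header spells out:

/* Illustrative sketch only: post one signalled SEND on an existing
 * bnxt_qplib_qp using the API declared above.
 */
static int example_post_one_send(struct bnxt_qplib_qp *qp,
				 u64 dma_addr, u32 lkey, u32 len)
{
	struct bnxt_qplib_swqe wqe = {};
	int rc;

	wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
	wqe.flags = BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	wqe.wr_id = 1;				/* echoed back in the CQE */
	wqe.sg_list[0].addr = dma_addr;		/* DMA-mapped source buffer */
	wqe.sg_list[0].lkey = lkey;
	wqe.sg_list[0].size = len;
	wqe.num_sge = 1;

	rc = bnxt_qplib_post_send(qp, &wqe);
	if (!rc)
		bnxt_qplib_post_send_db(qp);	/* ring the SQ doorbell */
	return rc;
}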

  694  drivers/infiniband/hw/bnxt_re/qplib_rcfw.c  (new file)
@@ -0,0 +1,694 @@
/*
|
||||
* Broadcom NetXtreme-E RoCE driver.
|
||||
*
|
||||
* Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
|
||||
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
|
||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
|
||||
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
|
||||
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
|
||||
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* Description: RDMA Controller HW interface
|
||||
*/
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/prefetch.h>
|
||||
#include "roce_hsi.h"
|
||||
#include "qplib_res.h"
|
||||
#include "qplib_rcfw.h"
|
||||
static void bnxt_qplib_service_creq(unsigned long data);
|
||||
|
||||
/* Hardware communication channel */
|
||||
int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
|
||||
{
|
||||
u16 cbit;
|
||||
int rc;
|
||||
|
||||
cookie &= RCFW_MAX_COOKIE_VALUE;
|
||||
cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
|
||||
if (!test_bit(cbit, rcfw->cmdq_bitmap))
|
||||
dev_warn(&rcfw->pdev->dev,
|
||||
"QPLIB: CMD bit %d for cookie 0x%x is not set?",
|
||||
cbit, cookie);
|
||||
|
||||
rc = wait_event_timeout(rcfw->waitq,
|
||||
!test_bit(cbit, rcfw->cmdq_bitmap),
|
||||
msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
|
||||
if (!rc) {
|
||||
dev_warn(&rcfw->pdev->dev,
|
||||
"QPLIB: Bono Error: timeout %d msec, msg {0x%x}\n",
|
||||
RCFW_CMD_WAIT_TIME_MS, cookie);
|
||||
}
|
||||
|
||||
return rc;
|
||||
};
|
||||
|
||||
int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
|
||||
{
|
||||
u32 count = -1;
|
||||
u16 cbit;
|
||||
|
||||
cookie &= RCFW_MAX_COOKIE_VALUE;
|
||||
cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
|
||||
if (!test_bit(cbit, rcfw->cmdq_bitmap))
|
||||
goto done;
|
||||
do {
|
||||
bnxt_qplib_service_creq((unsigned long)rcfw);
|
||||
} while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
|
||||
done:
|
||||
return count;
|
||||
};
|
||||
|
||||
void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
|
||||
struct cmdq_base *req, void **crsbe,
|
||||
u8 is_block)
|
||||
{
|
||||
struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
|
||||
struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
|
||||
struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
|
||||
struct bnxt_qplib_hwq *crsb = &rcfw->crsb;
|
||||
struct bnxt_qplib_crsqe *crsqe = NULL;
|
||||
struct bnxt_qplib_crsbe **crsb_ptr;
|
||||
u32 sw_prod, cmdq_prod;
|
||||
u8 retry_cnt = 0xFF;
|
||||
dma_addr_t dma_addr;
|
||||
unsigned long flags;
|
||||
u32 size, opcode;
|
||||
u16 cookie, cbit;
|
||||
int pg, idx;
|
||||
u8 *preq;
|
||||
|
||||
retry:
|
||||
opcode = req->opcode;
|
||||
if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
|
||||
(opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
|
||||
opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW)) {
|
||||
dev_err(&rcfw->pdev->dev,
|
||||
"QPLIB: RCFW not initialized, reject opcode 0x%x",
|
||||
opcode);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
|
||||
opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
|
||||
dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Cmdq are in 16-byte units, each request can consume 1 or more
|
||||
* cmdqe
|
||||
*/
|
||||
spin_lock_irqsave(&cmdq->lock, flags);
|
||||
if (req->cmd_size > cmdq->max_elements -
|
||||
((HWQ_CMP(cmdq->prod, cmdq) - HWQ_CMP(cmdq->cons, cmdq)) &
|
||||
(cmdq->max_elements - 1))) {
|
||||
dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
|
||||
spin_unlock_irqrestore(&cmdq->lock, flags);
|
||||
|
||||
if (!retry_cnt--)
|
||||
return NULL;
|
||||
goto retry;
|
||||
}
|
||||
|
||||
retry_cnt = 0xFF;
|
||||
|
||||
cookie = atomic_inc_return(&rcfw->seq_num) & RCFW_MAX_COOKIE_VALUE;
|
||||
cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
|
||||
if (is_block)
|
||||
cookie |= RCFW_CMD_IS_BLOCKING;
|
||||
req->cookie = cpu_to_le16(cookie);
|
||||
if (test_and_set_bit(cbit, rcfw->cmdq_bitmap)) {
|
||||
dev_err(&rcfw->pdev->dev,
|
||||
"QPLIB: RCFW MAX outstanding cmd reached!");
|
||||
atomic_dec(&rcfw->seq_num);
|
||||
spin_unlock_irqrestore(&cmdq->lock, flags);
|
||||
|
||||
if (!retry_cnt--)
|
||||
return NULL;
|
||||
goto retry;
|
||||
}
|
||||
/* Reserve a resp buffer slot if requested */
|
||||
if (req->resp_size && crsbe) {
|
||||
spin_lock(&crsb->lock);
|
||||
sw_prod = HWQ_CMP(crsb->prod, crsb);
|
||||
crsb_ptr = (struct bnxt_qplib_crsbe **)crsb->pbl_ptr;
|
||||
*crsbe = (void *)&crsb_ptr[get_crsb_pg(sw_prod)]
|
||||
[get_crsb_idx(sw_prod)];
|
||||
bnxt_qplib_crsb_dma_next(crsb->pbl_dma_ptr, sw_prod, &dma_addr);
|
||||
req->resp_addr = cpu_to_le64(dma_addr);
|
||||
crsb->prod++;
|
||||
spin_unlock(&crsb->lock);
|
||||
|
||||
req->resp_size = (sizeof(struct bnxt_qplib_crsbe) +
|
||||
BNXT_QPLIB_CMDQE_UNITS - 1) /
|
||||
BNXT_QPLIB_CMDQE_UNITS;
|
||||
}
|
||||
cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
|
||||
preq = (u8 *)req;
|
||||
size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
|
||||
do {
|
||||
pg = 0;
|
||||
idx = 0;
|
||||
|
||||
/* Locate the next cmdq slot */
|
||||
sw_prod = HWQ_CMP(cmdq->prod, cmdq);
|
||||
cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod)][get_cmdq_idx(sw_prod)];
|
||||
if (!cmdqe) {
|
||||
dev_err(&rcfw->pdev->dev,
|
||||
"QPLIB: RCFW request failed with no cmdqe!");
|
||||
goto done;
|
||||
}
|
||||
/* Copy a segment of the req cmd to the cmdq */
|
||||
memset(cmdqe, 0, sizeof(*cmdqe));
|
||||
memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe)));
|
||||
preq += min_t(u32, size, sizeof(*cmdqe));
|
||||
size -= min_t(u32, size, sizeof(*cmdqe));
|
||||
cmdq->prod++;
|
||||
} while (size > 0);
|
||||
|
||||
cmdq_prod = cmdq->prod;
|
||||
if (rcfw->flags & FIRMWARE_FIRST_FLAG) {
|
||||
/* The very first doorbell write is required to set this flag
|
||||
* which prompts the FW to reset its internal pointers
|
||||
*/
|
||||
cmdq_prod |= FIRMWARE_FIRST_FLAG;
|
||||
rcfw->flags &= ~FIRMWARE_FIRST_FLAG;
|
||||
}
|
||||
sw_prod = HWQ_CMP(crsq->prod, crsq);
|
||||
crsqe = &crsq->crsq[sw_prod];
|
||||
memset(crsqe, 0, sizeof(*crsqe));
|
||||
crsq->prod++;
|
||||
crsqe->req_size = req->cmd_size;
|
||||
|
||||
/* ring CMDQ DB */
|
||||
writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
|
||||
rcfw->cmdq_bar_reg_prod_off);
|
||||
writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
|
||||
rcfw->cmdq_bar_reg_trig_off);
|
||||
done:
|
||||
spin_unlock_irqrestore(&cmdq->lock, flags);
|
||||
/* Return the CREQ response pointer */
|
||||
return crsqe ? &crsqe->qp_event : NULL;
|
||||
}
|
||||
|
||||
/* Completions */
|
||||
static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
|
||||
struct creq_func_event *func_event)
|
||||
{
|
||||
switch (func_event->event) {
|
||||
case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
|
||||
break;
|
||||
case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
|
||||
break;
|
||||
case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
|
||||
break;
|
||||
case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
|
||||
break;
|
||||
case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
|
||||
break;
|
||||
case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
|
||||
break;
|
||||
case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
|
||||
break;
|
||||
case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
|
||||
/* SRQ ctx error, call srq_handler??
|
||||
* But there's no SRQ handle!
|
||||
*/
|
||||
break;
|
||||
case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
|
||||
break;
|
||||
case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
|
||||
break;
|
||||
case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
|
||||
break;
|
||||
case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
|
||||
break;
|
||||
case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
|
||||
struct creq_qp_event *qp_event)
|
||||
{
|
||||
struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
|
||||
struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
|
||||
struct bnxt_qplib_crsqe *crsqe;
|
||||
u16 cbit, cookie, blocked = 0;
|
||||
unsigned long flags;
|
||||
u32 sw_cons;
|
||||
|
||||
switch (qp_event->event) {
|
||||
case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
|
||||
dev_dbg(&rcfw->pdev->dev,
|
||||
"QPLIB: Received QP error notification");
|
||||
break;
|
||||
default:
|
||||
/* Command Response */
|
||||
spin_lock_irqsave(&cmdq->lock, flags);
|
||||
sw_cons = HWQ_CMP(crsq->cons, crsq);
|
||||
crsqe = &crsq->crsq[sw_cons];
|
||||
crsq->cons++;
|
||||
memcpy(&crsqe->qp_event, qp_event, sizeof(crsqe->qp_event));
|
||||
|
||||
cookie = le16_to_cpu(crsqe->qp_event.cookie);
|
||||
blocked = cookie & RCFW_CMD_IS_BLOCKING;
|
||||
cookie &= RCFW_MAX_COOKIE_VALUE;
|
||||
cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
|
||||
if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
|
||||
dev_warn(&rcfw->pdev->dev,
|
||||
"QPLIB: CMD bit %d was not requested", cbit);
|
||||
|
||||
cmdq->cons += crsqe->req_size;
|
||||
spin_unlock_irqrestore(&cmdq->lock, flags);
|
||||
if (!blocked)
|
||||
wake_up(&rcfw->waitq);
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* SP - CREQ Completion handlers */
|
||||
static void bnxt_qplib_service_creq(unsigned long data)
|
||||
{
|
||||
struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data;
|
||||
struct bnxt_qplib_hwq *creq = &rcfw->creq;
|
||||
struct creq_base *creqe, **creq_ptr;
|
||||
u32 sw_cons, raw_cons;
|
||||
unsigned long flags;
|
||||
u32 type;
|
||||
|
||||
/* Service the CREQ until empty */
|
||||
spin_lock_irqsave(&creq->lock, flags);
|
||||
raw_cons = creq->cons;
|
||||
while (1) {
|
||||
sw_cons = HWQ_CMP(raw_cons, creq);
|
||||
creq_ptr = (struct creq_base **)creq->pbl_ptr;
|
||||
creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
|
||||
if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements))
|
||||
break;
|
||||
|
||||
type = creqe->type & CREQ_BASE_TYPE_MASK;
|
||||
switch (type) {
|
||||
case CREQ_BASE_TYPE_QP_EVENT:
|
||||
if (!bnxt_qplib_process_qp_event
|
||||
(rcfw, (struct creq_qp_event *)creqe))
|
||||
rcfw->creq_qp_event_processed++;
|
||||
else {
|
||||
dev_warn(&rcfw->pdev->dev, "QPLIB: crsqe with");
|
||||
dev_warn(&rcfw->pdev->dev,
|
||||
"QPLIB: type = 0x%x not handled",
|
||||
type);
|
||||
}
|
||||
break;
|
||||
case CREQ_BASE_TYPE_FUNC_EVENT:
|
||||
if (!bnxt_qplib_process_func_event
|
||||
(rcfw, (struct creq_func_event *)creqe))
|
||||
rcfw->creq_func_event_processed++;
|
||||
else
|
||||
dev_warn
|
||||
(&rcfw->pdev->dev, "QPLIB:aeqe:%#x Not handled",
|
||||
type);
|
||||
break;
|
||||
default:
|
||||
dev_warn(&rcfw->pdev->dev, "QPLIB: creqe with ");
|
||||
dev_warn(&rcfw->pdev->dev,
|
||||
"QPLIB: op_event = 0x%x not handled", type);
|
||||
break;
|
||||
}
|
||||
raw_cons++;
|
||||
}
|
||||
if (creq->cons != raw_cons) {
|
||||
creq->cons = raw_cons;
|
||||
CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
|
||||
creq->max_elements);
|
||||
}
|
||||
spin_unlock_irqrestore(&creq->lock, flags);
|
||||
}
|
||||
|
||||
static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
|
||||
{
|
||||
struct bnxt_qplib_rcfw *rcfw = dev_instance;
|
||||
struct bnxt_qplib_hwq *creq = &rcfw->creq;
|
||||
struct creq_base **creq_ptr;
|
||||
u32 sw_cons;
|
||||
|
||||
/* Prefetch the CREQ element */
|
||||
sw_cons = HWQ_CMP(creq->cons, creq);
|
||||
creq_ptr = (struct creq_base **)rcfw->creq.pbl_ptr;
|
||||
prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]);
|
||||
|
||||
tasklet_schedule(&rcfw->worker);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
/* RCFW */
|
||||
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
|
||||
{
|
||||
struct creq_deinitialize_fw_resp *resp;
|
||||
struct cmdq_deinitialize_fw req;
|
||||
u16 cmd_flags = 0;
|
||||
|
||||
RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
|
||||
resp = (struct creq_deinitialize_fw_resp *)
|
||||
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
NULL, 0);
|
||||
if (!resp)
|
||||
return -EINVAL;
|
||||
|
||||
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie)))
|
||||
return -ETIMEDOUT;
|
||||
|
||||
if (resp->status ||
|
||||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie))
|
||||
return -EFAULT;
|
||||
|
||||
clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
|
||||
{
|
||||
return (pbl->pg_size == ROCE_PG_SIZE_4K ?
|
||||
CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K :
|
||||
pbl->pg_size == ROCE_PG_SIZE_8K ?
|
||||
CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K :
|
||||
pbl->pg_size == ROCE_PG_SIZE_64K ?
|
||||
CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K :
|
||||
pbl->pg_size == ROCE_PG_SIZE_2M ?
|
||||
CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M :
|
||||
pbl->pg_size == ROCE_PG_SIZE_8M ?
|
||||
CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M :
|
||||
pbl->pg_size == ROCE_PG_SIZE_1G ?
|
||||
CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G :
|
||||
CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K);
|
||||
}
|
||||
|
||||
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
|
||||
struct bnxt_qplib_ctx *ctx, int is_virtfn)
|
||||
{
|
||||
struct creq_initialize_fw_resp *resp;
|
||||
struct cmdq_initialize_fw req;
|
||||
u16 cmd_flags = 0, level;
|
||||
|
||||
RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
|
||||
|
||||
/*
|
||||
* VFs need not setup the HW context area, PF
|
||||
* shall setup this area for VF. Skipping the
|
||||
* HW programming
|
||||
*/
|
||||
if (is_virtfn)
|
||||
goto skip_ctx_setup;
|
||||
|
||||
level = ctx->qpc_tbl.level;
|
||||
req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
|
||||
__get_pbl_pg_idx(&ctx->qpc_tbl.pbl[level]);
|
||||
level = ctx->mrw_tbl.level;
|
||||
req.mrw_pg_size_mrw_lvl = (level << CMDQ_INITIALIZE_FW_MRW_LVL_SFT) |
|
||||
__get_pbl_pg_idx(&ctx->mrw_tbl.pbl[level]);
|
||||
level = ctx->srqc_tbl.level;
|
||||
req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
|
||||
__get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
|
||||
level = ctx->cq_tbl.level;
|
||||
req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
|
||||
__get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
|
||||
level = ctx->srqc_tbl.level;
|
||||
req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
|
||||
__get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
|
||||
level = ctx->cq_tbl.level;
|
||||
req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
|
||||
__get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
|
||||
level = ctx->tim_tbl.level;
|
||||
req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) |
|
||||
__get_pbl_pg_idx(&ctx->tim_tbl.pbl[level]);
|
||||
level = ctx->tqm_pde_level;
|
||||
req.tqm_pg_size_tqm_lvl = (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
|
||||
__get_pbl_pg_idx(&ctx->tqm_pde.pbl[level]);
|
||||
|
||||
req.qpc_page_dir =
|
||||
cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
|
||||
req.mrw_page_dir =
|
||||
cpu_to_le64(ctx->mrw_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
|
||||
req.srq_page_dir =
|
||||
cpu_to_le64(ctx->srqc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
|
||||
req.cq_page_dir =
|
||||
cpu_to_le64(ctx->cq_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
|
||||
req.tim_page_dir =
|
||||
cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
|
||||
req.tqm_page_dir =
|
||||
cpu_to_le64(ctx->tqm_pde.pbl[PBL_LVL_0].pg_map_arr[0]);
|
||||
|
||||
req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
|
||||
req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
|
||||
req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
|
||||
req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);
|
||||
|
||||
req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
|
||||
req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
|
||||
req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
|
||||
req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
|
||||
req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);
|
||||
|
||||
skip_ctx_setup:
|
||||
req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
|
||||
resp = (struct creq_initialize_fw_resp *)
|
||||
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
NULL, 0);
|
||||
if (!resp) {
|
||||
dev_err(&rcfw->pdev->dev,
|
||||
"QPLIB: RCFW: INITIALIZE_FW send failed");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
|
||||
/* Cmd timed out */
|
||||
dev_err(&rcfw->pdev->dev,
|
||||
"QPLIB: RCFW: INITIALIZE_FW timed out");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
if (resp->status ||
|
||||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
|
||||
dev_err(&rcfw->pdev->dev,
|
||||
"QPLIB: RCFW: INITIALIZE_FW failed");
|
||||
return -EINVAL;
|
||||
}
|
||||
set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
|
||||
{
|
||||
bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->crsb);
|
||||
kfree(rcfw->crsq.crsq);
|
||||
bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
|
||||
bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
|
||||
|
||||
rcfw->pdev = NULL;
|
||||
}
|
||||
|
||||
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
|
||||
struct bnxt_qplib_rcfw *rcfw)
|
||||
{
|
||||
rcfw->pdev = pdev;
|
||||
rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
|
||||
if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL, 0,
|
||||
&rcfw->creq.max_elements,
|
||||
BNXT_QPLIB_CREQE_UNITS, 0, PAGE_SIZE,
|
||||
HWQ_TYPE_L2_CMPL)) {
|
||||
dev_err(&rcfw->pdev->dev,
|
||||
"QPLIB: HW channel CREQ allocation failed");
|
||||
goto fail;
|
||||
}
|
||||
rcfw->cmdq.max_elements = BNXT_QPLIB_CMDQE_MAX_CNT;
|
||||
if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->cmdq, NULL, 0,
|
||||
&rcfw->cmdq.max_elements,
|
||||
BNXT_QPLIB_CMDQE_UNITS, 0, PAGE_SIZE,
|
||||
HWQ_TYPE_CTX)) {
|
||||
dev_err(&rcfw->pdev->dev,
|
||||
"QPLIB: HW channel CMDQ allocation failed");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
rcfw->crsq.max_elements = rcfw->cmdq.max_elements;
|
||||
rcfw->crsq.crsq = kcalloc(rcfw->crsq.max_elements,
|
||||
sizeof(*rcfw->crsq.crsq), GFP_KERNEL);
|
||||
if (!rcfw->crsq.crsq)
|
||||
goto fail;
|
||||
|
||||
rcfw->crsb.max_elements = BNXT_QPLIB_CRSBE_MAX_CNT;
|
||||
if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->crsb, NULL, 0,
|
||||
&rcfw->crsb.max_elements,
|
||||
BNXT_QPLIB_CRSBE_UNITS, 0, PAGE_SIZE,
|
||||
HWQ_TYPE_CTX)) {
|
||||
dev_err(&rcfw->pdev->dev,
|
||||
"QPLIB: HW channel CRSB allocation failed");
|
||||
goto fail;
|
||||
}
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
bnxt_qplib_free_rcfw_channel(rcfw);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
|
||||
{
|
||||
unsigned long indx;
|
||||
|
||||
/* Make sure the HW channel is stopped! */
|
||||
synchronize_irq(rcfw->vector);
|
||||
tasklet_disable(&rcfw->worker);
|
||||
tasklet_kill(&rcfw->worker);
|
||||
|
||||
if (rcfw->requested) {
|
||||
free_irq(rcfw->vector, rcfw);
|
||||
rcfw->requested = false;
|
||||
}
|
||||
if (rcfw->cmdq_bar_reg_iomem)
|
||||
iounmap(rcfw->cmdq_bar_reg_iomem);
|
||||
rcfw->cmdq_bar_reg_iomem = NULL;
|
||||
|
||||
if (rcfw->creq_bar_reg_iomem)
|
||||
iounmap(rcfw->creq_bar_reg_iomem);
|
||||
rcfw->creq_bar_reg_iomem = NULL;
|
||||
|
||||
indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size);
|
||||
if (indx != rcfw->bmap_size)
|
||||
dev_err(&rcfw->pdev->dev,
|
||||
"QPLIB: disabling RCFW with pending cmd-bit %lx", indx);
|
||||
kfree(rcfw->cmdq_bitmap);
|
||||
rcfw->bmap_size = 0;
|
||||
|
||||
rcfw->aeq_handler = NULL;
|
||||
rcfw->vector = 0;
|
||||
}
|
||||
|
||||
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
|
||||
struct bnxt_qplib_rcfw *rcfw,
|
||||
int msix_vector,
|
||||
int cp_bar_reg_off, int virt_fn,
|
||||
int (*aeq_handler)(struct bnxt_qplib_rcfw *,
|
||||
struct creq_func_event *))
|
||||
{
|
||||
resource_size_t res_base;
|
||||
struct cmdq_init init;
|
||||
u16 bmap_size;
|
||||
int rc;
|
||||
|
||||
/* General */
|
||||
atomic_set(&rcfw->seq_num, 0);
|
||||
rcfw->flags = FIRMWARE_FIRST_FLAG;
|
||||
bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD *
|
||||
sizeof(unsigned long));
|
||||
rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
|
||||
if (!rcfw->cmdq_bitmap)
|
||||
return -ENOMEM;
|
||||
rcfw->bmap_size = bmap_size;
|
||||
|
||||
/* CMDQ */
|
||||
rcfw->cmdq_bar_reg = RCFW_COMM_PCI_BAR_REGION;
|
||||
res_base = pci_resource_start(pdev, rcfw->cmdq_bar_reg);
|
||||
if (!res_base)
|
||||
return -ENOMEM;
|
||||
|
||||
rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base +
|
||||
RCFW_COMM_BASE_OFFSET,
|
||||
RCFW_COMM_SIZE);
|
||||
if (!rcfw->cmdq_bar_reg_iomem) {
|
||||
dev_err(&rcfw->pdev->dev,
|
||||
"QPLIB: CMDQ BAR region %d mapping failed",
|
||||
rcfw->cmdq_bar_reg);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
rcfw->cmdq_bar_reg_prod_off = virt_fn ? RCFW_VF_COMM_PROD_OFFSET :
|
||||
RCFW_PF_COMM_PROD_OFFSET;
|
||||
|
||||
rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;
|
||||
|
||||
/* CRSQ */
|
||||
rcfw->crsq.prod = 0;
|
||||
rcfw->crsq.cons = 0;
|
||||
|
||||
/* CREQ */
|
||||
rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
|
||||
res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
|
||||
if (!res_base)
|
||||
dev_err(&rcfw->pdev->dev,
|
||||
"QPLIB: CREQ BAR region %d resc start is 0!",
|
||||
rcfw->creq_bar_reg);
|
||||
rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
|
||||
4);
|
||||
if (!rcfw->creq_bar_reg_iomem) {
|
||||
dev_err(&rcfw->pdev->dev,
|
||||
"QPLIB: CREQ BAR region %d mapping failed",
|
||||
rcfw->creq_bar_reg);
|
||||
return -ENOMEM;
|
||||
}
|
||||
rcfw->creq_qp_event_processed = 0;
|
||||
rcfw->creq_func_event_processed = 0;
|
||||
|
||||
rcfw->vector = msix_vector;
|
||||
if (aeq_handler)
|
||||
rcfw->aeq_handler = aeq_handler;
|
||||
|
||||
tasklet_init(&rcfw->worker, bnxt_qplib_service_creq,
|
||||
(unsigned long)rcfw);
|
||||
|
||||
rcfw->requested = false;
|
||||
rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
|
||||
"bnxt_qplib_creq", rcfw);
|
||||
if (rc) {
|
||||
dev_err(&rcfw->pdev->dev,
|
||||
"QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
|
||||
bnxt_qplib_disable_rcfw_channel(rcfw);
|
||||
return rc;
|
||||
}
|
||||
rcfw->requested = true;
|
||||
|
||||
init_waitqueue_head(&rcfw->waitq);
|
||||
|
||||
CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements);
|
||||
|
||||
init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
|
||||
init.cmdq_size_cmdq_lvl = cpu_to_le16(
|
||||
((BNXT_QPLIB_CMDQE_MAX_CNT << CMDQ_INIT_CMDQ_SIZE_SFT) &
|
||||
CMDQ_INIT_CMDQ_SIZE_MASK) |
|
||||
((rcfw->cmdq.level << CMDQ_INIT_CMDQ_LVL_SFT) &
|
||||
CMDQ_INIT_CMDQ_LVL_MASK));
|
||||
init.creq_ring_id = cpu_to_le16(rcfw->creq_ring_id);
|
||||
|
||||
/* Write to the Bono mailbox register */
|
||||
__iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
|
||||
return 0;
|
||||
}

  231  drivers/infiniband/hw/bnxt_re/qplib_rcfw.h  (new file)
@@ -0,0 +1,231 @@
/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: RDMA Controller HW interface (header)
 */

#ifndef __BNXT_QPLIB_RCFW_H__
#define __BNXT_QPLIB_RCFW_H__

#define RCFW_CMDQ_TRIG_VAL		1
#define RCFW_COMM_PCI_BAR_REGION	0
#define RCFW_COMM_CONS_PCI_BAR_REGION	2
#define RCFW_COMM_BASE_OFFSET		0x600
#define RCFW_PF_COMM_PROD_OFFSET	0xc
#define RCFW_VF_COMM_PROD_OFFSET	0xc
#define RCFW_COMM_TRIG_OFFSET		0x100
#define RCFW_COMM_SIZE			0x104

#define RCFW_DBR_PCI_BAR_REGION		2

#define RCFW_CMD_PREP(req, CMD, cmd_flags)				\
	do {								\
		memset(&(req), 0, sizeof((req)));			\
		(req).opcode = CMDQ_BASE_OPCODE_##CMD;			\
		(req).cmd_size = (sizeof((req)) +			\
				  BNXT_QPLIB_CMDQE_UNITS - 1) /		\
				  BNXT_QPLIB_CMDQE_UNITS;		\
		(req).flags = cpu_to_le16(cmd_flags);			\
	} while (0)

#define RCFW_CMD_WAIT_TIME_MS		20000 /* 20 Seconds timeout */

/* CMDQ elements */
#define BNXT_QPLIB_CMDQE_MAX_CNT	256
#define BNXT_QPLIB_CMDQE_UNITS		sizeof(struct bnxt_qplib_cmdqe)
#define BNXT_QPLIB_CMDQE_CNT_PER_PG	(PAGE_SIZE / BNXT_QPLIB_CMDQE_UNITS)

#define MAX_CMDQ_IDX			(BNXT_QPLIB_CMDQE_MAX_CNT - 1)
#define MAX_CMDQ_IDX_PER_PG		(BNXT_QPLIB_CMDQE_CNT_PER_PG - 1)

#define RCFW_MAX_OUTSTANDING_CMD	BNXT_QPLIB_CMDQE_MAX_CNT
#define RCFW_MAX_COOKIE_VALUE		0x7FFF
#define RCFW_CMD_IS_BLOCKING		0x8000

/* Cmdq contains a fixed number of 16-byte slots */
struct bnxt_qplib_cmdqe {
	u8		data[16];
};

static inline u32 get_cmdq_pg(u32 val)
{
	return (val & ~MAX_CMDQ_IDX_PER_PG) / BNXT_QPLIB_CMDQE_CNT_PER_PG;
}

static inline u32 get_cmdq_idx(u32 val)
{
	return val & MAX_CMDQ_IDX_PER_PG;
}

/* Crsq buf is 1024 bytes */
struct bnxt_qplib_crsbe {
	u8		data[1024];
};

/* CRSQ SB */
#define BNXT_QPLIB_CRSBE_MAX_CNT	4
#define BNXT_QPLIB_CRSBE_UNITS		sizeof(struct bnxt_qplib_crsbe)
#define BNXT_QPLIB_CRSBE_CNT_PER_PG	(PAGE_SIZE / BNXT_QPLIB_CRSBE_UNITS)

#define MAX_CRSB_IDX			(BNXT_QPLIB_CRSBE_MAX_CNT - 1)
#define MAX_CRSB_IDX_PER_PG		(BNXT_QPLIB_CRSBE_CNT_PER_PG - 1)

static inline u32 get_crsb_pg(u32 val)
{
	return (val & ~MAX_CRSB_IDX_PER_PG) / BNXT_QPLIB_CRSBE_CNT_PER_PG;
}

static inline u32 get_crsb_idx(u32 val)
{
	return val & MAX_CRSB_IDX_PER_PG;
}

static inline void bnxt_qplib_crsb_dma_next(dma_addr_t *pg_map_arr,
					    u32 prod, dma_addr_t *dma_addr)
{
	*dma_addr = pg_map_arr[(prod) / BNXT_QPLIB_CRSBE_CNT_PER_PG];
	*dma_addr += ((prod) % BNXT_QPLIB_CRSBE_CNT_PER_PG) *
		     BNXT_QPLIB_CRSBE_UNITS;
}

/* CREQ */
/* Allocate 1 per QP for async error notification for now */
#define BNXT_QPLIB_CREQE_MAX_CNT	(64 * 1024)
#define BNXT_QPLIB_CREQE_UNITS		16	/* 16-Bytes per prod unit */
#define BNXT_QPLIB_CREQE_CNT_PER_PG	(PAGE_SIZE / BNXT_QPLIB_CREQE_UNITS)

#define MAX_CREQ_IDX			(BNXT_QPLIB_CREQE_MAX_CNT - 1)
#define MAX_CREQ_IDX_PER_PG		(BNXT_QPLIB_CREQE_CNT_PER_PG - 1)

static inline u32 get_creq_pg(u32 val)
{
	return (val & ~MAX_CREQ_IDX_PER_PG) / BNXT_QPLIB_CREQE_CNT_PER_PG;
}

static inline u32 get_creq_idx(u32 val)
{
	return val & MAX_CREQ_IDX_PER_PG;
}

#define BNXT_QPLIB_CREQE_PER_PG	(PAGE_SIZE / sizeof(struct creq_base))

#define CREQ_CMP_VALID(hdr, raw_cons, cp_bit)			\
	(!!((hdr)->v & CREQ_BASE_V) ==				\
	   !((raw_cons) & (cp_bit)))

#define CREQ_DB_KEY_CP			(0x2 << CMPL_DOORBELL_KEY_SFT)
#define CREQ_DB_IDX_VALID		CMPL_DOORBELL_IDX_VALID
#define CREQ_DB_IRQ_DIS			CMPL_DOORBELL_MASK
#define CREQ_DB_CP_FLAGS_REARM		(CREQ_DB_KEY_CP |	\
					 CREQ_DB_IDX_VALID)
#define CREQ_DB_CP_FLAGS		(CREQ_DB_KEY_CP |	\
					 CREQ_DB_IDX_VALID |	\
					 CREQ_DB_IRQ_DIS)
#define CREQ_DB_REARM(db, raw_cons, cp_bit)			\
	writel(CREQ_DB_CP_FLAGS_REARM | ((raw_cons) & ((cp_bit) - 1)), db)
#define CREQ_DB(db, raw_cons, cp_bit)				\
	writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)

/* HWQ */
struct bnxt_qplib_crsqe {
	struct creq_qp_event	qp_event;
	u32			req_size;
};

struct bnxt_qplib_crsq {
	struct bnxt_qplib_crsqe	*crsq;
	u32			prod;
	u32			cons;
	u32			max_elements;
};

/* RCFW Communication Channels */
struct bnxt_qplib_rcfw {
	struct pci_dev		*pdev;
	int			vector;
	struct tasklet_struct	worker;
	bool			requested;
	unsigned long		*cmdq_bitmap;
	u32			bmap_size;
	unsigned long		flags;
#define FIRMWARE_INITIALIZED_FLAG	1
#define FIRMWARE_FIRST_FLAG		BIT(31)
	wait_queue_head_t	waitq;
	int			(*aeq_handler)(struct bnxt_qplib_rcfw *,
					       struct creq_func_event *);
	atomic_t		seq_num;

	/* Bar region info */
	void __iomem		*cmdq_bar_reg_iomem;
	u16			cmdq_bar_reg;
	u16			cmdq_bar_reg_prod_off;
	u16			cmdq_bar_reg_trig_off;
	u16			creq_ring_id;
	u16			creq_bar_reg;
	void __iomem		*creq_bar_reg_iomem;

	/* Cmd-Resp and Async Event notification queue */
	struct bnxt_qplib_hwq	creq;
	u64			creq_qp_event_processed;
	u64			creq_func_event_processed;

	/* Actual Cmd and Resp Queues */
	struct bnxt_qplib_hwq	cmdq;
	struct bnxt_qplib_crsq	crsq;
	struct bnxt_qplib_hwq	crsb;
};

void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
				  struct bnxt_qplib_rcfw *rcfw);
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
				   struct bnxt_qplib_rcfw *rcfw,
				   int msix_vector,
				   int cp_bar_reg_off, int virt_fn,
				   int (*aeq_handler)
					   (struct bnxt_qplib_rcfw *,
					    struct creq_func_event *));

int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie);
int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie);
void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
				   struct cmdq_base *req, void **crsbe,
				   u8 is_block);

int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
			 struct bnxt_qplib_ctx *ctx, int is_virtfn);
#endif /* __BNXT_QPLIB_RCFW_H__ */
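
The command flow this header defines is: build a request with RCFW_CMD_PREP(), submit it with bnxt_qplib_rcfw_send_message(), then sleep until the CREQ completion carrying the same cookie arrives and check the status. A minimal sketch of that sequence, modelled on bnxt_qplib_deinit_rcfw() in qplib_rcfw.c from this same series; the wrapper name is illustrative:

/* Illustrative wrapper: prep -> send -> wait -> check status/cookie. */
static int example_deinitialize_fw(struct bnxt_qplib_rcfw *rcfw)
{
	struct creq_deinitialize_fw_resp *resp;
	struct cmdq_deinitialize_fw req;
	u16 cmd_flags = 0;

	RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
	resp = (struct creq_deinitialize_fw_resp *)
		bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 0);
	if (!resp)
		return -EINVAL;		/* CMDQ full or firmware not ready */
	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie)))
		return -ETIMEDOUT;	/* no CREQ event for this cookie */
	if (resp->status ||
	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie))
		return -EFAULT;		/* firmware rejected the command */
	return 0;
}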

  825  drivers/infiniband/hw/bnxt_re/qplib_res.c  (new file)
@@ -0,0 +1,825 @@
/*
|
||||
* Broadcom NetXtreme-E RoCE driver.
|
||||
*
|
||||
* Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
|
||||
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
|
||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
|
||||
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
|
||||
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
|
||||
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* Description: QPLib resource manager
|
||||
*/
|
||||
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/inetdevice.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/if_vlan.h>
|
||||
#include "roce_hsi.h"
|
||||
#include "qplib_res.h"
|
||||
#include "qplib_sp.h"
|
||||
#include "qplib_rcfw.h"
|
||||
|
||||
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
|
||||
struct bnxt_qplib_stats *stats);
|
||||
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
|
||||
struct bnxt_qplib_stats *stats);
|
||||
|
||||
/* PBL */
|
||||
static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
|
||||
bool is_umem)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!is_umem) {
|
||||
for (i = 0; i < pbl->pg_count; i++) {
|
||||
if (pbl->pg_arr[i])
|
||||
dma_free_coherent(&pdev->dev, pbl->pg_size,
|
||||
(void *)((unsigned long)
|
||||
pbl->pg_arr[i] &
|
||||
PAGE_MASK),
|
||||
pbl->pg_map_arr[i]);
|
||||
else
|
||||
dev_warn(&pdev->dev,
|
||||
"QPLIB: PBL free pg_arr[%d] empty?!",
|
||||
i);
|
||||
pbl->pg_arr[i] = NULL;
|
||||
}
|
||||
}
|
||||
kfree(pbl->pg_arr);
|
||||
pbl->pg_arr = NULL;
|
||||
kfree(pbl->pg_map_arr);
|
||||
pbl->pg_map_arr = NULL;
|
||||
pbl->pg_count = 0;
|
||||
pbl->pg_size = 0;
|
||||
}
|
||||
|
||||
static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
|
||||
struct scatterlist *sghead, u32 pages, u32 pg_size)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
bool is_umem = false;
|
||||
int i;
|
||||
|
||||
/* page ptr arrays */
|
||||
pbl->pg_arr = kcalloc(pages, sizeof(void *), GFP_KERNEL);
|
||||
if (!pbl->pg_arr)
|
||||
return -ENOMEM;
|
||||
|
||||
pbl->pg_map_arr = kcalloc(pages, sizeof(dma_addr_t), GFP_KERNEL);
|
||||
if (!pbl->pg_map_arr) {
|
||||
kfree(pbl->pg_arr);
|
||||
pbl->pg_arr = NULL;
|
||||
return -ENOMEM;
|
||||
}
|
||||
pbl->pg_count = 0;
|
||||
pbl->pg_size = pg_size;
|
||||
|
||||
if (!sghead) {
|
||||
for (i = 0; i < pages; i++) {
|
||||
pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
|
||||
pbl->pg_size,
|
||||
&pbl->pg_map_arr[i],
|
||||
GFP_KERNEL);
|
||||
if (!pbl->pg_arr[i])
|
||||
goto fail;
|
||||
memset(pbl->pg_arr[i], 0, pbl->pg_size);
|
||||
pbl->pg_count++;
|
||||
}
|
||||
} else {
|
||||
i = 0;
|
||||
is_umem = true;
|
||||
for_each_sg(sghead, sg, pages, i) {
|
||||
pbl->pg_map_arr[i] = sg_dma_address(sg);
|
||||
pbl->pg_arr[i] = sg_virt(sg);
|
||||
if (!pbl->pg_arr[i])
|
||||
goto fail;
|
||||
|
||||
pbl->pg_count++;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
__free_pbl(pdev, pbl, is_umem);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* HWQ */
|
||||
void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!hwq->max_elements)
|
||||
return;
|
||||
if (hwq->level >= PBL_LVL_MAX)
|
||||
return;
|
||||
|
||||
for (i = 0; i < hwq->level + 1; i++) {
|
||||
if (i == hwq->level)
|
||||
__free_pbl(pdev, &hwq->pbl[i], hwq->is_user);
|
||||
else
|
||||
__free_pbl(pdev, &hwq->pbl[i], false);
|
||||
}
|
||||
|
||||
hwq->level = PBL_LVL_MAX;
|
||||
hwq->max_elements = 0;
|
||||
hwq->element_size = 0;
|
||||
hwq->prod = 0;
|
||||
hwq->cons = 0;
|
||||
hwq->cp_bit = 0;
|
||||
}
|
||||
|
||||
/* All HWQs are power of 2 in size */
|
||||
int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
|
||||
struct scatterlist *sghead, int nmap,
|
||||
u32 *elements, u32 element_size, u32 aux,
|
||||
u32 pg_size, enum bnxt_qplib_hwq_type hwq_type)
|
||||
{
|
||||
u32 pages, slots, size, aux_pages = 0, aux_size = 0;
|
||||
dma_addr_t *src_phys_ptr, **dst_virt_ptr;
|
||||
int i, rc;
|
||||
|
||||
hwq->level = PBL_LVL_MAX;
|
||||
|
||||
slots = roundup_pow_of_two(*elements);
|
||||
if (aux) {
|
||||
aux_size = roundup_pow_of_two(aux);
|
||||
aux_pages = (slots * aux_size) / pg_size;
|
||||
if ((slots * aux_size) % pg_size)
|
||||
aux_pages++;
|
||||
}
|
||||
size = roundup_pow_of_two(element_size);
|
||||
|
||||
if (!sghead) {
|
||||
hwq->is_user = false;
|
||||
pages = (slots * size) / pg_size + aux_pages;
|
||||
if ((slots * size) % pg_size)
|
||||
pages++;
|
||||
if (!pages)
|
||||
return -EINVAL;
|
||||
} else {
|
||||
hwq->is_user = true;
|
||||
pages = nmap;
|
||||
}
|
||||
|
||||
/* Alloc the 1st memory block; can be a PDL/PTL/PBL */
|
||||
if (sghead && (pages == MAX_PBL_LVL_0_PGS))
|
||||
rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], sghead,
|
||||
pages, pg_size);
|
||||
else
|
||||
rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL, 1, pg_size);
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
hwq->level = PBL_LVL_0;
|
||||
|
||||
if (pages > MAX_PBL_LVL_0_PGS) {
|
||||
if (pages > MAX_PBL_LVL_1_PGS) {
|
||||
/* 2 levels of indirection */
|
||||
rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], NULL,
|
||||
MAX_PBL_LVL_1_PGS_FOR_LVL_2, pg_size);
|
||||
if (rc)
|
||||
goto fail;
|
||||
/* Fill in lvl0 PBL */
|
||||
dst_virt_ptr =
|
||||
(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
|
||||
src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
|
||||
for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
|
||||
dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
|
||||
src_phys_ptr[i] | PTU_PDE_VALID;
|
||||
hwq->level = PBL_LVL_1;
|
||||
|
||||
rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_2], sghead,
|
||||
pages, pg_size);
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
/* Fill in lvl1 PBL */
|
||||
dst_virt_ptr =
|
||||
(dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
|
||||
src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
|
||||
for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
|
||||
dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
|
||||
src_phys_ptr[i] | PTU_PTE_VALID;
|
||||
}
|
||||
if (hwq_type == HWQ_TYPE_QUEUE) {
|
||||
/* Find the last pg of the size */
|
||||
i = hwq->pbl[PBL_LVL_2].pg_count;
|
||||
dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
|
||||
PTU_PTE_LAST;
|
||||
if (i > 1)
|
||||
dst_virt_ptr[PTR_PG(i - 2)]
|
||||
[PTR_IDX(i - 2)] |=
|
||||
PTU_PTE_NEXT_TO_LAST;
|
||||
}
|
||||
hwq->level = PBL_LVL_2;
|
||||
} else {
|
||||
u32 flag = hwq_type == HWQ_TYPE_L2_CMPL ? 0 :
|
||||
PTU_PTE_VALID;
|
||||
|
||||
/* 1 level of indirection */
|
||||
rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], sghead,
|
||||
pages, pg_size);
|
||||
if (rc)
|
||||
goto fail;
|
||||
/* Fill in lvl0 PBL */
|
||||
dst_virt_ptr =
|
||||
(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
|
||||
src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
|
||||
for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) {
|
||||
dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
|
||||
src_phys_ptr[i] | flag;
|
||||
}
|
||||
if (hwq_type == HWQ_TYPE_QUEUE) {
|
||||
/* Find the last pg of the size */
|
||||
i = hwq->pbl[PBL_LVL_1].pg_count;
|
||||
dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
|
||||
PTU_PTE_LAST;
|
||||
if (i > 1)
|
||||
dst_virt_ptr[PTR_PG(i - 2)]
|
||||
[PTR_IDX(i - 2)] |=
|
||||
PTU_PTE_NEXT_TO_LAST;
|
||||
}
|
||||
hwq->level = PBL_LVL_1;
|
||||
}
|
||||
}
|
||||
hwq->pdev = pdev;
|
||||
spin_lock_init(&hwq->lock);
|
||||
hwq->prod = 0;
|
||||
hwq->cons = 0;
|
||||
*elements = hwq->max_elements = slots;
|
||||
hwq->element_size = size;
|
||||
|
||||
/* For direct access to the elements */
|
||||
hwq->pbl_ptr = hwq->pbl[hwq->level].pg_arr;
|
||||
hwq->pbl_dma_ptr = hwq->pbl[hwq->level].pg_map_arr;
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
bnxt_qplib_free_hwq(pdev, hwq);
|
||||
return -ENOMEM;
|
||||
}
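The sizing logic above packs a power-of-two number of fixed-size slots into pg_size pages and then picks 0, 1 or 2 levels of PBL indirection. Below is a minimal userspace sketch of that arithmetic, assuming 4 KiB pages, the level-0/level-1 thresholds defined in qplib_res.h further down, and made-up element counts; it is illustrative only, not driver code.

#include <stdio.h>
#include <stdint.h>

#define PG_SIZE           4096u   /* assumed page size */
#define MAX_PBL_LVL_0_PGS 1u
#define MAX_PBL_LVL_1_PGS 512u

static uint32_t roundup_pow_of_two_u32(uint32_t v)
{
	uint32_t r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	/* hypothetical QPC-like table: 1000 entries of 448 bytes */
	uint32_t elements = 1000, element_size = 448;
	uint32_t slots = roundup_pow_of_two_u32(elements);
	uint32_t size = roundup_pow_of_two_u32(element_size);
	uint32_t pages = (slots * size) / PG_SIZE;
	int level;

	if ((slots * size) % PG_SIZE)
		pages++;

	if (pages <= MAX_PBL_LVL_0_PGS)
		level = 0;		/* one data page, no indirection */
	else if (pages <= MAX_PBL_LVL_1_PGS)
		level = 1;		/* one page of PTEs points at the data */
	else
		level = 2;		/* a PDE page points at PTE pages */

	printf("slots=%u entry=%u pages=%u -> PBL level %d\n",
	       slots, size, pages, level);
	return 0;
}

With a 448-byte entry rounded up to 512 bytes, roughly a thousand elements already push the queue past the single-page level-0 case into one level of indirection.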
|
||||
|
||||
/* Context Tables */
|
||||
void bnxt_qplib_free_ctx(struct pci_dev *pdev,
|
||||
struct bnxt_qplib_ctx *ctx)
|
||||
{
|
||||
int i;
|
||||
|
||||
bnxt_qplib_free_hwq(pdev, &ctx->qpc_tbl);
|
||||
bnxt_qplib_free_hwq(pdev, &ctx->mrw_tbl);
|
||||
bnxt_qplib_free_hwq(pdev, &ctx->srqc_tbl);
|
||||
bnxt_qplib_free_hwq(pdev, &ctx->cq_tbl);
|
||||
bnxt_qplib_free_hwq(pdev, &ctx->tim_tbl);
|
||||
for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
|
||||
bnxt_qplib_free_hwq(pdev, &ctx->tqm_tbl[i]);
|
||||
bnxt_qplib_free_hwq(pdev, &ctx->tqm_pde);
|
||||
bnxt_qplib_free_stats_ctx(pdev, &ctx->stats);
|
||||
}
|
||||
|
||||
/*
|
||||
* Routine: bnxt_qplib_alloc_ctx
|
||||
* Description:
|
||||
* Context tables are memories which are used by the chip fw.
|
||||
* The 6 tables defined are:
|
||||
* QPC ctx - holds QP states
|
||||
* MRW ctx - holds memory region and window
|
||||
* SRQ ctx - holds shared RQ states
|
||||
* CQ ctx - holds completion queue states
|
||||
* TQM ctx - holds Tx Queue Manager context
|
||||
* TIM ctx - holds timer context
|
||||
* Depending on the size of the tbl requested, either a 1 Page Buffer List
* or a 1-to-2-stage indirection Page Directory List + 1 PBL is used.
* The table layout is chosen as follows:
* For 0 < ctx size <= 1 page, 0 levels of indirection are used
* For 1 page < ctx size <= 512 pages, 1 level of indirection is used
* For 512 pages < ctx size <= MAX, 2 levels of indirection are used
|
||||
* Returns:
|
||||
* 0 if success, else -ERRORS
|
||||
*/
|
||||
int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
|
||||
struct bnxt_qplib_ctx *ctx,
|
||||
bool virt_fn)
|
||||
{
|
||||
int i, j, k, rc = 0;
|
||||
int fnz_idx = -1;
|
||||
__le64 **pbl_ptr;
|
||||
|
||||
if (virt_fn)
|
||||
goto stats_alloc;
|
||||
|
||||
/* QPC Tables */
|
||||
ctx->qpc_tbl.max_elements = ctx->qpc_count;
|
||||
rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL, 0,
|
||||
&ctx->qpc_tbl.max_elements,
|
||||
BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE, 0,
|
||||
PAGE_SIZE, HWQ_TYPE_CTX);
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
/* MRW Tables */
|
||||
ctx->mrw_tbl.max_elements = ctx->mrw_count;
|
||||
rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL, 0,
|
||||
&ctx->mrw_tbl.max_elements,
|
||||
BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE, 0,
|
||||
PAGE_SIZE, HWQ_TYPE_CTX);
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
/* SRQ Tables */
|
||||
ctx->srqc_tbl.max_elements = ctx->srqc_count;
|
||||
rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL, 0,
|
||||
&ctx->srqc_tbl.max_elements,
|
||||
BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE, 0,
|
||||
PAGE_SIZE, HWQ_TYPE_CTX);
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
/* CQ Tables */
|
||||
ctx->cq_tbl.max_elements = ctx->cq_count;
|
||||
rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL, 0,
|
||||
&ctx->cq_tbl.max_elements,
|
||||
BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE, 0,
|
||||
PAGE_SIZE, HWQ_TYPE_CTX);
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
/* TQM Buffer */
|
||||
ctx->tqm_pde.max_elements = 512;
|
||||
rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL, 0,
|
||||
&ctx->tqm_pde.max_elements, sizeof(u64),
|
||||
0, PAGE_SIZE, HWQ_TYPE_CTX);
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
|
||||
if (!ctx->tqm_count[i])
|
||||
continue;
|
||||
ctx->tqm_tbl[i].max_elements = ctx->qpc_count *
|
||||
ctx->tqm_count[i];
|
||||
rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL, 0,
|
||||
&ctx->tqm_tbl[i].max_elements, 1,
|
||||
0, PAGE_SIZE, HWQ_TYPE_CTX);
|
||||
if (rc)
|
||||
goto fail;
|
||||
}
|
||||
pbl_ptr = (__le64 **)ctx->tqm_pde.pbl_ptr;
|
||||
for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
|
||||
i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
|
||||
if (!ctx->tqm_tbl[i].max_elements)
|
||||
continue;
|
||||
if (fnz_idx == -1)
|
||||
fnz_idx = i;
|
||||
switch (ctx->tqm_tbl[i].level) {
|
||||
case PBL_LVL_2:
|
||||
for (k = 0; k < ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_count;
|
||||
k++)
|
||||
pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)] =
|
||||
cpu_to_le64(
|
||||
ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_map_arr[k]
|
||||
| PTU_PTE_VALID);
|
||||
break;
|
||||
case PBL_LVL_1:
|
||||
case PBL_LVL_0:
|
||||
default:
|
||||
pbl_ptr[PTR_PG(j)][PTR_IDX(j)] = cpu_to_le64(
|
||||
ctx->tqm_tbl[i].pbl[PBL_LVL_0].pg_map_arr[0] |
|
||||
PTU_PTE_VALID);
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (fnz_idx == -1)
|
||||
fnz_idx = 0;
|
||||
ctx->tqm_pde_level = ctx->tqm_tbl[fnz_idx].level == PBL_LVL_2 ?
|
||||
PBL_LVL_2 : ctx->tqm_tbl[fnz_idx].level + 1;
|
||||
|
||||
/* TIM Buffer */
|
||||
ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
|
||||
rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL, 0,
|
||||
&ctx->tim_tbl.max_elements, 1,
|
||||
0, PAGE_SIZE, HWQ_TYPE_CTX);
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
stats_alloc:
|
||||
/* Stats */
|
||||
rc = bnxt_qplib_alloc_stats_ctx(pdev, &ctx->stats);
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
bnxt_qplib_free_ctx(pdev, ctx);
|
||||
return rc;
|
||||
}
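bnxt_qplib_alloc_ctx() reserves an 8-entry block in the TQM PDE page for each of the 32 possible TQM rings (j advances by MAX_TQM_ALLOC_BLK_SIZE per ring). A small userspace sketch of that layout follows; the per-ring page counts are arbitrary examples, not values the firmware would report.

#include <stdio.h>

#define MAX_TQM_ALLOC_REQ	32
#define MAX_TQM_ALLOC_BLK_SIZE	8

int main(void)
{
	/* example: rings 0, 1 and 4 exist with 1, 3 and 2 pages each */
	int pg_count[MAX_TQM_ALLOC_REQ] = { [0] = 1, [1] = 3, [4] = 2 };
	int i, j, k;

	for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
	     i++, j += MAX_TQM_ALLOC_BLK_SIZE)
		for (k = 0; k < pg_count[i]; k++)
			printf("ring %2d page %d -> PDE entry %d\n",
			       i, k, j + k);
	return 0;
}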
|
||||
|
||||
/* GUID */
|
||||
void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid)
|
||||
{
|
||||
u8 mac[ETH_ALEN];
|
||||
|
||||
/* MAC-48 to EUI-64 mapping */
|
||||
memcpy(mac, dev_addr, ETH_ALEN);
|
||||
guid[0] = mac[0] ^ 2;
|
||||
guid[1] = mac[1];
|
||||
guid[2] = mac[2];
|
||||
guid[3] = 0xff;
|
||||
guid[4] = 0xfe;
|
||||
guid[5] = mac[3];
|
||||
guid[6] = mac[4];
|
||||
guid[7] = mac[5];
|
||||
}
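bnxt_qplib_get_guid() derives the node GUID from the netdev MAC using the usual MAC-48 to EUI-64 expansion: flip the universal/local bit of the first octet and splice 0xFF, 0xFE into the middle. The same transform is reproduced here as a standalone sketch with an arbitrary example MAC.

#include <stdio.h>
#include <stdint.h>

static void mac_to_eui64(const uint8_t mac[6], uint8_t guid[8])
{
	guid[0] = mac[0] ^ 2;	/* toggle the universal/local bit */
	guid[1] = mac[1];
	guid[2] = mac[2];
	guid[3] = 0xff;		/* EUI-64 filler bytes */
	guid[4] = 0xfe;
	guid[5] = mac[3];
	guid[6] = mac[4];
	guid[7] = mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
	uint8_t guid[8];
	int i;

	mac_to_eui64(mac, guid);
	for (i = 0; i < 8; i++)
		printf("%02x%s", guid[i], i == 7 ? "\n" : ":");
	return 0;
}

For 00:10:18:aa:bb:cc this prints 02:10:18:ff:fe:aa:bb:cc.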
|
||||
|
||||
static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_sgid_tbl *sgid_tbl)
|
||||
{
|
||||
kfree(sgid_tbl->tbl);
|
||||
kfree(sgid_tbl->hw_id);
|
||||
kfree(sgid_tbl->ctx);
|
||||
sgid_tbl->tbl = NULL;
|
||||
sgid_tbl->hw_id = NULL;
|
||||
sgid_tbl->ctx = NULL;
|
||||
sgid_tbl->max = 0;
|
||||
sgid_tbl->active = 0;
|
||||
}
|
||||
|
||||
static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_sgid_tbl *sgid_tbl,
|
||||
u16 max)
|
||||
{
|
||||
sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL);
|
||||
if (!sgid_tbl->tbl)
|
||||
return -ENOMEM;
|
||||
|
||||
sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
|
||||
if (!sgid_tbl->hw_id)
|
||||
goto out_free1;
|
||||
|
||||
sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
|
||||
if (!sgid_tbl->ctx)
|
||||
goto out_free2;
|
||||
|
||||
sgid_tbl->max = max;
|
||||
return 0;
|
||||
out_free2:
|
||||
kfree(sgid_tbl->hw_id);
|
||||
sgid_tbl->hw_id = NULL;
|
||||
out_free1:
|
||||
kfree(sgid_tbl->tbl);
|
||||
sgid_tbl->tbl = NULL;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_sgid_tbl *sgid_tbl)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < sgid_tbl->max; i++) {
|
||||
if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
|
||||
sizeof(bnxt_qplib_gid_zero)))
|
||||
bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true);
|
||||
}
|
||||
memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
|
||||
memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
|
||||
sgid_tbl->active = 0;
|
||||
}
|
||||
|
||||
static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
|
||||
struct net_device *netdev)
|
||||
{
|
||||
memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
|
||||
memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
|
||||
}
|
||||
|
||||
static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_pkey_tbl *pkey_tbl)
|
||||
{
|
||||
if (!pkey_tbl->tbl)
|
||||
dev_dbg(&res->pdev->dev, "QPLIB: PKEY tbl not present");
|
||||
else
|
||||
kfree(pkey_tbl->tbl);
|
||||
|
||||
pkey_tbl->tbl = NULL;
|
||||
pkey_tbl->max = 0;
|
||||
pkey_tbl->active = 0;
|
||||
}
|
||||
|
||||
static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_pkey_tbl *pkey_tbl,
|
||||
u16 max)
|
||||
{
|
||||
pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
|
||||
if (!pkey_tbl->tbl)
|
||||
return -ENOMEM;
|
||||
|
||||
pkey_tbl->max = max;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* PDs */
|
||||
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
|
||||
{
|
||||
u32 bit_num;
|
||||
|
||||
bit_num = find_first_bit(pdt->tbl, pdt->max);
|
||||
if (bit_num == pdt->max)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Found unused PD */
|
||||
clear_bit(bit_num, pdt->tbl);
|
||||
pd->id = bit_num;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_pd_tbl *pdt,
|
||||
struct bnxt_qplib_pd *pd)
|
||||
{
|
||||
if (test_and_set_bit(pd->id, pdt->tbl)) {
|
||||
dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d",
|
||||
pd->id);
|
||||
return -EINVAL;
|
||||
}
|
||||
pd->id = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
|
||||
{
|
||||
kfree(pdt->tbl);
|
||||
pdt->tbl = NULL;
|
||||
pdt->max = 0;
|
||||
}
|
||||
|
||||
static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_pd_tbl *pdt,
|
||||
u32 max)
|
||||
{
|
||||
u32 bytes;
|
||||
|
||||
bytes = max >> 3;
|
||||
if (!bytes)
|
||||
bytes = 1;
|
||||
pdt->tbl = kmalloc(bytes, GFP_KERNEL);
|
||||
if (!pdt->tbl)
|
||||
return -ENOMEM;
|
||||
|
||||
pdt->max = max;
|
||||
memset((u8 *)pdt->tbl, 0xFF, bytes);
|
||||
|
||||
return 0;
|
||||
}
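PD ids are handed out from a bitmap in which a set bit means "free": the table is initialised to all ones, bnxt_qplib_alloc_pd() clears the first set bit, and bnxt_qplib_dealloc_pd() detects a double free because the bit is already set. Below is a userspace sketch of the same idea; the byte-array bitmap and MAX_PD size are illustrative, not the driver's types.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MAX_PD 64	/* example table size */

static uint8_t pd_bitmap[MAX_PD / 8];

static int alloc_pd(void)
{
	int i;

	for (i = 0; i < MAX_PD; i++)
		if (pd_bitmap[i / 8] & (1u << (i % 8))) {
			pd_bitmap[i / 8] &= ~(1u << (i % 8));	/* mark used */
			return i;
		}
	return -1;		/* table exhausted */
}

static int free_pd(int id)
{
	if (pd_bitmap[id / 8] & (1u << (id % 8)))
		return -1;	/* bit already set: freeing an unused PD */
	pd_bitmap[id / 8] |= 1u << (id % 8);
	return 0;
}

int main(void)
{
	int a, b;

	memset(pd_bitmap, 0xFF, sizeof(pd_bitmap));	/* all ids free */
	a = alloc_pd();
	b = alloc_pd();
	printf("got pd %d and pd %d; free(%d)=%d, double free(%d)=%d\n",
	       a, b, a, free_pd(a), a, free_pd(a));
	return 0;
}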
|
||||
|
||||
/* DPIs */
|
||||
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
|
||||
struct bnxt_qplib_dpi *dpi,
|
||||
void *app)
|
||||
{
|
||||
u32 bit_num;
|
||||
|
||||
bit_num = find_first_bit(dpit->tbl, dpit->max);
|
||||
if (bit_num == dpit->max)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Found unused DPI */
|
||||
clear_bit(bit_num, dpit->tbl);
|
||||
dpit->app_tbl[bit_num] = app;
|
||||
|
||||
dpi->dpi = bit_num;
|
||||
dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
|
||||
dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_dpi_tbl *dpit,
|
||||
struct bnxt_qplib_dpi *dpi)
|
||||
{
|
||||
if (dpi->dpi >= dpit->max) {
|
||||
dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d", dpi->dpi);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
|
||||
dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d",
|
||||
dpi->dpi);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (dpit->app_tbl)
|
||||
dpit->app_tbl[dpi->dpi] = NULL;
|
||||
memset(dpi, 0, sizeof(*dpi));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_dpi_tbl *dpit)
|
||||
{
|
||||
kfree(dpit->tbl);
|
||||
kfree(dpit->app_tbl);
|
||||
if (dpit->dbr_bar_reg_iomem)
|
||||
pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
|
||||
memset(dpit, 0, sizeof(*dpit));
|
||||
}
|
||||
|
||||
static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_dpi_tbl *dpit,
|
||||
u32 dbr_offset)
|
||||
{
|
||||
u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
|
||||
resource_size_t bar_reg_base;
|
||||
u32 dbr_len, bytes;
|
||||
|
||||
if (dpit->dbr_bar_reg_iomem) {
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPLIB: DBR BAR region %d already mapped", dbr_bar_reg);
|
||||
return -EALREADY;
|
||||
}
|
||||
|
||||
bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
|
||||
if (!bar_reg_base) {
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPLIB: BAR region %d resc start failed", dbr_bar_reg);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
|
||||
if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
|
||||
dev_err(&res->pdev->dev, "QPLIB: Invalid DBR length %d",
|
||||
dbr_len);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
dpit->dbr_bar_reg_iomem = ioremap_nocache(bar_reg_base + dbr_offset,
|
||||
dbr_len);
|
||||
if (!dpit->dbr_bar_reg_iomem) {
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPLIB: FP: DBR BAR region %d mapping failed",
|
||||
dbr_bar_reg);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
dpit->unmapped_dbr = bar_reg_base + dbr_offset;
|
||||
dpit->max = dbr_len / PAGE_SIZE;
|
||||
|
||||
dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
|
||||
if (!dpit->app_tbl) {
|
||||
pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPLIB: DPI app tbl allocation failed");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
bytes = dpit->max >> 3;
|
||||
if (!bytes)
|
||||
bytes = 1;
|
||||
|
||||
dpit->tbl = kmalloc(bytes, GFP_KERNEL);
|
||||
if (!dpit->tbl) {
|
||||
pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
|
||||
kfree(dpit->app_tbl);
|
||||
dpit->app_tbl = NULL;
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPLIB: DPI tbl allocation failed for size = %d",
|
||||
bytes);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
memset((u8 *)dpit->tbl, 0xFF, bytes);
|
||||
|
||||
return 0;
|
||||
}
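The doorbell (DPI) table simply carves the remainder of the doorbell BAR, past the L2 driver's dbr_offset, into PAGE_SIZE doorbell pages, so DPI n lives at base + dbr_offset + n * PAGE_SIZE. A sketch of that address arithmetic with made-up BAR numbers:

#include <stdio.h>
#include <stdint.h>

#define PG_SIZE 4096ull		/* assumed doorbell page size */

int main(void)
{
	uint64_t bar_base = 0xf0000000ull;	/* hypothetical BAR start */
	uint64_t bar_len = 0x00100000ull;	/* hypothetical 1 MiB BAR */
	uint64_t dbr_offset = 0x1000ull;	/* space kept for the L2 driver */
	uint64_t dbr_len = bar_len - dbr_offset;
	uint32_t max_dpi = (uint32_t)(dbr_len / PG_SIZE);
	uint32_t dpi = 5;			/* example allocated DPI */

	printf("max DPIs = %u, DPI %u doorbell page at 0x%llx\n",
	       max_dpi, dpi,
	       (unsigned long long)(bar_base + dbr_offset + dpi * PG_SIZE));
	return 0;
}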
|
||||
|
||||
/* PKEYs */
|
||||
static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl)
|
||||
{
|
||||
memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
|
||||
pkey_tbl->active = 0;
|
||||
}
|
||||
|
||||
static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_pkey_tbl *pkey_tbl)
|
||||
{
|
||||
u16 pkey = 0xFFFF;
|
||||
|
||||
memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
|
||||
|
||||
/* pkey default = 0xFFFF */
|
||||
bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false);
|
||||
}
|
||||
|
||||
/* Stats */
|
||||
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
|
||||
struct bnxt_qplib_stats *stats)
|
||||
{
|
||||
if (stats->dma) {
|
||||
dma_free_coherent(&pdev->dev, stats->size,
|
||||
stats->dma, stats->dma_map);
|
||||
}
|
||||
memset(stats, 0, sizeof(*stats));
|
||||
stats->fw_id = -1;
|
||||
}
|
||||
|
||||
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
|
||||
struct bnxt_qplib_stats *stats)
|
||||
{
|
||||
memset(stats, 0, sizeof(*stats));
|
||||
stats->fw_id = -1;
|
||||
stats->size = sizeof(struct ctx_hw_stats);
|
||||
stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
|
||||
&stats->dma_map, GFP_KERNEL);
|
||||
if (!stats->dma) {
|
||||
dev_err(&pdev->dev, "QPLIB: Stats DMA allocation failed");
|
||||
return -ENOMEM;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
|
||||
{
|
||||
bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl);
|
||||
bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
|
||||
}
|
||||
|
||||
int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
|
||||
{
|
||||
bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
|
||||
bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
|
||||
{
|
||||
bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl);
|
||||
bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
|
||||
bnxt_qplib_free_pd_tbl(&res->pd_tbl);
|
||||
bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
|
||||
|
||||
res->netdev = NULL;
|
||||
res->pdev = NULL;
|
||||
}
|
||||
|
||||
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
|
||||
struct net_device *netdev,
|
||||
struct bnxt_qplib_dev_attr *dev_attr)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
res->pdev = pdev;
|
||||
res->netdev = netdev;
|
||||
|
||||
rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey);
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
bnxt_qplib_free_res(res);
|
||||
return rc;
|
||||
}
|
drivers/infiniband/hw/bnxt_re/qplib_res.h (new file, 223 lines)
@@ -0,0 +1,223 @@
|
||||
/*
|
||||
* Broadcom NetXtreme-E RoCE driver.
|
||||
*
|
||||
* Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
|
||||
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
|
||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
|
||||
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
|
||||
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
|
||||
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* Description: QPLib resource manager (header)
|
||||
*/
|
||||
|
||||
#ifndef __BNXT_QPLIB_RES_H__
|
||||
#define __BNXT_QPLIB_RES_H__
|
||||
|
||||
extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
|
||||
|
||||
#define PTR_CNT_PER_PG (PAGE_SIZE / sizeof(void *))
|
||||
#define PTR_MAX_IDX_PER_PG (PTR_CNT_PER_PG - 1)
|
||||
#define PTR_PG(x) (((x) & ~PTR_MAX_IDX_PER_PG) / PTR_CNT_PER_PG)
|
||||
#define PTR_IDX(x) ((x) & PTR_MAX_IDX_PER_PG)
|
||||
|
||||
#define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1))
|
||||
|
||||
enum bnxt_qplib_hwq_type {
|
||||
HWQ_TYPE_CTX,
|
||||
HWQ_TYPE_QUEUE,
|
||||
HWQ_TYPE_L2_CMPL
|
||||
};
|
||||
|
||||
#define MAX_PBL_LVL_0_PGS 1
|
||||
#define MAX_PBL_LVL_1_PGS 512
|
||||
#define MAX_PBL_LVL_1_PGS_SHIFT 9
|
||||
#define MAX_PBL_LVL_1_PGS_FOR_LVL_2 256
|
||||
#define MAX_PBL_LVL_2_PGS (256 * 512)
|
||||
|
||||
enum bnxt_qplib_pbl_lvl {
|
||||
PBL_LVL_0,
|
||||
PBL_LVL_1,
|
||||
PBL_LVL_2,
|
||||
PBL_LVL_MAX
|
||||
};
|
||||
|
||||
#define ROCE_PG_SIZE_4K (4 * 1024)
|
||||
#define ROCE_PG_SIZE_8K (8 * 1024)
|
||||
#define ROCE_PG_SIZE_64K (64 * 1024)
|
||||
#define ROCE_PG_SIZE_2M (2 * 1024 * 1024)
|
||||
#define ROCE_PG_SIZE_8M (8 * 1024 * 1024)
|
||||
#define ROCE_PG_SIZE_1G (1024 * 1024 * 1024)
|
||||
|
||||
struct bnxt_qplib_pbl {
|
||||
u32 pg_count;
|
||||
u32 pg_size;
|
||||
void **pg_arr;
|
||||
dma_addr_t *pg_map_arr;
|
||||
};
|
||||
|
||||
struct bnxt_qplib_hwq {
|
||||
struct pci_dev *pdev;
|
||||
/* lock to protect qplib_hwq */
|
||||
spinlock_t lock;
|
||||
struct bnxt_qplib_pbl pbl[PBL_LVL_MAX];
|
||||
enum bnxt_qplib_pbl_lvl level; /* 0, 1, or 2 */
|
||||
/* ptr for easy access to the PBL entries */
|
||||
void **pbl_ptr;
|
||||
/* ptr for easy access to the dma_addr */
|
||||
dma_addr_t *pbl_dma_ptr;
|
||||
u32 max_elements;
|
||||
u16 element_size; /* Size of each entry */
|
||||
|
||||
u32 prod; /* raw */
|
||||
u32 cons; /* raw */
|
||||
u8 cp_bit;
|
||||
u8 is_user;
|
||||
};
|
||||
|
||||
/* Tables */
|
||||
struct bnxt_qplib_pd_tbl {
|
||||
unsigned long *tbl;
|
||||
u32 max;
|
||||
};
|
||||
|
||||
struct bnxt_qplib_sgid_tbl {
|
||||
struct bnxt_qplib_gid *tbl;
|
||||
u16 *hw_id;
|
||||
u16 max;
|
||||
u16 active;
|
||||
void *ctx;
|
||||
};
|
||||
|
||||
struct bnxt_qplib_pkey_tbl {
|
||||
u16 *tbl;
|
||||
u16 max;
|
||||
u16 active;
|
||||
};
|
||||
|
||||
struct bnxt_qplib_dpi {
|
||||
u32 dpi;
|
||||
void __iomem *dbr;
|
||||
u64 umdbr;
|
||||
};
|
||||
|
||||
struct bnxt_qplib_dpi_tbl {
|
||||
void **app_tbl;
|
||||
unsigned long *tbl;
|
||||
u16 max;
|
||||
void __iomem *dbr_bar_reg_iomem;
|
||||
u64 unmapped_dbr;
|
||||
};
|
||||
|
||||
struct bnxt_qplib_stats {
|
||||
dma_addr_t dma_map;
|
||||
void *dma;
|
||||
u32 size;
|
||||
u32 fw_id;
|
||||
};
|
||||
|
||||
struct bnxt_qplib_vf_res {
|
||||
u32 max_qp_per_vf;
|
||||
u32 max_mrw_per_vf;
|
||||
u32 max_srq_per_vf;
|
||||
u32 max_cq_per_vf;
|
||||
u32 max_gid_per_vf;
|
||||
};
|
||||
|
||||
#define BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE 448
|
||||
#define BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE 64
|
||||
#define BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE 64
|
||||
#define BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE 128
|
||||
|
||||
struct bnxt_qplib_ctx {
|
||||
u32 qpc_count;
|
||||
struct bnxt_qplib_hwq qpc_tbl;
|
||||
u32 mrw_count;
|
||||
struct bnxt_qplib_hwq mrw_tbl;
|
||||
u32 srqc_count;
|
||||
struct bnxt_qplib_hwq srqc_tbl;
|
||||
u32 cq_count;
|
||||
struct bnxt_qplib_hwq cq_tbl;
|
||||
struct bnxt_qplib_hwq tim_tbl;
|
||||
#define MAX_TQM_ALLOC_REQ 32
|
||||
#define MAX_TQM_ALLOC_BLK_SIZE 8
|
||||
u8 tqm_count[MAX_TQM_ALLOC_REQ];
|
||||
struct bnxt_qplib_hwq tqm_pde;
|
||||
u32 tqm_pde_level;
|
||||
struct bnxt_qplib_hwq tqm_tbl[MAX_TQM_ALLOC_REQ];
|
||||
struct bnxt_qplib_stats stats;
|
||||
struct bnxt_qplib_vf_res vf_res;
|
||||
};
|
||||
|
||||
struct bnxt_qplib_res {
|
||||
struct pci_dev *pdev;
|
||||
struct net_device *netdev;
|
||||
|
||||
struct bnxt_qplib_rcfw *rcfw;
|
||||
|
||||
struct bnxt_qplib_pd_tbl pd_tbl;
|
||||
struct bnxt_qplib_sgid_tbl sgid_tbl;
|
||||
struct bnxt_qplib_pkey_tbl pkey_tbl;
|
||||
struct bnxt_qplib_dpi_tbl dpi_tbl;
|
||||
};
|
||||
|
||||
#define to_bnxt_qplib(ptr, type, member) \
|
||||
container_of(ptr, type, member)
|
||||
|
||||
struct bnxt_qplib_pd;
|
||||
struct bnxt_qplib_dev_attr;
|
||||
|
||||
void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq);
|
||||
int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
|
||||
struct scatterlist *sl, int nmap, u32 *elements,
|
||||
u32 elements_per_page, u32 aux, u32 pg_size,
|
||||
enum bnxt_qplib_hwq_type hwq_type);
|
||||
void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid);
|
||||
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pd_tbl,
|
||||
struct bnxt_qplib_pd *pd);
|
||||
int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_pd_tbl *pd_tbl,
|
||||
struct bnxt_qplib_pd *pd);
|
||||
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
|
||||
struct bnxt_qplib_dpi *dpi,
|
||||
void *app);
|
||||
int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_dpi_tbl *dpi_tbl,
|
||||
struct bnxt_qplib_dpi *dpi);
|
||||
void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res);
|
||||
int bnxt_qplib_init_res(struct bnxt_qplib_res *res);
|
||||
void bnxt_qplib_free_res(struct bnxt_qplib_res *res);
|
||||
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
|
||||
struct net_device *netdev,
|
||||
struct bnxt_qplib_dev_attr *dev_attr);
|
||||
void bnxt_qplib_free_ctx(struct pci_dev *pdev,
|
||||
struct bnxt_qplib_ctx *ctx);
|
||||
int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
|
||||
struct bnxt_qplib_ctx *ctx,
|
||||
bool virt_fn);
|
||||
#endif /* __BNXT_QPLIB_RES_H__ */
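The PTR_PG()/PTR_IDX() helpers above split a flat element index into "which page of pointers" and "slot within that page", and HWQ_CMP() masks a free-running producer/consumer index into a power-of-two ring. A standalone sketch of both, assuming 4 KiB pages and 8-byte pointers; HWQ_CMP is shown here taking the queue depth directly rather than the hwq struct:

#include <stdio.h>
#include <stdint.h>

#define PG_SIZE			4096u
#define PTR_CNT_PER_PG		(PG_SIZE / sizeof(void *))
#define PTR_MAX_IDX_PER_PG	(PTR_CNT_PER_PG - 1)
#define PTR_PG(x)		(((x) & ~PTR_MAX_IDX_PER_PG) / PTR_CNT_PER_PG)
#define PTR_IDX(x)		((x) & PTR_MAX_IDX_PER_PG)
#define HWQ_CMP(idx, depth)	((idx) & ((depth) - 1))

int main(void)
{
	uint32_t i = 1500;	/* flat index into a multi-page pointer array */
	uint32_t depth = 1024;	/* power-of-two HWQ depth */
	uint32_t prod = 1030;	/* raw producer index, already wrapped once */

	printf("index %u -> pointer page %lu, slot %lu\n", i,
	       (unsigned long)PTR_PG(i), (unsigned long)PTR_IDX(i));
	printf("raw prod %u -> ring slot %u\n", prod, HWQ_CMP(prod, depth));
	return 0;
}

With 512 pointers per page, index 1500 lands in page 2, slot 476, and raw producer 1030 wraps to ring slot 6.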
|
drivers/infiniband/hw/bnxt_re/qplib_sp.c (new file, 838 lines)
@@ -0,0 +1,838 @@
|
||||
/*
|
||||
* Broadcom NetXtreme-E RoCE driver.
|
||||
*
|
||||
* Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
|
||||
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
|
||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
|
||||
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
|
||||
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
|
||||
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* Description: Slow Path Operators
|
||||
*/
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/pci.h>
|
||||
|
||||
#include "roce_hsi.h"
|
||||
|
||||
#include "qplib_res.h"
|
||||
#include "qplib_rcfw.h"
|
||||
#include "qplib_sp.h"
|
||||
|
||||
const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0 } };
|
||||
|
||||
/* Device */
|
||||
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
|
||||
struct bnxt_qplib_dev_attr *attr)
|
||||
{
|
||||
struct cmdq_query_func req;
|
||||
struct creq_query_func_resp *resp;
|
||||
struct creq_query_func_resp_sb *sb;
|
||||
u16 cmd_flags = 0;
|
||||
u32 temp;
|
||||
u8 *tqm_alloc;
|
||||
int i;
|
||||
|
||||
RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags);
|
||||
|
||||
req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
|
||||
resp = (struct creq_query_func_resp *)
|
||||
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void **)&sb,
|
||||
0);
|
||||
if (!resp) {
|
||||
dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC send failed");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
|
||||
/* Cmd timed out */
|
||||
dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC timed out");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
if (resp->status ||
|
||||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
|
||||
dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC failed ");
|
||||
dev_err(&rcfw->pdev->dev,
|
||||
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
|
||||
resp->status, le16_to_cpu(req.cookie),
|
||||
le16_to_cpu(resp->cookie));
|
||||
return -EINVAL;
|
||||
}
|
||||
/* Extract the context from the side buffer */
|
||||
attr->max_qp = le32_to_cpu(sb->max_qp);
|
||||
attr->max_qp_rd_atom =
|
||||
sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
|
||||
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
|
||||
attr->max_qp_init_rd_atom =
|
||||
sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
|
||||
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
|
||||
attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
|
||||
attr->max_qp_sges = sb->max_sge;
|
||||
attr->max_cq = le32_to_cpu(sb->max_cq);
|
||||
attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
|
||||
attr->max_cq_sges = attr->max_qp_sges;
|
||||
attr->max_mr = le32_to_cpu(sb->max_mr);
|
||||
attr->max_mw = le32_to_cpu(sb->max_mw);
|
||||
|
||||
attr->max_mr_size = le64_to_cpu(sb->max_mr_size);
|
||||
attr->max_pd = 64 * 1024;
|
||||
attr->max_raw_ethy_qp = le32_to_cpu(sb->max_raw_eth_qp);
|
||||
attr->max_ah = le32_to_cpu(sb->max_ah);
|
||||
|
||||
attr->max_fmr = le32_to_cpu(sb->max_fmr);
|
||||
attr->max_map_per_fmr = sb->max_map_per_fmr;
|
||||
|
||||
attr->max_srq = le16_to_cpu(sb->max_srq);
|
||||
attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
|
||||
attr->max_srq_sges = sb->max_srq_sge;
|
||||
/* Bono only reports 1 PKEY for now, but it can support > 1 */
|
||||
attr->max_pkey = le32_to_cpu(sb->max_pkeys);
|
||||
|
||||
attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
|
||||
attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE;
|
||||
attr->max_sgid = le32_to_cpu(sb->max_gid);
|
||||
|
||||
strlcpy(attr->fw_ver, "20.6.28.0", sizeof(attr->fw_ver));
|
||||
|
||||
for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
|
||||
temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
|
||||
tqm_alloc = (u8 *)&temp;
|
||||
attr->tqm_alloc_reqs[i * 4] = *tqm_alloc;
|
||||
attr->tqm_alloc_reqs[i * 4 + 1] = *(++tqm_alloc);
|
||||
attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc);
|
||||
attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
|
||||
}
|
||||
return 0;
|
||||
}
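The QUERY_FUNC side buffer packs four per-ring TQM allocation bytes into each little-endian 32-bit word, which the loop above walks byte by byte. A sketch of the unpacking with a made-up word value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t tqm_word = 0x04030201;	/* value after le32_to_cpu(), made up */
	uint8_t reqs[4];
	int i;

	for (i = 0; i < 4; i++)
		reqs[i] = (tqm_word >> (8 * i)) & 0xff;	/* lowest byte first */

	for (i = 0; i < 4; i++)
		printf("tqm_alloc_reqs[%d] = %u\n", i, reqs[i]);
	return 0;
}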
|
||||
|
||||
/* SGID */
|
||||
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
|
||||
struct bnxt_qplib_gid *gid)
|
||||
{
|
||||
if (index > sgid_tbl->max) {
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPLIB: Index %d exceeded SGID table max (%d)",
|
||||
index, sgid_tbl->max);
|
||||
return -EINVAL;
|
||||
}
|
||||
memcpy(gid, &sgid_tbl->tbl[index], sizeof(*gid));
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
|
||||
struct bnxt_qplib_gid *gid, bool update)
|
||||
{
|
||||
struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
|
||||
struct bnxt_qplib_res,
|
||||
sgid_tbl);
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
int index;
|
||||
|
||||
if (!sgid_tbl) {
|
||||
dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated");
|
||||
return -EINVAL;
|
||||
}
|
||||
/* Do we need a sgid_lock here? */
|
||||
if (!sgid_tbl->active) {
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPLIB: SGID table has no active entries");
|
||||
return -ENOMEM;
|
||||
}
|
||||
for (index = 0; index < sgid_tbl->max; index++) {
|
||||
if (!memcmp(&sgid_tbl->tbl[index], gid, sizeof(*gid)))
|
||||
break;
|
||||
}
|
||||
if (index == sgid_tbl->max) {
|
||||
dev_warn(&res->pdev->dev, "GID not found in the SGID table");
|
||||
return 0;
|
||||
}
|
||||
/* Remove GID from the SGID table */
|
||||
if (update) {
|
||||
struct cmdq_delete_gid req;
|
||||
struct creq_delete_gid_resp *resp;
|
||||
u16 cmd_flags = 0;
|
||||
|
||||
RCFW_CMD_PREP(req, DELETE_GID, cmd_flags);
|
||||
if (sgid_tbl->hw_id[index] == 0xFFFF) {
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPLIB: GID entry contains an invalid HW id");
|
||||
return -EINVAL;
|
||||
}
|
||||
req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
|
||||
resp = (struct creq_delete_gid_resp *)
|
||||
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL,
|
||||
0);
|
||||
if (!resp) {
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPLIB: SP: DELETE_GID send failed");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw,
|
||||
le16_to_cpu(req.cookie))) {
|
||||
/* Cmd timed out */
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPLIB: SP: DELETE_GID timed out");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
if (resp->status ||
|
||||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPLIB: SP: DELETE_GID failed ");
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
|
||||
resp->status, le16_to_cpu(req.cookie),
|
||||
le16_to_cpu(resp->cookie));
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
|
||||
sizeof(bnxt_qplib_gid_zero));
|
||||
sgid_tbl->active--;
|
||||
dev_dbg(&res->pdev->dev,
|
||||
"QPLIB: SGID deleted hw_id[0x%x] = 0x%x active = 0x%x",
|
||||
index, sgid_tbl->hw_id[index], sgid_tbl->active);
|
||||
sgid_tbl->hw_id[index] = (u16)-1;
|
||||
|
||||
/* unlock */
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
|
||||
struct bnxt_qplib_gid *gid, u8 *smac, u16 vlan_id,
|
||||
bool update, u32 *index)
|
||||
{
|
||||
struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
|
||||
struct bnxt_qplib_res,
|
||||
sgid_tbl);
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
int i, free_idx, rc = 0;
|
||||
|
||||
if (!sgid_tbl) {
|
||||
dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated");
|
||||
return -EINVAL;
|
||||
}
|
||||
/* Do we need a sgid_lock here? */
|
||||
if (sgid_tbl->active == sgid_tbl->max) {
|
||||
dev_err(&res->pdev->dev, "QPLIB: SGID table is full");
|
||||
return -ENOMEM;
|
||||
}
|
||||
free_idx = sgid_tbl->max;
|
||||
for (i = 0; i < sgid_tbl->max; i++) {
|
||||
if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid))) {
|
||||
dev_dbg(&res->pdev->dev,
|
||||
"QPLIB: SGID entry already exist in entry %d!",
|
||||
i);
|
||||
*index = i;
|
||||
return -EALREADY;
|
||||
} else if (!memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
|
||||
sizeof(bnxt_qplib_gid_zero)) &&
|
||||
free_idx == sgid_tbl->max) {
|
||||
free_idx = i;
|
||||
}
|
||||
}
|
||||
if (free_idx == sgid_tbl->max) {
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPLIB: SGID table is FULL but count is not MAX??");
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (update) {
|
||||
struct cmdq_add_gid req;
|
||||
struct creq_add_gid_resp *resp;
|
||||
u16 cmd_flags = 0;
|
||||
u32 temp32[4];
|
||||
u16 temp16[3];
|
||||
|
||||
RCFW_CMD_PREP(req, ADD_GID, cmd_flags);
|
||||
|
||||
memcpy(temp32, gid->data, sizeof(struct bnxt_qplib_gid));
|
||||
req.gid[0] = cpu_to_be32(temp32[3]);
|
||||
req.gid[1] = cpu_to_be32(temp32[2]);
|
||||
req.gid[2] = cpu_to_be32(temp32[1]);
|
||||
req.gid[3] = cpu_to_be32(temp32[0]);
|
||||
if (vlan_id != 0xFFFF)
|
||||
req.vlan = cpu_to_le16((vlan_id &
|
||||
CMDQ_ADD_GID_VLAN_VLAN_ID_MASK) |
|
||||
CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
|
||||
CMDQ_ADD_GID_VLAN_VLAN_EN);
|
||||
|
||||
/* MAC in network format */
|
||||
memcpy(temp16, smac, 6);
|
||||
req.src_mac[0] = cpu_to_be16(temp16[0]);
|
||||
req.src_mac[1] = cpu_to_be16(temp16[1]);
|
||||
req.src_mac[2] = cpu_to_be16(temp16[2]);
|
||||
|
||||
resp = (struct creq_add_gid_resp *)
|
||||
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
NULL, 0);
|
||||
if (!resp) {
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPLIB: SP: ADD_GID send failed");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw,
|
||||
le16_to_cpu(req.cookie))) {
|
||||
/* Cmd timed out */
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPIB: SP: ADD_GID timed out");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
if (resp->status ||
|
||||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
|
||||
dev_err(&res->pdev->dev, "QPLIB: SP: ADD_GID failed ");
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
|
||||
resp->status, le16_to_cpu(req.cookie),
|
||||
le16_to_cpu(resp->cookie));
|
||||
return -EINVAL;
|
||||
}
|
||||
sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp->xid);
|
||||
}
|
||||
/* Add GID to the sgid_tbl */
|
||||
memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
|
||||
sgid_tbl->active++;
|
||||
dev_dbg(&res->pdev->dev,
|
||||
"QPLIB: SGID added hw_id[0x%x] = 0x%x active = 0x%x",
|
||||
free_idx, sgid_tbl->hw_id[free_idx], sgid_tbl->active);
|
||||
|
||||
*index = free_idx;
|
||||
/* unlock */
|
||||
return rc;
|
||||
}
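For ADD_GID the 128-bit GID is copied into four 32-bit words and emitted in reverse word order as big-endian values; on a little-endian host the net effect is that the 16 GID bytes go out fully byte-reversed. A userspace sketch of that packing, where htonl() stands in for cpu_to_be32() and the sample GID is arbitrary:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>		/* htonl() stands in for cpu_to_be32() */

int main(void)
{
	uint8_t gid[16] = { 0xfe, 0x80, 0, 0, 0, 0, 0, 0,
			    0x02, 0x10, 0x18, 0xff, 0xfe, 0xaa, 0xbb, 0xcc };
	uint32_t temp32[4], req_gid[4];
	uint8_t *p = (uint8_t *)req_gid;
	int i;

	memcpy(temp32, gid, sizeof(temp32));
	req_gid[0] = htonl(temp32[3]);	/* words reversed ... */
	req_gid[1] = htonl(temp32[2]);	/* ... each written big-endian */
	req_gid[2] = htonl(temp32[1]);
	req_gid[3] = htonl(temp32[0]);

	for (i = 0; i < 16; i++)	/* bytes as they reach the firmware */
		printf("%02x%s", p[i], i == 15 ? "\n" : " ");
	return 0;
}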
|
||||
|
||||
/* pkeys */
|
||||
int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
|
||||
u16 *pkey)
|
||||
{
|
||||
if (index == 0xFFFF) {
|
||||
*pkey = 0xFFFF;
|
||||
return 0;
|
||||
}
|
||||
if (index > pkey_tbl->max) {
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPLIB: Index %d exceeded PKEY table max (%d)",
|
||||
index, pkey_tbl->max);
|
||||
return -EINVAL;
|
||||
}
|
||||
memcpy(pkey, &pkey_tbl->tbl[index], sizeof(*pkey));
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
|
||||
bool update)
|
||||
{
|
||||
int i, rc = 0;
|
||||
|
||||
if (!pkey_tbl) {
|
||||
dev_err(&res->pdev->dev, "QPLIB: PKEY table not allocated");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Do we need a pkey_lock here? */
|
||||
if (!pkey_tbl->active) {
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPLIB: PKEY table has no active entries");
|
||||
return -ENOMEM;
|
||||
}
|
||||
for (i = 0; i < pkey_tbl->max; i++) {
|
||||
if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey)))
|
||||
break;
|
||||
}
|
||||
if (i == pkey_tbl->max) {
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPLIB: PKEY 0x%04x not found in the pkey table",
|
||||
*pkey);
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset(&pkey_tbl->tbl[i], 0, sizeof(*pkey));
|
||||
pkey_tbl->active--;
|
||||
|
||||
/* unlock */
|
||||
return rc;
|
||||
}
|
||||
|
||||
int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
|
||||
bool update)
|
||||
{
|
||||
int i, free_idx, rc = 0;
|
||||
|
||||
if (!pkey_tbl) {
|
||||
dev_err(&res->pdev->dev, "QPLIB: PKEY table not allocated");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Do we need a pkey_lock here? */
|
||||
if (pkey_tbl->active == pkey_tbl->max) {
|
||||
dev_err(&res->pdev->dev, "QPLIB: PKEY table is full");
|
||||
return -ENOMEM;
|
||||
}
|
||||
free_idx = pkey_tbl->max;
|
||||
for (i = 0; i < pkey_tbl->max; i++) {
|
||||
if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey)))
|
||||
return -EALREADY;
|
||||
else if (!pkey_tbl->tbl[i] && free_idx == pkey_tbl->max)
|
||||
free_idx = i;
|
||||
}
|
||||
if (free_idx == pkey_tbl->max) {
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPLIB: PKEY table is FULL but count is not MAX??");
|
||||
return -ENOMEM;
|
||||
}
|
||||
/* Add PKEY to the pkey_tbl */
|
||||
memcpy(&pkey_tbl->tbl[free_idx], pkey, sizeof(*pkey));
|
||||
pkey_tbl->active++;
|
||||
|
||||
/* unlock */
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* AH */
|
||||
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
|
||||
{
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct cmdq_create_ah req;
|
||||
struct creq_create_ah_resp *resp;
|
||||
u16 cmd_flags = 0;
|
||||
u32 temp32[4];
|
||||
u16 temp16[3];
|
||||
|
||||
RCFW_CMD_PREP(req, CREATE_AH, cmd_flags);
|
||||
|
||||
memcpy(temp32, ah->dgid.data, sizeof(struct bnxt_qplib_gid));
|
||||
req.dgid[0] = cpu_to_le32(temp32[0]);
|
||||
req.dgid[1] = cpu_to_le32(temp32[1]);
|
||||
req.dgid[2] = cpu_to_le32(temp32[2]);
|
||||
req.dgid[3] = cpu_to_le32(temp32[3]);
|
||||
|
||||
req.type = ah->nw_type;
|
||||
req.hop_limit = ah->hop_limit;
|
||||
req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id[ah->sgid_index]);
|
||||
req.dest_vlan_id_flow_label = cpu_to_le32((ah->flow_label &
|
||||
CMDQ_CREATE_AH_FLOW_LABEL_MASK) |
|
||||
CMDQ_CREATE_AH_DEST_VLAN_ID_MASK);
|
||||
req.pd_id = cpu_to_le32(ah->pd->id);
|
||||
req.traffic_class = ah->traffic_class;
|
||||
|
||||
/* MAC in network format */
|
||||
memcpy(temp16, ah->dmac, 6);
|
||||
req.dest_mac[0] = cpu_to_le16(temp16[0]);
|
||||
req.dest_mac[1] = cpu_to_le16(temp16[1]);
|
||||
req.dest_mac[2] = cpu_to_le16(temp16[2]);
|
||||
|
||||
resp = (struct creq_create_ah_resp *)
|
||||
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
NULL, 1);
|
||||
if (!resp) {
|
||||
dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH send failed");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) {
|
||||
/* Cmd timed out */
|
||||
dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH timed out");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
if (resp->status ||
|
||||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
|
||||
dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH failed ");
|
||||
dev_err(&rcfw->pdev->dev,
|
||||
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
|
||||
resp->status, le16_to_cpu(req.cookie),
|
||||
le16_to_cpu(resp->cookie));
|
||||
return -EINVAL;
|
||||
}
|
||||
ah->id = le32_to_cpu(resp->xid);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
|
||||
{
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct cmdq_destroy_ah req;
|
||||
struct creq_destroy_ah_resp *resp;
|
||||
u16 cmd_flags = 0;
|
||||
|
||||
/* Clean up the AH table in the device */
|
||||
RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags);
|
||||
|
||||
req.ah_cid = cpu_to_le32(ah->id);
|
||||
|
||||
resp = (struct creq_destroy_ah_resp *)
|
||||
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
NULL, 1);
|
||||
if (!resp) {
|
||||
dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH send failed");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) {
|
||||
/* Cmd timed out */
|
||||
dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH timed out");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
if (resp->status ||
|
||||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
|
||||
dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH failed ");
|
||||
dev_err(&rcfw->pdev->dev,
|
||||
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
|
||||
resp->status, le16_to_cpu(req.cookie),
|
||||
le16_to_cpu(resp->cookie));
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* MRW */
|
||||
int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
|
||||
{
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct cmdq_deallocate_key req;
|
||||
struct creq_deallocate_key_resp *resp;
|
||||
u16 cmd_flags = 0;
|
||||
|
||||
if (mrw->lkey == 0xFFFFFFFF) {
|
||||
dev_info(&res->pdev->dev,
|
||||
"QPLIB: SP: Free a reserved lkey MRW");
|
||||
return 0;
|
||||
}
|
||||
|
||||
RCFW_CMD_PREP(req, DEALLOCATE_KEY, cmd_flags);
|
||||
|
||||
req.mrw_flags = mrw->type;
|
||||
|
||||
if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
|
||||
(mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
|
||||
(mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
|
||||
req.key = cpu_to_le32(mrw->rkey);
|
||||
else
|
||||
req.key = cpu_to_le32(mrw->lkey);
|
||||
|
||||
resp = (struct creq_deallocate_key_resp *)
|
||||
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
NULL, 0);
|
||||
if (!resp) {
|
||||
dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR send failed");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
|
||||
/* Cmd timed out */
|
||||
dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR timed out");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
if (resp->status ||
|
||||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
|
||||
dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR failed ");
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
|
||||
resp->status, le16_to_cpu(req.cookie),
|
||||
le16_to_cpu(resp->cookie));
|
||||
return -EINVAL;
|
||||
}
|
||||
/* Free the qplib's MRW memory */
|
||||
if (mrw->hwq.max_elements)
|
||||
bnxt_qplib_free_hwq(res->pdev, &mrw->hwq);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
|
||||
{
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct cmdq_allocate_mrw req;
|
||||
struct creq_allocate_mrw_resp *resp;
|
||||
u16 cmd_flags = 0;
|
||||
unsigned long tmp;
|
||||
|
||||
RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags);
|
||||
|
||||
req.pd_id = cpu_to_le32(mrw->pd->id);
|
||||
req.mrw_flags = mrw->type;
|
||||
if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR &&
|
||||
mrw->flags & BNXT_QPLIB_FR_PMR) ||
|
||||
mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A ||
|
||||
mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)
|
||||
req.access = CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY;
|
||||
tmp = (unsigned long)mrw;
|
||||
req.mrw_handle = cpu_to_le64(tmp);
|
||||
|
||||
resp = (struct creq_allocate_mrw_resp *)
|
||||
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
NULL, 0);
|
||||
if (!resp) {
|
||||
dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW send failed");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
|
||||
/* Cmd timed out */
|
||||
dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW timed out");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
if (resp->status ||
|
||||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
|
||||
dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW failed ");
|
||||
dev_err(&rcfw->pdev->dev,
|
||||
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
|
||||
resp->status, le16_to_cpu(req.cookie),
|
||||
le16_to_cpu(resp->cookie));
|
||||
return -EINVAL;
|
||||
}
|
||||
if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
|
||||
(mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
|
||||
(mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
|
||||
mrw->rkey = le32_to_cpu(resp->xid);
|
||||
else
|
||||
mrw->lkey = le32_to_cpu(resp->xid);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
|
||||
bool block)
|
||||
{
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct cmdq_deregister_mr req;
|
||||
struct creq_deregister_mr_resp *resp;
|
||||
u16 cmd_flags = 0;
|
||||
int rc;
|
||||
|
||||
RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags);
|
||||
|
||||
req.lkey = cpu_to_le32(mrw->lkey);
|
||||
resp = (struct creq_deregister_mr_resp *)
|
||||
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
NULL, block);
|
||||
if (!resp) {
|
||||
dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR send failed");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (block)
|
||||
rc = bnxt_qplib_rcfw_block_for_resp(rcfw,
|
||||
le16_to_cpu(req.cookie));
|
||||
else
|
||||
rc = bnxt_qplib_rcfw_wait_for_resp(rcfw,
|
||||
le16_to_cpu(req.cookie));
|
||||
if (!rc) {
|
||||
/* Cmd timed out */
|
||||
dev_err(&res->pdev->dev, "QPLIB: SP: DEREG_MR timed out");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
if (resp->status ||
|
||||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
|
||||
dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR failed ");
|
||||
dev_err(&rcfw->pdev->dev,
|
||||
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
|
||||
resp->status, le16_to_cpu(req.cookie),
|
||||
le16_to_cpu(resp->cookie));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Free the qplib's MR memory */
|
||||
if (mrw->hwq.max_elements) {
|
||||
mrw->va = 0;
|
||||
mrw->total_size = 0;
|
||||
bnxt_qplib_free_hwq(res->pdev, &mrw->hwq);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
|
||||
u64 *pbl_tbl, int num_pbls, bool block)
|
||||
{
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct cmdq_register_mr req;
|
||||
struct creq_register_mr_resp *resp;
|
||||
u16 cmd_flags = 0, level;
|
||||
int pg_ptrs, pages, i, rc;
|
||||
dma_addr_t **pbl_ptr;
|
||||
u32 pg_size;
|
||||
|
||||
if (num_pbls) {
|
||||
pg_ptrs = roundup_pow_of_two(num_pbls);
|
||||
pages = pg_ptrs >> MAX_PBL_LVL_1_PGS_SHIFT;
|
||||
if (!pages)
|
||||
pages++;
|
||||
|
||||
if (pages > MAX_PBL_LVL_1_PGS) {
|
||||
dev_err(&res->pdev->dev, "QPLIB: SP: Reg MR pages ");
|
||||
dev_err(&res->pdev->dev,
|
||||
"requested (0x%x) exceeded max (0x%x)",
|
||||
pages, MAX_PBL_LVL_1_PGS);
|
||||
return -ENOMEM;
|
||||
}
|
||||
/* Free the hwq if it already exist, must be a rereg */
|
||||
if (mr->hwq.max_elements)
|
||||
bnxt_qplib_free_hwq(res->pdev, &mr->hwq);
|
||||
|
||||
mr->hwq.max_elements = pages;
|
||||
rc = bnxt_qplib_alloc_init_hwq(res->pdev, &mr->hwq, NULL, 0,
|
||||
&mr->hwq.max_elements,
|
||||
PAGE_SIZE, 0, PAGE_SIZE,
|
||||
HWQ_TYPE_CTX);
|
||||
if (rc) {
|
||||
dev_err(&res->pdev->dev,
|
||||
"SP: Reg MR memory allocation failed");
|
||||
return -ENOMEM;
|
||||
}
|
||||
/* Write to the hwq */
|
||||
pbl_ptr = (dma_addr_t **)mr->hwq.pbl_ptr;
|
||||
for (i = 0; i < num_pbls; i++)
|
||||
pbl_ptr[PTR_PG(i)][PTR_IDX(i)] =
|
||||
(pbl_tbl[i] & PAGE_MASK) | PTU_PTE_VALID;
|
||||
}
|
||||
|
||||
RCFW_CMD_PREP(req, REGISTER_MR, cmd_flags);
|
||||
|
||||
/* Configure the request */
|
||||
if (mr->hwq.level == PBL_LVL_MAX) {
|
||||
level = 0;
|
||||
req.pbl = 0;
|
||||
pg_size = PAGE_SIZE;
|
||||
} else {
|
||||
level = mr->hwq.level + 1;
|
||||
req.pbl = cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
|
||||
pg_size = mr->hwq.pbl[PBL_LVL_0].pg_size;
|
||||
}
|
||||
req.log2_pg_size_lvl = (level << CMDQ_REGISTER_MR_LVL_SFT) |
|
||||
((ilog2(pg_size) <<
|
||||
CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT) &
|
||||
CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK);
|
||||
req.access = (mr->flags & 0xFFFF);
|
||||
req.va = cpu_to_le64(mr->va);
|
||||
req.key = cpu_to_le32(mr->lkey);
|
||||
req.mr_size = cpu_to_le64(mr->total_size);
|
||||
|
||||
resp = (struct creq_register_mr_resp *)
|
||||
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
NULL, block);
|
||||
if (!resp) {
|
||||
dev_err(&res->pdev->dev, "SP: REG_MR send failed");
|
||||
rc = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
if (block)
|
||||
rc = bnxt_qplib_rcfw_block_for_resp(rcfw,
|
||||
le16_to_cpu(req.cookie));
|
||||
else
|
||||
rc = bnxt_qplib_rcfw_wait_for_resp(rcfw,
|
||||
le16_to_cpu(req.cookie));
|
||||
if (!rc) {
|
||||
/* Cmd timed out */
|
||||
dev_err(&res->pdev->dev, "SP: REG_MR timed out");
|
||||
rc = -ETIMEDOUT;
|
||||
goto fail;
|
||||
}
|
||||
if (resp->status ||
|
||||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
|
||||
dev_err(&res->pdev->dev, "QPLIB: SP: REG_MR failed ");
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPLIB: SP: with status 0x%x cmdq 0x%x resp 0x%x",
|
||||
resp->status, le16_to_cpu(req.cookie),
|
||||
le16_to_cpu(resp->cookie));
|
||||
rc = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
if (mr->hwq.max_elements)
|
||||
bnxt_qplib_free_hwq(res->pdev, &mr->hwq);
|
||||
return rc;
|
||||
}
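bnxt_qplib_reg_mr() encodes the PBL indirection level and log2 of the PBL page size into the single log2_pg_size_lvl request field. The sketch below shows only the shift-and-mask packing; the EX_* shift and mask values are placeholders for the real CMDQ_REGISTER_MR_* definitions in roce_hsi.h, which are not part of this hunk.

#include <stdio.h>
#include <stdint.h>

#define EX_LVL_SFT		0	/* placeholder shift */
#define EX_LOG2_PG_SIZE_SFT	2	/* placeholder shift */
#define EX_LOG2_PG_SIZE_MASK	0x7c	/* placeholder mask */

static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint32_t pg_size = 4096;	/* PBL page size */
	unsigned int level = 1 + 1;	/* hwq.level + 1, as in the driver */
	unsigned int field;

	field = (level << EX_LVL_SFT) |
		((ilog2_u32(pg_size) << EX_LOG2_PG_SIZE_SFT) &
		 EX_LOG2_PG_SIZE_MASK);
	printf("log2_pg_size_lvl = 0x%02x (level %u, log2(pg_size) %u)\n",
	       field, level, ilog2_u32(pg_size));
	return 0;
}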
|
||||
|
||||
int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_frpl *frpl,
|
||||
int max_pg_ptrs)
|
||||
{
|
||||
int pg_ptrs, pages, rc;
|
||||
|
||||
/* Re-calculate the max to fit the HWQ allocation model */
|
||||
pg_ptrs = roundup_pow_of_two(max_pg_ptrs);
|
||||
pages = pg_ptrs >> MAX_PBL_LVL_1_PGS_SHIFT;
|
||||
if (!pages)
|
||||
pages++;
|
||||
|
||||
if (pages > MAX_PBL_LVL_1_PGS)
|
||||
return -ENOMEM;
|
||||
|
||||
frpl->hwq.max_elements = pages;
|
||||
rc = bnxt_qplib_alloc_init_hwq(res->pdev, &frpl->hwq, NULL, 0,
|
||||
&frpl->hwq.max_elements, PAGE_SIZE, 0,
|
||||
PAGE_SIZE, HWQ_TYPE_CTX);
|
||||
if (!rc)
|
||||
frpl->max_pg_ptrs = pg_ptrs;
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_frpl *frpl)
|
||||
{
|
||||
bnxt_qplib_free_hwq(res->pdev, &frpl->hwq);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
|
||||
{
|
||||
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
|
||||
struct cmdq_map_tc_to_cos req;
|
||||
struct creq_map_tc_to_cos_resp *resp;
|
||||
u16 cmd_flags = 0;
|
||||
int tleft;
|
||||
|
||||
RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags);
|
||||
req.cos0 = cpu_to_le16(cids[0]);
|
||||
req.cos1 = cpu_to_le16(cids[1]);
|
||||
|
||||
resp = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 0);
|
||||
if (!resp) {
|
||||
dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS send failed");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
tleft = bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie));
|
||||
if (!tleft) {
|
||||
dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS timed out");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
if (resp->status ||
|
||||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
|
||||
dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS failed ");
|
||||
dev_err(&res->pdev->dev,
|
||||
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
|
||||
resp->status, le16_to_cpu(req.cookie),
|
||||
le16_to_cpu(resp->cookie));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
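Editorial note: bnxt_qplib_reg_mr() and bnxt_qplib_map_tc2cos() above share the same firmware command shape: build a cmdq request carrying a cookie, send it, block or sleep for the creq response, and only trust the result if the response echoes that cookie with status 0. A standalone userspace sketch of just that acceptance test, using made-up types rather than the real bnxt_re structures:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins, not the real cmdq/creq layouts. */
struct demo_req  { uint16_t cookie; };
struct demo_resp { uint16_t cookie; uint8_t status; };

/* Accept a response only if it echoes the request cookie and has status 0. */
static int demo_resp_ok(const struct demo_req *req, const struct demo_resp *resp)
{
	return resp && resp->status == 0 && resp->cookie == req->cookie;
}

int main(void)
{
	struct demo_req req = { .cookie = 0x1234 };
	struct demo_resp good = { .cookie = 0x1234, .status = 0 };
	struct demo_resp stale = { .cookie = 0x1233, .status = 0 };

	printf("good=%d stale=%d\n", demo_resp_ok(&req, &good),
	       demo_resp_ok(&req, &stale));
	return 0;
}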
drivers/infiniband/hw/bnxt_re/qplib_sp.h | 160 (new file)
@@ -0,0 +1,160 @@
/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Slow Path Operators (header)
 *
 */

#ifndef __BNXT_QPLIB_SP_H__
#define __BNXT_QPLIB_SP_H__

struct bnxt_qplib_dev_attr {
	char fw_ver[32];
	u16 max_sgid;
	u16 max_mrw;
	u32 max_qp;
#define BNXT_QPLIB_MAX_OUT_RD_ATOM 126
	u32 max_qp_rd_atom;
	u32 max_qp_init_rd_atom;
	u32 max_qp_wqes;
	u32 max_qp_sges;
	u32 max_cq;
	u32 max_cq_wqes;
	u32 max_cq_sges;
	u32 max_mr;
	u64 max_mr_size;
	u32 max_pd;
	u32 max_mw;
	u32 max_raw_ethy_qp;
	u32 max_ah;
	u32 max_fmr;
	u32 max_map_per_fmr;
	u32 max_srq;
	u32 max_srq_wqes;
	u32 max_srq_sges;
	u32 max_pkey;
	u32 max_inline_data;
	u32 l2_db_size;
	u8 tqm_alloc_reqs[MAX_TQM_ALLOC_REQ];
};

struct bnxt_qplib_pd {
	u32 id;
};

struct bnxt_qplib_gid {
	u8 data[16];
};

struct bnxt_qplib_ah {
	struct bnxt_qplib_gid dgid;
	struct bnxt_qplib_pd *pd;
	u32 id;
	u8 sgid_index;
	/* For Query AH if the hw table and SW table are different */
	u8 host_sgid_index;
	u8 traffic_class;
	u32 flow_label;
	u8 hop_limit;
	u8 sl;
	u8 dmac[6];
	u16 vlan_id;
	u8 nw_type;
};

struct bnxt_qplib_mrw {
	struct bnxt_qplib_pd *pd;
	int type;
	u32 flags;
#define BNXT_QPLIB_FR_PMR 0x80000000
	u32 lkey;
	u32 rkey;
#define BNXT_QPLIB_RSVD_LKEY 0xFFFFFFFF
	u64 va;
	u64 total_size;
	u32 npages;
	u64 mr_handle;
	struct bnxt_qplib_hwq hwq;
};

struct bnxt_qplib_frpl {
	int max_pg_ptrs;
	struct bnxt_qplib_hwq hwq;
};

#define BNXT_QPLIB_ACCESS_LOCAL_WRITE   BIT(0)
#define BNXT_QPLIB_ACCESS_REMOTE_READ   BIT(1)
#define BNXT_QPLIB_ACCESS_REMOTE_WRITE  BIT(2)
#define BNXT_QPLIB_ACCESS_REMOTE_ATOMIC BIT(3)
#define BNXT_QPLIB_ACCESS_MW_BIND       BIT(4)
#define BNXT_QPLIB_ACCESS_ZERO_BASED    BIT(5)
#define BNXT_QPLIB_ACCESS_ON_DEMAND     BIT(6)

int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
			struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
			struct bnxt_qplib_gid *gid);
int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			struct bnxt_qplib_gid *gid, bool update);
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id,
			bool update, u32 *index);
int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
			struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
			u16 *pkey);
int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res,
			struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
			bool update);
int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
			struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
			bool update);
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
			    struct bnxt_qplib_dev_attr *attr);
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah);
int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah);
int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_mrw *mrw);
int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
			 bool block);
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
		      u64 *pbl_tbl, int num_pbls, bool block);
int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr);
int bnxt_qplib_alloc_fast_reg_mr(struct bnxt_qplib_res *res,
				 struct bnxt_qplib_mrw *mr, int max);
int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
					struct bnxt_qplib_frpl *frpl, int max);
int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_frpl *frpl);
int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids);
#endif /* __BNXT_QPLIB_SP_H__*/
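Editorial note: bnxt_qplib_alloc_fast_reg_page_list() and bnxt_qplib_reg_mr(), declared above, size their HWQ the same way: round the page-pointer count up to a power of two, shift by the number of pointers that fit in one level-1 PBL page, and use at least one page. A small standalone sketch of that arithmetic; the 512-pointers-per-page shift is an assumption for illustration, not a value taken from the driver:

#include <stdio.h>

/* Assumed for illustration: 512 pointers per PBL page, i.e. a shift of 9. */
#define DEMO_PTRS_PER_PAGE_SHIFT 9

static unsigned int roundup_pow_of_two_u(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/* Pages needed to hold num_ptrs page pointers, never less than one. */
static unsigned int demo_pbl_pages(unsigned int num_ptrs)
{
	unsigned int pages;

	pages = roundup_pow_of_two_u(num_ptrs) >> DEMO_PTRS_PER_PAGE_SHIFT;
	return pages ? pages : 1;
}

int main(void)
{
	/* Prints "1 2 16" for 1, 600 and 5000 pointers respectively. */
	printf("%u %u %u\n", demo_pbl_pages(1), demo_pbl_pages(600),
	       demo_pbl_pages(5000));
	return 0;
}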
drivers/infiniband/hw/bnxt_re/roce_hsi.h | 2821 (new file; diff suppressed because it is too large)
@ -692,6 +692,10 @@ static int send_connect(struct c4iw_ep *ep)
|
||||
int ret;
|
||||
enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
|
||||
u32 isn = (prandom_u32() & ~7UL) - 1;
|
||||
struct net_device *netdev;
|
||||
u64 params;
|
||||
|
||||
netdev = ep->com.dev->rdev.lldi.ports[0];
|
||||
|
||||
switch (CHELSIO_CHIP_VERSION(adapter_type)) {
|
||||
case CHELSIO_T4:
|
||||
@ -768,6 +772,8 @@ static int send_connect(struct c4iw_ep *ep)
|
||||
opt2 |= T5_ISS_F;
|
||||
}
|
||||
|
||||
params = cxgb4_select_ntuple(netdev, ep->l2t);
|
||||
|
||||
if (ep->com.remote_addr.ss_family == AF_INET6)
|
||||
cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
|
||||
(const u32 *)&la6->sin6_addr.s6_addr, 1);
|
||||
@ -809,18 +815,22 @@ static int send_connect(struct c4iw_ep *ep)
|
||||
req->opt0 = cpu_to_be64(opt0);
|
||||
|
||||
if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
|
||||
req->params = cpu_to_be32(cxgb4_select_ntuple(
|
||||
ep->com.dev->rdev.lldi.ports[0],
|
||||
ep->l2t));
|
||||
req->params = cpu_to_be32(params);
|
||||
req->opt2 = cpu_to_be32(opt2);
|
||||
} else {
|
||||
t5req->params = cpu_to_be64(FILTER_TUPLE_V(
|
||||
cxgb4_select_ntuple(
|
||||
ep->com.dev->rdev.lldi.ports[0],
|
||||
ep->l2t)));
|
||||
t5req->rsvd = cpu_to_be32(isn);
|
||||
PDBG("%s snd_isn %u\n", __func__, t5req->rsvd);
|
||||
t5req->opt2 = cpu_to_be32(opt2);
|
||||
if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
|
||||
t5req->params =
|
||||
cpu_to_be64(FILTER_TUPLE_V(params));
|
||||
t5req->rsvd = cpu_to_be32(isn);
|
||||
PDBG("%s snd_isn %u\n", __func__, t5req->rsvd);
|
||||
t5req->opt2 = cpu_to_be32(opt2);
|
||||
} else {
|
||||
t6req->params =
|
||||
cpu_to_be64(FILTER_TUPLE_V(params));
|
||||
t6req->rsvd = cpu_to_be32(isn);
|
||||
PDBG("%s snd_isn %u\n", __func__, t6req->rsvd);
|
||||
t6req->opt2 = cpu_to_be32(opt2);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
switch (CHELSIO_CHIP_VERSION(adapter_type)) {
|
||||
@ -859,18 +869,24 @@ static int send_connect(struct c4iw_ep *ep)
|
||||
req6->opt0 = cpu_to_be64(opt0);
|
||||
|
||||
if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
|
||||
req6->params = cpu_to_be32(cxgb4_select_ntuple(
|
||||
ep->com.dev->rdev.lldi.ports[0],
|
||||
ep->l2t));
|
||||
req6->params = cpu_to_be32(cxgb4_select_ntuple(netdev,
|
||||
ep->l2t));
|
||||
req6->opt2 = cpu_to_be32(opt2);
|
||||
} else {
|
||||
t5req6->params = cpu_to_be64(FILTER_TUPLE_V(
|
||||
cxgb4_select_ntuple(
|
||||
ep->com.dev->rdev.lldi.ports[0],
|
||||
ep->l2t)));
|
||||
t5req6->rsvd = cpu_to_be32(isn);
|
||||
PDBG("%s snd_isn %u\n", __func__, t5req6->rsvd);
|
||||
t5req6->opt2 = cpu_to_be32(opt2);
|
||||
if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
|
||||
t5req6->params =
|
||||
cpu_to_be64(FILTER_TUPLE_V(params));
|
||||
t5req6->rsvd = cpu_to_be32(isn);
|
||||
PDBG("%s snd_isn %u\n", __func__, t5req6->rsvd);
|
||||
t5req6->opt2 = cpu_to_be32(opt2);
|
||||
} else {
|
||||
t6req6->params =
|
||||
cpu_to_be64(FILTER_TUPLE_V(params));
|
||||
t6req6->rsvd = cpu_to_be32(isn);
|
||||
PDBG("%s snd_isn %u\n", __func__, t6req6->rsvd);
|
||||
t6req6->opt2 = cpu_to_be32(opt2);
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@ -2517,18 +2533,18 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
|
||||
struct sockaddr_in *sin = (struct sockaddr_in *)
|
||||
&child_ep->com.local_addr;
|
||||
|
||||
sin->sin_family = PF_INET;
|
||||
sin->sin_family = AF_INET;
|
||||
sin->sin_port = local_port;
|
||||
sin->sin_addr.s_addr = *(__be32 *)local_ip;
|
||||
|
||||
sin = (struct sockaddr_in *)&child_ep->com.local_addr;
|
||||
sin->sin_family = PF_INET;
|
||||
sin->sin_family = AF_INET;
|
||||
sin->sin_port = ((struct sockaddr_in *)
|
||||
&parent_ep->com.local_addr)->sin_port;
|
||||
sin->sin_addr.s_addr = *(__be32 *)local_ip;
|
||||
|
||||
sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
|
||||
sin->sin_family = PF_INET;
|
||||
sin->sin_family = AF_INET;
|
||||
sin->sin_port = peer_port;
|
||||
sin->sin_addr.s_addr = *(__be32 *)peer_ip;
|
||||
} else {
|
||||
|
@ -214,6 +214,52 @@ static const struct file_operations wr_log_debugfs_fops = {
|
||||
.write = wr_log_clear,
|
||||
};
|
||||
|
||||
static struct sockaddr_in zero_sin = {
|
||||
.sin_family = AF_INET,
|
||||
};
|
||||
|
||||
static struct sockaddr_in6 zero_sin6 = {
|
||||
.sin6_family = AF_INET6,
|
||||
};
|
||||
|
||||
static void set_ep_sin_addrs(struct c4iw_ep *ep,
|
||||
struct sockaddr_in **lsin,
|
||||
struct sockaddr_in **rsin,
|
||||
struct sockaddr_in **m_lsin,
|
||||
struct sockaddr_in **m_rsin)
|
||||
{
|
||||
struct iw_cm_id *id = ep->com.cm_id;
|
||||
|
||||
*lsin = (struct sockaddr_in *)&ep->com.local_addr;
|
||||
*rsin = (struct sockaddr_in *)&ep->com.remote_addr;
|
||||
if (id) {
|
||||
*m_lsin = (struct sockaddr_in *)&id->m_local_addr;
|
||||
*m_rsin = (struct sockaddr_in *)&id->m_remote_addr;
|
||||
} else {
|
||||
*m_lsin = &zero_sin;
|
||||
*m_rsin = &zero_sin;
|
||||
}
|
||||
}
|
||||
|
||||
static void set_ep_sin6_addrs(struct c4iw_ep *ep,
|
||||
struct sockaddr_in6 **lsin6,
|
||||
struct sockaddr_in6 **rsin6,
|
||||
struct sockaddr_in6 **m_lsin6,
|
||||
struct sockaddr_in6 **m_rsin6)
|
||||
{
|
||||
struct iw_cm_id *id = ep->com.cm_id;
|
||||
|
||||
*lsin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
|
||||
*rsin6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
|
||||
if (id) {
|
||||
*m_lsin6 = (struct sockaddr_in6 *)&id->m_local_addr;
|
||||
*m_rsin6 = (struct sockaddr_in6 *)&id->m_remote_addr;
|
||||
} else {
|
||||
*m_lsin6 = &zero_sin6;
|
||||
*m_rsin6 = &zero_sin6;
|
||||
}
|
||||
}
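Editorial note: the two helpers added above let the dump routines below fetch their sockaddr pointers from one place: the cm_id when it is present, or a static zeroed sockaddr when it is not, instead of open-coding the casts at every call site. A minimal sketch of that fallback shape with a hypothetical struct (not the c4iw types):

#include <stdio.h>

struct demo_addr { unsigned short port; };

static struct demo_addr zero_addr;	/* all-zero fallback */

/* Point *out at the real address when available, else at the zero fallback. */
static void demo_get_addr(struct demo_addr *maybe, struct demo_addr **out)
{
	*out = maybe ? maybe : &zero_addr;
}

int main(void)
{
	struct demo_addr real = { .port = 4791 };
	struct demo_addr *a, *b;

	demo_get_addr(&real, &a);
	demo_get_addr(NULL, &b);
	printf("%u %u\n", a->port, b->port);	/* prints "4791 0" */
	return 0;
}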
|
||||
|
||||
static int dump_qp(int id, void *p, void *data)
|
||||
{
|
||||
struct c4iw_qp *qp = p;
|
||||
@ -229,16 +275,15 @@ static int dump_qp(int id, void *p, void *data)
|
||||
return 1;
|
||||
|
||||
if (qp->ep) {
|
||||
if (qp->ep->com.local_addr.ss_family == AF_INET) {
|
||||
struct sockaddr_in *lsin = (struct sockaddr_in *)
|
||||
&qp->ep->com.cm_id->local_addr;
|
||||
struct sockaddr_in *rsin = (struct sockaddr_in *)
|
||||
&qp->ep->com.cm_id->remote_addr;
|
||||
struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
|
||||
&qp->ep->com.cm_id->m_local_addr;
|
||||
struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
|
||||
&qp->ep->com.cm_id->m_remote_addr;
|
||||
struct c4iw_ep *ep = qp->ep;
|
||||
|
||||
if (ep->com.local_addr.ss_family == AF_INET) {
|
||||
struct sockaddr_in *lsin;
|
||||
struct sockaddr_in *rsin;
|
||||
struct sockaddr_in *m_lsin;
|
||||
struct sockaddr_in *m_rsin;
|
||||
|
||||
set_ep_sin_addrs(ep, &lsin, &rsin, &m_lsin, &m_rsin);
|
||||
cc = snprintf(qpd->buf + qpd->pos, space,
|
||||
"rc qp sq id %u rq id %u state %u "
|
||||
"onchip %u ep tid %u state %u "
|
||||
@ -246,23 +291,19 @@ static int dump_qp(int id, void *p, void *data)
|
||||
qp->wq.sq.qid, qp->wq.rq.qid,
|
||||
(int)qp->attr.state,
|
||||
qp->wq.sq.flags & T4_SQ_ONCHIP,
|
||||
qp->ep->hwtid, (int)qp->ep->com.state,
|
||||
ep->hwtid, (int)ep->com.state,
|
||||
&lsin->sin_addr, ntohs(lsin->sin_port),
|
||||
ntohs(mapped_lsin->sin_port),
|
||||
ntohs(m_lsin->sin_port),
|
||||
&rsin->sin_addr, ntohs(rsin->sin_port),
|
||||
ntohs(mapped_rsin->sin_port));
|
||||
ntohs(m_rsin->sin_port));
|
||||
} else {
|
||||
struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
|
||||
&qp->ep->com.cm_id->local_addr;
|
||||
struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
|
||||
&qp->ep->com.cm_id->remote_addr;
|
||||
struct sockaddr_in6 *mapped_lsin6 =
|
||||
(struct sockaddr_in6 *)
|
||||
&qp->ep->com.cm_id->m_local_addr;
|
||||
struct sockaddr_in6 *mapped_rsin6 =
|
||||
(struct sockaddr_in6 *)
|
||||
&qp->ep->com.cm_id->m_remote_addr;
|
||||
struct sockaddr_in6 *lsin6;
|
||||
struct sockaddr_in6 *rsin6;
|
||||
struct sockaddr_in6 *m_lsin6;
|
||||
struct sockaddr_in6 *m_rsin6;
|
||||
|
||||
set_ep_sin6_addrs(ep, &lsin6, &rsin6, &m_lsin6,
|
||||
&m_rsin6);
|
||||
cc = snprintf(qpd->buf + qpd->pos, space,
|
||||
"rc qp sq id %u rq id %u state %u "
|
||||
"onchip %u ep tid %u state %u "
|
||||
@ -270,13 +311,13 @@ static int dump_qp(int id, void *p, void *data)
|
||||
qp->wq.sq.qid, qp->wq.rq.qid,
|
||||
(int)qp->attr.state,
|
||||
qp->wq.sq.flags & T4_SQ_ONCHIP,
|
||||
qp->ep->hwtid, (int)qp->ep->com.state,
|
||||
ep->hwtid, (int)ep->com.state,
|
||||
&lsin6->sin6_addr,
|
||||
ntohs(lsin6->sin6_port),
|
||||
ntohs(mapped_lsin6->sin6_port),
|
||||
ntohs(m_lsin6->sin6_port),
|
||||
&rsin6->sin6_addr,
|
||||
ntohs(rsin6->sin6_port),
|
||||
ntohs(mapped_rsin6->sin6_port));
|
||||
ntohs(m_rsin6->sin6_port));
|
||||
}
|
||||
} else
|
||||
cc = snprintf(qpd->buf + qpd->pos, space,
|
||||
@ -533,15 +574,12 @@ static int dump_ep(int id, void *p, void *data)
|
||||
return 1;
|
||||
|
||||
if (ep->com.local_addr.ss_family == AF_INET) {
|
||||
struct sockaddr_in *lsin = (struct sockaddr_in *)
|
||||
&ep->com.cm_id->local_addr;
|
||||
struct sockaddr_in *rsin = (struct sockaddr_in *)
|
||||
&ep->com.cm_id->remote_addr;
|
||||
struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
|
||||
&ep->com.cm_id->m_local_addr;
|
||||
struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
|
||||
&ep->com.cm_id->m_remote_addr;
|
||||
struct sockaddr_in *lsin;
|
||||
struct sockaddr_in *rsin;
|
||||
struct sockaddr_in *m_lsin;
|
||||
struct sockaddr_in *m_rsin;
|
||||
|
||||
set_ep_sin_addrs(ep, &lsin, &rsin, &m_lsin, &m_rsin);
|
||||
cc = snprintf(epd->buf + epd->pos, space,
|
||||
"ep %p cm_id %p qp %p state %d flags 0x%lx "
|
||||
"history 0x%lx hwtid %d atid %d "
|
||||
@ -553,19 +591,16 @@ static int dump_ep(int id, void *p, void *data)
|
||||
ep->stats.connect_neg_adv,
|
||||
ep->stats.abort_neg_adv,
|
||||
&lsin->sin_addr, ntohs(lsin->sin_port),
|
||||
ntohs(mapped_lsin->sin_port),
|
||||
ntohs(m_lsin->sin_port),
|
||||
&rsin->sin_addr, ntohs(rsin->sin_port),
|
||||
ntohs(mapped_rsin->sin_port));
|
||||
ntohs(m_rsin->sin_port));
|
||||
} else {
|
||||
struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
|
||||
&ep->com.cm_id->local_addr;
|
||||
struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
|
||||
&ep->com.cm_id->remote_addr;
|
||||
struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
|
||||
&ep->com.cm_id->m_local_addr;
|
||||
struct sockaddr_in6 *mapped_rsin6 = (struct sockaddr_in6 *)
|
||||
&ep->com.cm_id->m_remote_addr;
|
||||
struct sockaddr_in6 *lsin6;
|
||||
struct sockaddr_in6 *rsin6;
|
||||
struct sockaddr_in6 *m_lsin6;
|
||||
struct sockaddr_in6 *m_rsin6;
|
||||
|
||||
set_ep_sin6_addrs(ep, &lsin6, &rsin6, &m_lsin6, &m_rsin6);
|
||||
cc = snprintf(epd->buf + epd->pos, space,
|
||||
"ep %p cm_id %p qp %p state %d flags 0x%lx "
|
||||
"history 0x%lx hwtid %d atid %d "
|
||||
@ -577,9 +612,9 @@ static int dump_ep(int id, void *p, void *data)
|
||||
ep->stats.connect_neg_adv,
|
||||
ep->stats.abort_neg_adv,
|
||||
&lsin6->sin6_addr, ntohs(lsin6->sin6_port),
|
||||
ntohs(mapped_lsin6->sin6_port),
|
||||
ntohs(m_lsin6->sin6_port),
|
||||
&rsin6->sin6_addr, ntohs(rsin6->sin6_port),
|
||||
ntohs(mapped_rsin6->sin6_port));
|
||||
ntohs(m_rsin6->sin6_port));
|
||||
}
|
||||
if (cc < space)
|
||||
epd->pos += cc;
|
||||
@ -600,7 +635,7 @@ static int dump_listen_ep(int id, void *p, void *data)
|
||||
if (ep->com.local_addr.ss_family == AF_INET) {
|
||||
struct sockaddr_in *lsin = (struct sockaddr_in *)
|
||||
&ep->com.cm_id->local_addr;
|
||||
struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
|
||||
struct sockaddr_in *m_lsin = (struct sockaddr_in *)
|
||||
&ep->com.cm_id->m_local_addr;
|
||||
|
||||
cc = snprintf(epd->buf + epd->pos, space,
|
||||
@ -609,11 +644,11 @@ static int dump_listen_ep(int id, void *p, void *data)
|
||||
ep, ep->com.cm_id, (int)ep->com.state,
|
||||
ep->com.flags, ep->stid, ep->backlog,
|
||||
&lsin->sin_addr, ntohs(lsin->sin_port),
|
||||
ntohs(mapped_lsin->sin_port));
|
||||
ntohs(m_lsin->sin_port));
|
||||
} else {
|
||||
struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
|
||||
&ep->com.cm_id->local_addr;
|
||||
struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
|
||||
struct sockaddr_in6 *m_lsin6 = (struct sockaddr_in6 *)
|
||||
&ep->com.cm_id->m_local_addr;
|
||||
|
||||
cc = snprintf(epd->buf + epd->pos, space,
|
||||
@ -622,7 +657,7 @@ static int dump_listen_ep(int id, void *p, void *data)
|
||||
ep, ep->com.cm_id, (int)ep->com.state,
|
||||
ep->com.flags, ep->stid, ep->backlog,
|
||||
&lsin6->sin6_addr, ntohs(lsin6->sin6_port),
|
||||
ntohs(mapped_lsin6->sin6_port));
|
||||
ntohs(m_lsin6->sin6_port));
|
||||
}
|
||||
if (cc < space)
|
||||
epd->pos += cc;
|
||||
|
@ -7827,7 +7827,8 @@ static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
|
||||
}
|
||||
|
||||
/* just report this */
|
||||
dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
|
||||
dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
|
||||
extra);
|
||||
reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
|
||||
}
|
||||
|
||||
@ -7878,34 +7879,35 @@ static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
|
||||
}
|
||||
|
||||
/* just report this */
|
||||
dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
|
||||
dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
|
||||
hdr0, hdr1);
|
||||
dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
|
||||
" hdr0 0x%llx, hdr1 0x%llx\n",
|
||||
extra, hdr0, hdr1);
|
||||
|
||||
reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
|
||||
}
|
||||
|
||||
if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
|
||||
/* informative only */
|
||||
dd_dev_info(dd, "8051 access to LCB blocked\n");
|
||||
dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
|
||||
reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
|
||||
}
|
||||
if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
|
||||
/* informative only */
|
||||
dd_dev_info(dd, "host access to LCB blocked\n");
|
||||
dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
|
||||
reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
|
||||
}
|
||||
|
||||
/* report any remaining errors */
|
||||
if (reg)
|
||||
dd_dev_info(dd, "DCC Error: %s\n",
|
||||
dcc_err_string(buf, sizeof(buf), reg));
|
||||
dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
|
||||
dcc_err_string(buf, sizeof(buf), reg));
|
||||
|
||||
if (lcl_reason == 0)
|
||||
lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
|
||||
|
||||
if (do_bounce) {
|
||||
dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
|
||||
dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
|
||||
__func__);
|
||||
set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
|
||||
queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
|
||||
}
|
||||
@ -10508,16 +10510,18 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
|
||||
ppd->remote_link_down_reason = 0;
|
||||
}
|
||||
|
||||
ret1 = set_physical_link_state(dd, PLS_DISABLED);
|
||||
if (ret1 != HCMD_SUCCESS) {
|
||||
dd_dev_err(dd,
|
||||
"Failed to transition to Disabled link state, return 0x%x\n",
|
||||
ret1);
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
if (!dd->dc_shutdown) {
|
||||
ret1 = set_physical_link_state(dd, PLS_DISABLED);
|
||||
if (ret1 != HCMD_SUCCESS) {
|
||||
dd_dev_err(dd,
|
||||
"Failed to transition to Disabled link state, return 0x%x\n",
|
||||
ret1);
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
dc_shutdown(dd);
|
||||
}
|
||||
ppd->host_link_state = HLS_DN_DISABLE;
|
||||
dc_shutdown(dd);
|
||||
break;
|
||||
case HLS_DN_OFFLINE:
|
||||
if (ppd->host_link_state == HLS_DN_DISABLE)
|
||||
|
@ -331,10 +331,6 @@ struct diag_pkt {
|
||||
#define FULL_MGMT_P_KEY 0xFFFF
|
||||
|
||||
#define DEFAULT_P_KEY LIM_MGMT_P_KEY
|
||||
#define HFI1_AETH_CREDIT_SHIFT 24
|
||||
#define HFI1_AETH_CREDIT_MASK 0x1F
|
||||
#define HFI1_AETH_CREDIT_INVAL 0x1F
|
||||
#define HFI1_MSN_MASK 0xFFFFFF
|
||||
#define HFI1_FECN_SHIFT 31
|
||||
#define HFI1_FECN_MASK 1
|
||||
#define HFI1_FECN_SMASK BIT(HFI1_FECN_SHIFT)
|
||||
|
@ -50,6 +50,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
#include "hfi.h"
|
||||
#include "debugfs.h"
|
||||
@ -503,18 +504,11 @@ static ssize_t asic_flags_write(struct file *file, const char __user *buf,
|
||||
ppd = private2ppd(file);
|
||||
dd = ppd->dd;
|
||||
|
||||
buff = kmalloc(count + 1, GFP_KERNEL);
|
||||
if (!buff)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = copy_from_user(buff, buf, count);
|
||||
if (ret > 0) {
|
||||
ret = -EFAULT;
|
||||
goto do_free;
|
||||
}
|
||||
|
||||
/* zero terminate and read the expected integer */
|
||||
buff[count] = 0;
|
||||
buff = memdup_user_nul(buf, count);
|
||||
if (IS_ERR(buff))
|
||||
return PTR_ERR(buff);
|
||||
|
||||
ret = kstrtoull(buff, 0, &value);
|
||||
if (ret)
|
||||
goto do_free;
|
||||
@ -692,15 +686,9 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
|
||||
if (i2c_addr == 0)
|
||||
return -EINVAL;
|
||||
|
||||
buff = kmalloc(count, GFP_KERNEL);
|
||||
if (!buff)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = copy_from_user(buff, buf, count);
|
||||
if (ret > 0) {
|
||||
ret = -EFAULT;
|
||||
goto _free;
|
||||
}
|
||||
buff = memdup_user(buf, count);
|
||||
if (IS_ERR(buff))
|
||||
return PTR_ERR(buff);
|
||||
|
||||
total_written = i2c_write(ppd, target, i2c_addr, offset, buff, count);
|
||||
if (total_written < 0) {
|
||||
@ -805,15 +793,10 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
|
||||
|
||||
ppd = private2ppd(file);
|
||||
|
||||
buff = kmalloc(count, GFP_KERNEL);
|
||||
if (!buff)
|
||||
return -ENOMEM;
|
||||
buff = memdup_user(buf, count);
|
||||
if (IS_ERR(buff))
|
||||
return PTR_ERR(buff);
|
||||
|
||||
ret = copy_from_user(buff, buf, count);
|
||||
if (ret > 0) {
|
||||
ret = -EFAULT;
|
||||
goto _free;
|
||||
}
|
||||
total_written = qsfp_write(ppd, target, *ppos, buff, count);
|
||||
if (total_written < 0) {
|
||||
ret = total_written;
|
||||
|
@ -100,6 +100,11 @@ MODULE_VERSION(HFI1_DRIVER_VERSION);
|
||||
* MAX_PKT_RECV is the max # of packets processed per receive interrupt.
|
||||
*/
|
||||
#define MAX_PKT_RECV 64
|
||||
/*
|
||||
* MAX_PKT_RECV_THREAD is the max # of packets processed before
|
||||
* the qp_wait_list queue is flushed.
|
||||
*/
|
||||
#define MAX_PKT_RECV_THREAD (MAX_PKT_RECV * 4)
|
||||
#define EGR_HEAD_UPDATE_THRESHOLD 16
|
||||
|
||||
struct hfi1_ib_stats hfi1_stats;
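Editorial note: these limits are powers of two so the receive path further down (check_max_packet() and max_packet_exceeded()) can test "every Nth packet" with a mask rather than a division: (numpkt & (N - 1)) == 0 is equivalent to numpkt % N == 0 when N is a power of two. A small standalone illustration of that test:

#include <stdio.h>

#define DEMO_EVERY 64	/* must be a power of two for the mask trick */

int main(void)
{
	unsigned int numpkt;
	unsigned int hits = 0;

	for (numpkt = 1; numpkt <= 256; numpkt++)
		if ((numpkt & (DEMO_EVERY - 1)) == 0)
			hits++;	/* fires at 64, 128, 192, 256 */

	printf("%u\n", hits);	/* prints 4 */
	return 0;
}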
|
||||
@ -259,7 +264,7 @@ static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
|
||||
* allowed size ranges for the respective type and, optionally,
|
||||
* return the proper encoding.
|
||||
*/
|
||||
inline int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
|
||||
int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
|
||||
{
|
||||
if (unlikely(!PAGE_ALIGNED(size)))
|
||||
return 0;
|
||||
@ -279,7 +284,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
|
||||
struct ib_header *rhdr = packet->hdr;
|
||||
u32 rte = rhf_rcv_type_err(packet->rhf);
|
||||
int lnh = be16_to_cpu(rhdr->lrh[0]) & 3;
|
||||
struct hfi1_ibport *ibp = &ppd->ibport_data;
|
||||
struct hfi1_ibport *ibp = rcd_to_iport(rcd);
|
||||
struct hfi1_devdata *dd = ppd->dd;
|
||||
struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
|
||||
|
||||
@ -594,7 +599,7 @@ static void __prescan_rxq(struct hfi1_packet *packet)
|
||||
|
||||
while (1) {
|
||||
struct hfi1_devdata *dd = rcd->dd;
|
||||
struct hfi1_ibport *ibp = &rcd->ppd->ibport_data;
|
||||
struct hfi1_ibport *ibp = rcd_to_iport(rcd);
|
||||
__le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
|
||||
dd->rhf_offset;
|
||||
struct rvt_qp *qp;
|
||||
@ -654,24 +659,68 @@ next:
|
||||
}
|
||||
}
|
||||
|
||||
static inline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
|
||||
static void process_rcv_qp_work(struct hfi1_ctxtdata *rcd)
|
||||
{
|
||||
struct rvt_qp *qp, *nqp;
|
||||
|
||||
/*
|
||||
* Iterate over all QPs waiting to respond.
|
||||
* The list won't change since the IRQ is only run on one CPU.
|
||||
*/
|
||||
list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
|
||||
list_del_init(&qp->rspwait);
|
||||
if (qp->r_flags & RVT_R_RSP_NAK) {
|
||||
qp->r_flags &= ~RVT_R_RSP_NAK;
|
||||
hfi1_send_rc_ack(rcd, qp, 0);
|
||||
}
|
||||
if (qp->r_flags & RVT_R_RSP_SEND) {
|
||||
unsigned long flags;
|
||||
|
||||
qp->r_flags &= ~RVT_R_RSP_SEND;
|
||||
spin_lock_irqsave(&qp->s_lock, flags);
|
||||
if (ib_rvt_state_ops[qp->state] &
|
||||
RVT_PROCESS_OR_FLUSH_SEND)
|
||||
hfi1_schedule_send(qp);
|
||||
spin_unlock_irqrestore(&qp->s_lock, flags);
|
||||
}
|
||||
rvt_put_qp(qp);
|
||||
}
|
||||
}
|
||||
|
||||
static noinline int max_packet_exceeded(struct hfi1_packet *packet, int thread)
|
||||
{
|
||||
if (thread) {
|
||||
if ((packet->numpkt & (MAX_PKT_RECV_THREAD - 1)) == 0)
|
||||
/* allow deferred processing */
|
||||
process_rcv_qp_work(packet->rcd);
|
||||
cond_resched();
|
||||
return RCV_PKT_OK;
|
||||
} else {
|
||||
this_cpu_inc(*packet->rcd->dd->rcv_limit);
|
||||
return RCV_PKT_LIMIT;
|
||||
}
|
||||
}
|
||||
|
||||
static inline int check_max_packet(struct hfi1_packet *packet, int thread)
|
||||
{
|
||||
int ret = RCV_PKT_OK;
|
||||
|
||||
if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0))
|
||||
ret = max_packet_exceeded(packet, thread);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/* Set up for the next packet */
|
||||
packet->rhqoff += packet->rsize;
|
||||
if (packet->rhqoff >= packet->maxcnt)
|
||||
packet->rhqoff = 0;
|
||||
|
||||
packet->numpkt++;
|
||||
if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0)) {
|
||||
if (thread) {
|
||||
cond_resched();
|
||||
} else {
|
||||
ret = RCV_PKT_LIMIT;
|
||||
this_cpu_inc(*packet->rcd->dd->rcv_limit);
|
||||
}
|
||||
}
|
||||
ret = check_max_packet(packet, thread);
|
||||
|
||||
packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
|
||||
packet->rcd->dd->rhf_offset;
|
||||
@ -682,7 +731,7 @@ static inline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
|
||||
|
||||
static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
|
||||
{
|
||||
int ret = RCV_PKT_OK;
|
||||
int ret;
|
||||
|
||||
packet->hdr = hfi1_get_msgheader(packet->rcd->dd,
|
||||
packet->rhf_addr);
|
||||
@ -723,14 +772,7 @@ static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
|
||||
if (packet->rhqoff >= packet->maxcnt)
|
||||
packet->rhqoff = 0;
|
||||
|
||||
if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0)) {
|
||||
if (thread) {
|
||||
cond_resched();
|
||||
} else {
|
||||
ret = RCV_PKT_LIMIT;
|
||||
this_cpu_inc(*packet->rcd->dd->rcv_limit);
|
||||
}
|
||||
}
|
||||
ret = check_max_packet(packet, thread);
|
||||
|
||||
packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
|
||||
packet->rcd->dd->rhf_offset;
|
||||
@ -767,38 +809,6 @@ static inline void finish_packet(struct hfi1_packet *packet)
|
||||
packet->etail, rcv_intr_dynamic, packet->numpkt);
|
||||
}
|
||||
|
||||
static inline void process_rcv_qp_work(struct hfi1_packet *packet)
|
||||
{
|
||||
struct hfi1_ctxtdata *rcd;
|
||||
struct rvt_qp *qp, *nqp;
|
||||
|
||||
rcd = packet->rcd;
|
||||
rcd->head = packet->rhqoff;
|
||||
|
||||
/*
|
||||
* Iterate over all QPs waiting to respond.
|
||||
* The list won't change since the IRQ is only run on one CPU.
|
||||
*/
|
||||
list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
|
||||
list_del_init(&qp->rspwait);
|
||||
if (qp->r_flags & RVT_R_RSP_NAK) {
|
||||
qp->r_flags &= ~RVT_R_RSP_NAK;
|
||||
hfi1_send_rc_ack(rcd, qp, 0);
|
||||
}
|
||||
if (qp->r_flags & RVT_R_RSP_SEND) {
|
||||
unsigned long flags;
|
||||
|
||||
qp->r_flags &= ~RVT_R_RSP_SEND;
|
||||
spin_lock_irqsave(&qp->s_lock, flags);
|
||||
if (ib_rvt_state_ops[qp->state] &
|
||||
RVT_PROCESS_OR_FLUSH_SEND)
|
||||
hfi1_schedule_send(qp);
|
||||
spin_unlock_irqrestore(&qp->s_lock, flags);
|
||||
}
|
||||
rvt_put_qp(qp);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle receive interrupts when using the no dma rtail option.
|
||||
*/
|
||||
@ -826,7 +836,8 @@ int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
|
||||
last = RCV_PKT_DONE;
|
||||
process_rcv_update(last, &packet);
|
||||
}
|
||||
process_rcv_qp_work(&packet);
|
||||
process_rcv_qp_work(rcd);
|
||||
rcd->head = packet.rhqoff;
|
||||
bail:
|
||||
finish_packet(&packet);
|
||||
return last;
|
||||
@ -854,7 +865,8 @@ int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
|
||||
last = RCV_PKT_DONE;
|
||||
process_rcv_update(last, &packet);
|
||||
}
|
||||
process_rcv_qp_work(&packet);
|
||||
process_rcv_qp_work(rcd);
|
||||
rcd->head = packet.rhqoff;
|
||||
bail:
|
||||
finish_packet(&packet);
|
||||
return last;
|
||||
@ -1024,7 +1036,8 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
|
||||
process_rcv_update(last, &packet);
|
||||
}
|
||||
|
||||
process_rcv_qp_work(&packet);
|
||||
process_rcv_qp_work(rcd);
|
||||
rcd->head = packet.rhqoff;
|
||||
|
||||
bail:
|
||||
/*
|
||||
|
@@ -45,6 +45,7 @@
 *
 */

#include <linux/ctype.h>
#include "efivar.h"

/* GUID for HFI1 variables in EFI */
@@ -150,15 +151,32 @@ fail:
int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind,
		      unsigned long *size, void **return_data)
{
	char prefix_name[64];
	char name[64];
	int result;
	int i;

	/* create a common prefix */
	snprintf(name, sizeof(name), "%04x:%02x:%02x.%x-%s",
	snprintf(prefix_name, sizeof(prefix_name), "%04x:%02x:%02x.%x",
		 pci_domain_nr(dd->pcidev->bus),
		 dd->pcidev->bus->number,
		 PCI_SLOT(dd->pcidev->devfn),
		 PCI_FUNC(dd->pcidev->devfn),
		 kind);
		 PCI_FUNC(dd->pcidev->devfn));
	snprintf(name, sizeof(name), "%s-%s", prefix_name, kind);
	result = read_efi_var(name, size, return_data);

	return read_efi_var(name, size, return_data);
	/*
	 * If reading the lowercase EFI variable fails, read the uppercase
	 * variable.
	 */
	if (result) {
		/* Converting to uppercase */
		for (i = 0; prefix_name[i]; i++)
			if (isalpha(prefix_name[i]))
				prefix_name[i] = toupper(prefix_name[i]);
		snprintf(name, sizeof(name), "%s-%s", prefix_name, kind);
		result = read_efi_var(name, size, return_data);
	}

	return result;
}
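Editorial note: the rework above builds the EFI variable name from the device's PCI domain:bus:slot.function prefix plus the kind, and retries with the prefix upper-cased when the lowercase lookup fails, presumably because some firmware stores the BDF prefix in capitals. A userspace sketch of the name construction and fallback order; the probe function and the "boardid" kind are illustrative stubs, not the real EFI accessor:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Stub for the EFI accessor: pretend only the uppercase-prefix name exists. */
static int demo_read_var(const char *name)
{
	return strcmp(name, "0000:AF:00.0-boardid") == 0 ? 0 : -1;
}

int main(void)
{
	char prefix[64] = "0000:af:00.0";	/* PCI domain:bus:slot.func */
	const char *kind = "boardid";
	char name[64];
	int i, result;

	/* First try the lowercase prefix, then retry with it upper-cased. */
	snprintf(name, sizeof(name), "%s-%s", prefix, kind);
	result = demo_read_var(name);
	if (result) {
		for (i = 0; prefix[i]; i++)
			if (isalpha((unsigned char)prefix[i]))
				prefix[i] = toupper((unsigned char)prefix[i]);
		snprintf(name, sizeof(name), "%s-%s", prefix, kind);
		result = demo_read_var(name);
	}
	printf("%s -> %d\n", name, result);	/* 0000:AF:00.0-boardid -> 0 */
	return 0;
}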
||||
|
@ -356,12 +356,11 @@ struct hfi1_packet {
|
||||
u64 rhf;
|
||||
u32 maxcnt;
|
||||
u32 rhqoff;
|
||||
u32 hdrqtail;
|
||||
int numpkt;
|
||||
u16 tlen;
|
||||
u16 hlen;
|
||||
s16 etail;
|
||||
u16 rsize;
|
||||
u8 hlen;
|
||||
u8 numpkt;
|
||||
u8 rsize;
|
||||
u8 updegr;
|
||||
u8 rcv_flags;
|
||||
u8 etype;
|
||||
@ -1584,6 +1583,11 @@ static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u8 port)
|
||||
return &dd->pport[pidx].ibport_data;
|
||||
}
|
||||
|
||||
static inline struct hfi1_ibport *rcd_to_iport(struct hfi1_ctxtdata *rcd)
|
||||
{
|
||||
return &rcd->ppd->ibport_data;
|
||||
}
|
||||
|
||||
void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
|
||||
bool do_cnp);
|
||||
static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt,
|
||||
@ -1793,8 +1797,6 @@ int kdeth_process_expected(struct hfi1_packet *packet);
|
||||
int kdeth_process_eager(struct hfi1_packet *packet);
|
||||
int process_receive_invalid(struct hfi1_packet *packet);
|
||||
|
||||
void update_sge(struct rvt_sge_state *ss, u32 length);
|
||||
|
||||
/* global module parameter variables */
|
||||
extern unsigned int hfi1_max_mtu;
|
||||
extern unsigned int hfi1_cu;
|
||||
@ -1940,6 +1942,10 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
|
||||
dev_info(&(dd)->pcidev->dev, "%s: " fmt, \
|
||||
get_unit_name((dd)->unit), ##__VA_ARGS__)
|
||||
|
||||
#define dd_dev_info_ratelimited(dd, fmt, ...) \
|
||||
dev_info_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
|
||||
get_unit_name((dd)->unit), ##__VA_ARGS__)
|
||||
|
||||
#define dd_dev_dbg(dd, fmt, ...) \
|
||||
dev_dbg(&(dd)->pcidev->dev, "%s: " fmt, \
|
||||
get_unit_name((dd)->unit), ##__VA_ARGS__)
|
||||
|
@ -297,14 +297,15 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
|
||||
* The resulting value will be rounded down to the closest
|
||||
* multiple of dd->rcv_entries.group_size.
|
||||
*/
|
||||
rcd->egrbufs.buffers = kcalloc(rcd->egrbufs.count,
|
||||
sizeof(*rcd->egrbufs.buffers),
|
||||
GFP_KERNEL);
|
||||
rcd->egrbufs.buffers = kzalloc_node(
|
||||
rcd->egrbufs.count * sizeof(*rcd->egrbufs.buffers),
|
||||
GFP_KERNEL, numa);
|
||||
if (!rcd->egrbufs.buffers)
|
||||
goto bail;
|
||||
rcd->egrbufs.rcvtids = kcalloc(rcd->egrbufs.count,
|
||||
sizeof(*rcd->egrbufs.rcvtids),
|
||||
GFP_KERNEL);
|
||||
rcd->egrbufs.rcvtids = kzalloc_node(
|
||||
rcd->egrbufs.count *
|
||||
sizeof(*rcd->egrbufs.rcvtids),
|
||||
GFP_KERNEL, numa);
|
||||
if (!rcd->egrbufs.rcvtids)
|
||||
goto bail;
|
||||
rcd->egrbufs.size = eager_buffer_size;
|
||||
@ -322,8 +323,8 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
|
||||
rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;
|
||||
|
||||
if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
|
||||
rcd->opstats = kzalloc(sizeof(*rcd->opstats),
|
||||
GFP_KERNEL);
|
||||
rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
|
||||
GFP_KERNEL, numa);
|
||||
if (!rcd->opstats)
|
||||
goto bail;
|
||||
}
|
||||
|
@ -598,15 +598,6 @@ pci_slot_reset(struct pci_dev *pdev)
|
||||
return PCI_ERS_RESULT_CAN_RECOVER;
|
||||
}
|
||||
|
||||
static pci_ers_result_t
|
||||
pci_link_reset(struct pci_dev *pdev)
|
||||
{
|
||||
struct hfi1_devdata *dd = pci_get_drvdata(pdev);
|
||||
|
||||
dd_dev_info(dd, "HFI1 link_reset function called, ignored\n");
|
||||
return PCI_ERS_RESULT_CAN_RECOVER;
|
||||
}
|
||||
|
||||
static void
|
||||
pci_resume(struct pci_dev *pdev)
|
||||
{
|
||||
@ -625,7 +616,6 @@ pci_resume(struct pci_dev *pdev)
|
||||
const struct pci_error_handlers hfi1_pci_err_handler = {
|
||||
.error_detected = pci_error_detected,
|
||||
.mmio_enabled = pci_mmio_enabled,
|
||||
.link_reset = pci_link_reset,
|
||||
.slot_reset = pci_slot_reset,
|
||||
.resume = pci_resume,
|
||||
};
|
||||
@ -673,12 +663,12 @@ MODULE_PARM_DESC(pcie_retry, "Driver will try this many times to reach requested
|
||||
|
||||
#define UNSET_PSET 255
|
||||
#define DEFAULT_DISCRETE_PSET 2 /* discrete HFI */
|
||||
#define DEFAULT_MCP_PSET 4 /* MCP HFI */
|
||||
#define DEFAULT_MCP_PSET 6 /* MCP HFI */
|
||||
static uint pcie_pset = UNSET_PSET;
|
||||
module_param(pcie_pset, uint, S_IRUGO);
|
||||
MODULE_PARM_DESC(pcie_pset, "PCIe Eq Pset value to use, range is 0-10");
|
||||
|
||||
static uint pcie_ctle = 1; /* discrete on, integrated off */
|
||||
static uint pcie_ctle = 3; /* discrete on, integrated on */
|
||||
module_param(pcie_ctle, uint, S_IRUGO);
|
||||
MODULE_PARM_DESC(pcie_ctle, "PCIe static CTLE mode, bit 0 - discrete on/off, bit 1 - integrated on/off");
|
||||
|
||||
|
@ -79,43 +79,6 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
|
||||
return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
|
||||
}
|
||||
|
||||
/*
|
||||
* Convert the AETH credit code into the number of credits.
|
||||
*/
|
||||
static const u16 credit_table[31] = {
|
||||
0, /* 0 */
|
||||
1, /* 1 */
|
||||
2, /* 2 */
|
||||
3, /* 3 */
|
||||
4, /* 4 */
|
||||
6, /* 5 */
|
||||
8, /* 6 */
|
||||
12, /* 7 */
|
||||
16, /* 8 */
|
||||
24, /* 9 */
|
||||
32, /* A */
|
||||
48, /* B */
|
||||
64, /* C */
|
||||
96, /* D */
|
||||
128, /* E */
|
||||
192, /* F */
|
||||
256, /* 10 */
|
||||
384, /* 11 */
|
||||
512, /* 12 */
|
||||
768, /* 13 */
|
||||
1024, /* 14 */
|
||||
1536, /* 15 */
|
||||
2048, /* 16 */
|
||||
3072, /* 17 */
|
||||
4096, /* 18 */
|
||||
6144, /* 19 */
|
||||
8192, /* 1A */
|
||||
12288, /* 1B */
|
||||
16384, /* 1C */
|
||||
24576, /* 1D */
|
||||
32768 /* 1E */
|
||||
};
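Editorial note: this table, removed here as the logic moves into rdmavt (the callers below switch to rvt_compute_aeth()), maps the 5-bit AETH credit code to an RWQE count, and the removed hfi1_compute_aeth() further down binary-searches it for the largest code that does not overstate the available credits. A standalone sketch of that lookup over the same values; this is a simplified equivalent, not the kernel's exact loop:

#include <stdio.h>

/* Same credit values as the table above (codes 0x00..0x1E). */
static const unsigned short demo_credit_table[31] = {
	0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192,
	12288, 16384, 24576, 32768
};

/* Largest credit code whose value does not exceed the available credits. */
static unsigned int demo_credit_code(unsigned int credits)
{
	unsigned int lo = 0, hi = 30;

	while (lo < hi) {
		unsigned int mid = (lo + hi + 1) / 2;

		if (demo_credit_table[mid] <= credits)
			lo = mid;
		else
			hi = mid - 1;
	}
	return lo;
}

int main(void)
{
	/* Prints "0 13 30": 0 credits -> code 0, 100 -> 96, 40000 -> 32768. */
	printf("%u %u %u\n", demo_credit_code(0), demo_credit_code(100),
	       demo_credit_code(40000));
	return 0;
}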
|
||||
|
||||
const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
|
||||
[IB_WR_RDMA_WRITE] = {
|
||||
.length = sizeof(struct ib_rdma_wr),
|
||||
@ -339,68 +302,6 @@ int hfi1_check_send_wqe(struct rvt_qp *qp,
|
||||
return wqe->length <= piothreshold;
|
||||
}
|
||||
|
||||
/**
|
||||
* hfi1_compute_aeth - compute the AETH (syndrome + MSN)
|
||||
* @qp: the queue pair to compute the AETH for
|
||||
*
|
||||
* Returns the AETH.
|
||||
*/
|
||||
__be32 hfi1_compute_aeth(struct rvt_qp *qp)
|
||||
{
|
||||
u32 aeth = qp->r_msn & HFI1_MSN_MASK;
|
||||
|
||||
if (qp->ibqp.srq) {
|
||||
/*
|
||||
* Shared receive queues don't generate credits.
|
||||
* Set the credit field to the invalid value.
|
||||
*/
|
||||
aeth |= HFI1_AETH_CREDIT_INVAL << HFI1_AETH_CREDIT_SHIFT;
|
||||
} else {
|
||||
u32 min, max, x;
|
||||
u32 credits;
|
||||
struct rvt_rwq *wq = qp->r_rq.wq;
|
||||
u32 head;
|
||||
u32 tail;
|
||||
|
||||
/* sanity check pointers before trusting them */
|
||||
head = wq->head;
|
||||
if (head >= qp->r_rq.size)
|
||||
head = 0;
|
||||
tail = wq->tail;
|
||||
if (tail >= qp->r_rq.size)
|
||||
tail = 0;
|
||||
/*
|
||||
* Compute the number of credits available (RWQEs).
|
||||
* There is a small chance that the pair of reads are
|
||||
* not atomic, which is OK, since the fuzziness is
|
||||
* resolved as further ACKs go out.
|
||||
*/
|
||||
credits = head - tail;
|
||||
if ((int)credits < 0)
|
||||
credits += qp->r_rq.size;
|
||||
/*
|
||||
* Binary search the credit table to find the code to
|
||||
* use.
|
||||
*/
|
||||
min = 0;
|
||||
max = 31;
|
||||
for (;;) {
|
||||
x = (min + max) / 2;
|
||||
if (credit_table[x] == credits)
|
||||
break;
|
||||
if (credit_table[x] > credits) {
|
||||
max = x;
|
||||
} else {
|
||||
if (min == x)
|
||||
break;
|
||||
min = x;
|
||||
}
|
||||
}
|
||||
aeth |= x << HFI1_AETH_CREDIT_SHIFT;
|
||||
}
|
||||
return cpu_to_be32(aeth);
|
||||
}
|
||||
|
||||
/**
|
||||
* _hfi1_schedule_send - schedule progress
|
||||
* @qp: the QP
|
||||
@ -457,44 +358,6 @@ void hfi1_schedule_send(struct rvt_qp *qp)
|
||||
_hfi1_schedule_send(qp);
|
||||
}
|
||||
|
||||
/**
|
||||
* hfi1_get_credit - handle credit in aeth
|
||||
* @qp: the qp
|
||||
* @aeth: the Acknowledge Extended Transport Header
|
||||
*
|
||||
* The QP s_lock should be held.
|
||||
*/
|
||||
void hfi1_get_credit(struct rvt_qp *qp, u32 aeth)
|
||||
{
|
||||
u32 credit = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;
|
||||
|
||||
lockdep_assert_held(&qp->s_lock);
|
||||
/*
|
||||
* If the credit is invalid, we can send
|
||||
* as many packets as we like. Otherwise, we have to
|
||||
* honor the credit field.
|
||||
*/
|
||||
if (credit == HFI1_AETH_CREDIT_INVAL) {
|
||||
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
|
||||
qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
|
||||
if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
|
||||
qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
|
||||
hfi1_schedule_send(qp);
|
||||
}
|
||||
}
|
||||
} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
|
||||
/* Compute new LSN (i.e., MSN + credit) */
|
||||
credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK;
|
||||
if (cmp_msn(credit, qp->s_lsn) > 0) {
|
||||
qp->s_lsn = credit;
|
||||
if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
|
||||
qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
|
||||
hfi1_schedule_send(qp);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
|
||||
{
|
||||
unsigned long flags;
|
||||
@ -744,7 +607,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
|
||||
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
|
||||
send_context = qp_to_send_context(qp, priv->s_sc);
|
||||
seq_printf(s,
|
||||
"N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d\n",
|
||||
"N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x (%u %u %u %u %u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d\n",
|
||||
iter->n,
|
||||
qp_idle(qp) ? "I" : "B",
|
||||
qp->ibqp.qp_num,
|
||||
@ -763,6 +626,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
|
||||
qp->s_last_psn,
|
||||
qp->s_psn, qp->s_next_psn,
|
||||
qp->s_sending_psn, qp->s_sending_hpsn,
|
||||
qp->r_psn,
|
||||
qp->s_last, qp->s_acked, qp->s_cur,
|
||||
qp->s_tail, qp->s_head, qp->s_size,
|
||||
qp->s_avail,
|
||||
@ -773,6 +637,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
|
||||
qp->s_retry,
|
||||
qp->s_retry_cnt,
|
||||
qp->s_rnr_retry_cnt,
|
||||
qp->s_rnr_retry,
|
||||
sde,
|
||||
sde ? sde->this_idx : 0,
|
||||
send_context,
|
||||
@ -782,19 +647,6 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
|
||||
qp->pid);
|
||||
}
|
||||
|
||||
void qp_comm_est(struct rvt_qp *qp)
|
||||
{
|
||||
qp->r_flags |= RVT_R_COMM_EST;
|
||||
if (qp->ibqp.event_handler) {
|
||||
struct ib_event ev;
|
||||
|
||||
ev.device = qp->ibqp.device;
|
||||
ev.element.qp = &qp->ibqp;
|
||||
ev.event = IB_EVENT_COMM_EST;
|
||||
qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
|
||||
}
|
||||
}
|
||||
|
||||
void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
|
||||
gfp_t gfp)
|
||||
{
|
||||
@ -819,8 +671,6 @@ void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
|
||||
iowait_sleep,
|
||||
iowait_wakeup,
|
||||
iowait_sdma_drained);
|
||||
setup_timer(&priv->s_rnr_timer, hfi1_rc_rnr_retry, (unsigned long)qp);
|
||||
qp->s_timer.function = hfi1_rc_timeout;
|
||||
return priv;
|
||||
}
|
||||
|
||||
@ -861,7 +711,6 @@ void flush_qp_waiters(struct rvt_qp *qp)
|
||||
{
|
||||
lockdep_assert_held(&qp->s_lock);
|
||||
flush_iowait(qp);
|
||||
hfi1_stop_rc_timers(qp);
|
||||
}
|
||||
|
||||
void stop_send_queue(struct rvt_qp *qp)
|
||||
@ -869,7 +718,6 @@ void stop_send_queue(struct rvt_qp *qp)
|
||||
struct hfi1_qp_priv *priv = qp->priv;
|
||||
|
||||
cancel_work_sync(&priv->s_iowait.iowork);
|
||||
hfi1_del_timers_sync(qp);
|
||||
}
|
||||
|
||||
void quiesce_qp(struct rvt_qp *qp)
|
||||
@ -961,17 +809,20 @@ int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
|
||||
|
||||
void notify_error_qp(struct rvt_qp *qp)
|
||||
{
|
||||
struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
|
||||
struct hfi1_qp_priv *priv = qp->priv;
|
||||
seqlock_t *lock = priv->s_iowait.lock;
|
||||
|
||||
write_seqlock(&dev->iowait_lock);
|
||||
if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) {
|
||||
qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
|
||||
list_del_init(&priv->s_iowait.list);
|
||||
priv->s_iowait.lock = NULL;
|
||||
rvt_put_qp(qp);
|
||||
if (lock) {
|
||||
write_seqlock(lock);
|
||||
if (!list_empty(&priv->s_iowait.list) &&
|
||||
!(qp->s_flags & RVT_S_BUSY)) {
|
||||
qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
|
||||
list_del_init(&priv->s_iowait.list);
|
||||
priv->s_iowait.lock = NULL;
|
||||
rvt_put_qp(qp);
|
||||
}
|
||||
write_sequnlock(lock);
|
||||
}
|
||||
write_sequnlock(&dev->iowait_lock);
|
||||
|
||||
if (!(qp->s_flags & RVT_S_BUSY)) {
|
||||
qp->s_hdrwords = 0;
|
||||
|
@ -70,14 +70,6 @@ static inline void clear_ahg(struct rvt_qp *qp)
|
||||
qp->s_ahgidx = -1;
|
||||
}
|
||||
|
||||
/**
|
||||
* hfi1_compute_aeth - compute the AETH (syndrome + MSN)
|
||||
* @qp: the queue pair to compute the AETH for
|
||||
*
|
||||
* Returns the AETH.
|
||||
*/
|
||||
__be32 hfi1_compute_aeth(struct rvt_qp *qp);
|
||||
|
||||
/**
|
||||
* hfi1_create_qp - create a queue pair for a device
|
||||
* @ibpd: the protection domain who's device we create the queue pair for
|
||||
@ -91,14 +83,6 @@ __be32 hfi1_compute_aeth(struct rvt_qp *qp);
|
||||
struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd,
|
||||
struct ib_qp_init_attr *init_attr,
|
||||
struct ib_udata *udata);
|
||||
/**
|
||||
* hfi1_get_credit - flush the send work queue of a QP
|
||||
* @qp: the qp who's send work queue to flush
|
||||
* @aeth: the Acknowledge Extended Transport Header
|
||||
*
|
||||
* The QP s_lock should be held.
|
||||
*/
|
||||
void hfi1_get_credit(struct rvt_qp *qp, u32 aeth);
|
||||
|
||||
/**
|
||||
* hfi1_qp_wakeup - wake up on the indicated event
|
||||
@ -131,12 +115,6 @@ int qp_iter_next(struct qp_iter *iter);
|
||||
*/
|
||||
void qp_iter_print(struct seq_file *s, struct qp_iter *iter);
|
||||
|
||||
/**
|
||||
* qp_comm_est - handle trap with QP established
|
||||
* @qp: the QP
|
||||
*/
|
||||
void qp_comm_est(struct rvt_qp *qp);
|
||||
|
||||
void _hfi1_schedule_send(struct rvt_qp *qp);
|
||||
void hfi1_schedule_send(struct rvt_qp *qp);
|
||||
|
||||
|
@ -57,133 +57,6 @@
|
||||
/* cut down ridiculously long IB macro names */
|
||||
#define OP(x) RC_OP(x)
|
||||
|
||||
/**
|
||||
* hfi1_add_retry_timer - add/start a retry timer
|
||||
* @qp - the QP
|
||||
*
|
||||
* add a retry timer on the QP
|
||||
*/
|
||||
static inline void hfi1_add_retry_timer(struct rvt_qp *qp)
|
||||
{
|
||||
struct ib_qp *ibqp = &qp->ibqp;
|
||||
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
|
||||
|
||||
lockdep_assert_held(&qp->s_lock);
|
||||
qp->s_flags |= RVT_S_TIMER;
|
||||
/* 4.096 usec. * (1 << qp->timeout) */
|
||||
qp->s_timer.expires = jiffies + qp->timeout_jiffies +
|
||||
rdi->busy_jiffies;
|
||||
add_timer(&qp->s_timer);
|
||||
}
|
||||
|
||||
/**
|
||||
* hfi1_add_rnr_timer - add/start an rnr timer
|
||||
* @qp - the QP
|
||||
* @to - timeout in usecs
|
||||
*
|
||||
* add an rnr timer on the QP
|
||||
*/
|
||||
void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to)
|
||||
{
|
||||
struct hfi1_qp_priv *priv = qp->priv;
|
||||
|
||||
lockdep_assert_held(&qp->s_lock);
|
||||
qp->s_flags |= RVT_S_WAIT_RNR;
|
||||
priv->s_rnr_timer.expires = jiffies + usecs_to_jiffies(to);
|
||||
add_timer(&priv->s_rnr_timer);
|
||||
}
|
||||
|
||||
/**
|
||||
* hfi1_mod_retry_timer - mod a retry timer
|
||||
* @qp - the QP
|
||||
*
|
||||
* Modify a potentially already running retry
|
||||
* timer
|
||||
*/
|
||||
static inline void hfi1_mod_retry_timer(struct rvt_qp *qp)
|
||||
{
|
||||
struct ib_qp *ibqp = &qp->ibqp;
|
||||
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
|
||||
|
||||
lockdep_assert_held(&qp->s_lock);
|
||||
qp->s_flags |= RVT_S_TIMER;
|
||||
/* 4.096 usec. * (1 << qp->timeout) */
|
||||
mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies +
|
||||
rdi->busy_jiffies);
|
||||
}
|
||||
|
||||
/**
|
||||
* hfi1_stop_retry_timer - stop a retry timer
|
||||
* @qp - the QP
|
||||
*
|
||||
* stop a retry timer and return if the timer
|
||||
* had been pending.
|
||||
*/
|
||||
static inline int hfi1_stop_retry_timer(struct rvt_qp *qp)
|
||||
{
|
||||
int rval = 0;
|
||||
|
||||
lockdep_assert_held(&qp->s_lock);
|
||||
/* Remove QP from retry */
|
||||
if (qp->s_flags & RVT_S_TIMER) {
|
||||
qp->s_flags &= ~RVT_S_TIMER;
|
||||
rval = del_timer(&qp->s_timer);
|
||||
}
|
||||
return rval;
|
||||
}
|
||||
|
||||
/**
|
||||
* hfi1_stop_rc_timers - stop all timers
|
||||
* @qp - the QP
|
||||
*
|
||||
* stop any pending timers
|
||||
*/
|
||||
void hfi1_stop_rc_timers(struct rvt_qp *qp)
|
||||
{
|
||||
struct hfi1_qp_priv *priv = qp->priv;
|
||||
|
||||
lockdep_assert_held(&qp->s_lock);
|
||||
/* Remove QP from all timers */
|
||||
if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
|
||||
qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
|
||||
del_timer(&qp->s_timer);
|
||||
del_timer(&priv->s_rnr_timer);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* hfi1_stop_rnr_timer - stop an rnr timer
|
||||
* @qp - the QP
|
||||
*
|
||||
* stop an rnr timer and return if the timer
|
||||
* had been pending.
|
||||
*/
|
||||
static inline int hfi1_stop_rnr_timer(struct rvt_qp *qp)
|
||||
{
|
||||
int rval = 0;
|
||||
struct hfi1_qp_priv *priv = qp->priv;
|
||||
|
||||
lockdep_assert_held(&qp->s_lock);
|
||||
/* Remove QP from rnr timer */
|
||||
if (qp->s_flags & RVT_S_WAIT_RNR) {
|
||||
qp->s_flags &= ~RVT_S_WAIT_RNR;
|
||||
rval = del_timer(&priv->s_rnr_timer);
|
||||
}
|
||||
return rval;
|
||||
}
|
||||
|
||||
/**
|
||||
* hfi1_del_timers_sync - wait for any timeout routines to exit
|
||||
* @qp - the QP
|
||||
*/
|
||||
void hfi1_del_timers_sync(struct rvt_qp *qp)
|
||||
{
|
||||
struct hfi1_qp_priv *priv = qp->priv;
|
||||
|
||||
del_timer_sync(&qp->s_timer);
|
||||
del_timer_sync(&priv->s_rnr_timer);
|
||||
}
|
||||
|
||||
static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
|
||||
u32 psn, u32 pmtu)
|
||||
{
|
||||
@ -194,7 +67,7 @@ static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
|
||||
ss->sg_list = wqe->sg_list + 1;
|
||||
ss->num_sge = wqe->wr.num_sge;
|
||||
ss->total_len = wqe->length;
|
||||
hfi1_skip_sge(ss, len, 0);
|
||||
rvt_skip_sge(ss, len, false);
|
||||
return wqe->length - len;
|
||||
}
|
||||
|
||||
@ -284,7 +157,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
|
||||
qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
|
||||
e->sent = 1;
|
||||
}
|
||||
ohdr->u.aeth = hfi1_compute_aeth(qp);
|
||||
ohdr->u.aeth = rvt_compute_aeth(qp);
|
||||
hwords++;
|
||||
qp->s_ack_rdma_psn = e->psn;
|
||||
bth2 = mask_psn(qp->s_ack_rdma_psn++);
|
||||
@ -293,7 +166,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
|
||||
ps->s_txreq->ss = NULL;
|
||||
len = 0;
|
||||
qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
|
||||
ohdr->u.at.aeth = hfi1_compute_aeth(qp);
|
||||
ohdr->u.at.aeth = rvt_compute_aeth(qp);
|
||||
ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
|
||||
hwords += sizeof(ohdr->u.at) / sizeof(u32);
|
||||
bth2 = mask_psn(e->psn);
|
||||
@ -315,7 +188,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
|
||||
len = pmtu;
|
||||
middle = HFI1_CAP_IS_KSET(SDMA_AHG);
|
||||
} else {
|
||||
ohdr->u.aeth = hfi1_compute_aeth(qp);
|
||||
ohdr->u.aeth = rvt_compute_aeth(qp);
|
||||
hwords++;
|
||||
qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
|
||||
e = &qp->s_ack_queue[qp->s_tail_ack_queue];
|
||||
@ -338,11 +211,11 @@ normal:
|
||||
ps->s_txreq->ss = NULL;
|
||||
if (qp->s_nak_state)
|
||||
ohdr->u.aeth =
|
||||
cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
|
||||
cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
|
||||
(qp->s_nak_state <<
|
||||
HFI1_AETH_CREDIT_SHIFT));
|
||||
IB_AETH_CREDIT_SHIFT));
|
||||
else
|
||||
ohdr->u.aeth = hfi1_compute_aeth(qp);
|
||||
ohdr->u.aeth = rvt_compute_aeth(qp);
|
||||
hwords++;
|
||||
len = 0;
|
||||
bth0 = OP(ACKNOWLEDGE) << 24;
|
||||
@ -414,7 +287,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
|
||||
goto bail;
|
||||
/* We are in the error state, flush the work request. */
|
||||
smp_read_barrier_depends(); /* see post_one_send() */
|
||||
if (qp->s_last == ACCESS_ONCE(qp->s_head))
|
||||
if (qp->s_last == READ_ONCE(qp->s_head))
|
||||
goto bail;
|
||||
/* If DMAs are in progress, we can't flush immediately. */
|
||||
if (iowait_sdma_pending(&priv->s_iowait)) {
|
||||
@ -457,7 +330,8 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
|
||||
newreq = 0;
|
||||
if (qp->s_cur == qp->s_tail) {
|
||||
/* Check if send work queue is empty. */
|
||||
if (qp->s_tail == qp->s_head) {
|
||||
smp_read_barrier_depends(); /* see post_one_send() */
|
||||
if (qp->s_tail == READ_ONCE(qp->s_head)) {
|
||||
clear_ahg(qp);
|
||||
goto bail;
|
||||
}
|
||||
@ -518,7 +392,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
|
||||
case IB_WR_SEND_WITH_INV:
|
||||
/* If no credit, return. */
|
||||
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
|
||||
cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
|
||||
rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
|
||||
qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
|
||||
goto bail;
|
||||
}
|
||||
@ -555,7 +429,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
|
||||
case IB_WR_RDMA_WRITE_WITH_IMM:
|
||||
/* If no credit, return. */
|
||||
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
|
||||
cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
|
||||
rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
|
||||
qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
|
||||
goto bail;
|
||||
}
|
||||
@@ -840,7 +714,7 @@ bail_no_tx:
 void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
 int is_fecn)
 {
-struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+struct hfi1_ibport *ibp = rcd_to_iport(rcd);
 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
 u64 pbc, pbc_flags = 0;
 u16 lrh0;
@@ -853,6 +727,10 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
 struct ib_header hdr;
 struct ib_other_headers *ohdr;
 unsigned long flags;
+struct hfi1_qp_priv *priv = qp->priv;
+
+/* clear the defer count */
+priv->r_adefered = 0;
 
 /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
 if (qp->s_flags & RVT_S_RESP_PENDING)
@@ -880,11 +758,11 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
 if (qp->s_mig_state == IB_MIG_MIGRATED)
 bth0 |= IB_BTH_MIG_REQ;
 if (qp->r_nak_state)
-ohdr->u.aeth = cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
+ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
 (qp->r_nak_state <<
-HFI1_AETH_CREDIT_SHIFT));
+IB_AETH_CREDIT_SHIFT));
 else
-ohdr->u.aeth = hfi1_compute_aeth(qp);
+ohdr->u.aeth = rvt_compute_aeth(qp);
 sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
 /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
 pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
@@ -1038,7 +916,7 @@ done:
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
-static void restart_rc(struct rvt_qp *qp, u32 psn, int wait)
+void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
 {
 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
 struct hfi1_ibport *ibp;
@@ -1074,44 +952,6 @@ static void restart_rc(struct rvt_qp *qp, u32 psn, int wait)
 reset_psn(qp, psn);
 }
 
-/*
-* This is called from s_timer for missing responses.
-*/
-void hfi1_rc_timeout(unsigned long arg)
-{
-struct rvt_qp *qp = (struct rvt_qp *)arg;
-struct hfi1_ibport *ibp;
-unsigned long flags;
-
-spin_lock_irqsave(&qp->r_lock, flags);
-spin_lock(&qp->s_lock);
-if (qp->s_flags & RVT_S_TIMER) {
-ibp = to_iport(qp->ibqp.device, qp->port_num);
-ibp->rvp.n_rc_timeouts++;
-qp->s_flags &= ~RVT_S_TIMER;
-del_timer(&qp->s_timer);
-trace_hfi1_timeout(qp, qp->s_last_psn + 1);
-restart_rc(qp, qp->s_last_psn + 1, 1);
-hfi1_schedule_send(qp);
-}
-spin_unlock(&qp->s_lock);
-spin_unlock_irqrestore(&qp->r_lock, flags);
-}
-
-/*
-* This is called from s_timer for RNR timeouts.
-*/
-void hfi1_rc_rnr_retry(unsigned long arg)
-{
-struct rvt_qp *qp = (struct rvt_qp *)arg;
-unsigned long flags;
-
-spin_lock_irqsave(&qp->s_lock, flags);
-hfi1_stop_rnr_timer(qp);
-hfi1_schedule_send(qp);
-spin_unlock_irqrestore(&qp->s_lock, flags);
-}
-
 /*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
@ -1150,7 +990,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
|
||||
u32 psn;
|
||||
|
||||
lockdep_assert_held(&qp->s_lock);
|
||||
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
|
||||
if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
|
||||
return;
|
||||
|
||||
/* Find out where the BTH is */
|
||||
@ -1178,7 +1018,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
|
||||
!(qp->s_flags &
|
||||
(RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
|
||||
(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
|
||||
hfi1_add_retry_timer(qp);
|
||||
rvt_add_retry_timer(qp);
|
||||
|
||||
while (qp->s_last != qp->s_acked) {
|
||||
u32 s_last;
|
||||
@ -1308,7 +1148,6 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
|
||||
int ret = 0;
|
||||
u32 ack_psn;
|
||||
int diff;
|
||||
unsigned long to;
|
||||
|
||||
lockdep_assert_held(&qp->s_lock);
|
||||
/*
|
||||
@ -1318,10 +1157,10 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
|
||||
* request but will include an ACK'ed request(s).
|
||||
*/
|
||||
ack_psn = psn;
|
||||
if (aeth >> 29)
|
||||
if (aeth >> IB_AETH_NAK_SHIFT)
|
||||
ack_psn--;
|
||||
wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
|
||||
ibp = to_iport(qp->ibqp.device, qp->port_num);
|
||||
ibp = rcd_to_iport(rcd);
|
||||
|
||||
/*
|
||||
* The MSN might be for a later WQE than the PSN indicates so
|
||||
@ -1357,7 +1196,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
|
||||
/* Retry this request. */
|
||||
if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
|
||||
qp->r_flags |= RVT_R_RDMAR_SEQ;
|
||||
restart_rc(qp, qp->s_last_psn + 1, 0);
|
||||
hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
|
||||
if (list_empty(&qp->rspwait)) {
|
||||
qp->r_flags |= RVT_R_RSP_SEND;
|
||||
rvt_get_qp(qp);
|
||||
@ -1398,7 +1237,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
|
||||
break;
|
||||
}
|
||||
|
||||
switch (aeth >> 29) {
|
||||
switch (aeth >> IB_AETH_NAK_SHIFT) {
|
||||
case 0: /* ACK */
|
||||
this_cpu_inc(*ibp->rvp.rc_acks);
|
||||
if (qp->s_acked != qp->s_tail) {
|
||||
@ -1406,7 +1245,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
|
||||
* We are expecting more ACKs so
|
||||
* mod the retry timer.
|
||||
*/
|
||||
hfi1_mod_retry_timer(qp);
|
||||
rvt_mod_retry_timer(qp);
|
||||
/*
|
||||
* We can stop re-sending the earlier packets and
|
||||
* continue with the next packet the receiver wants.
|
||||
@ -1415,7 +1254,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
|
||||
reset_psn(qp, psn + 1);
|
||||
} else {
|
||||
/* No more acks - kill all timers */
|
||||
hfi1_stop_rc_timers(qp);
|
||||
rvt_stop_rc_timers(qp);
|
||||
if (cmp_psn(qp->s_psn, psn) <= 0) {
|
||||
qp->s_state = OP(SEND_LAST);
|
||||
qp->s_psn = psn + 1;
|
||||
@ -1425,7 +1264,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
|
||||
qp->s_flags &= ~RVT_S_WAIT_ACK;
|
||||
hfi1_schedule_send(qp);
|
||||
}
|
||||
hfi1_get_credit(qp, aeth);
|
||||
rvt_get_credit(qp, aeth);
|
||||
qp->s_rnr_retry = qp->s_rnr_retry_cnt;
|
||||
qp->s_retry = qp->s_retry_cnt;
|
||||
update_last_psn(qp, psn);
|
||||
@ -1452,11 +1291,8 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
|
||||
reset_psn(qp, psn);
|
||||
|
||||
qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
|
||||
hfi1_stop_rc_timers(qp);
|
||||
to =
|
||||
ib_hfi1_rnr_table[(aeth >> HFI1_AETH_CREDIT_SHIFT) &
|
||||
HFI1_AETH_CREDIT_MASK];
|
||||
hfi1_add_rnr_timer(qp, to);
|
||||
rvt_stop_rc_timers(qp);
|
||||
rvt_add_rnr_timer(qp, aeth);
|
||||
return 0;
|
||||
|
||||
case 3: /* NAK */
|
||||
@ -1464,8 +1300,8 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
|
||||
goto bail_stop;
|
||||
/* The last valid PSN is the previous PSN. */
|
||||
update_last_psn(qp, psn - 1);
|
||||
switch ((aeth >> HFI1_AETH_CREDIT_SHIFT) &
|
||||
HFI1_AETH_CREDIT_MASK) {
|
||||
switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
|
||||
IB_AETH_CREDIT_MASK) {
|
||||
case 0: /* PSN sequence error */
|
||||
ibp->rvp.n_seq_naks++;
|
||||
/*
|
||||
@ -1474,7 +1310,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
|
||||
* RDMA READ response which terminates the RDMA
|
||||
* READ.
|
||||
*/
|
||||
restart_rc(qp, psn, 0);
|
||||
hfi1_restart_rc(qp, psn, 0);
|
||||
hfi1_schedule_send(qp);
|
||||
break;
|
||||
|
||||
@ -1513,7 +1349,7 @@ reserved:
|
||||
}
|
||||
/* cannot be reached */
|
||||
bail_stop:
|
||||
hfi1_stop_rc_timers(qp);
|
||||
rvt_stop_rc_timers(qp);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1528,7 +1364,7 @@ static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
|
||||
|
||||
lockdep_assert_held(&qp->s_lock);
|
||||
/* Remove QP from retry timer */
|
||||
hfi1_stop_rc_timers(qp);
|
||||
rvt_stop_rc_timers(qp);
|
||||
|
||||
wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
|
||||
|
||||
@ -1542,7 +1378,7 @@ static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
|
||||
|
||||
ibp->rvp.n_rdma_seq++;
|
||||
qp->r_flags |= RVT_R_RDMAR_SEQ;
|
||||
restart_rc(qp, qp->s_last_psn + 1, 0);
|
||||
hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
|
||||
if (list_empty(&qp->rspwait)) {
|
||||
qp->r_flags |= RVT_R_RSP_SEND;
|
||||
rvt_get_qp(qp);
|
||||
@ -1586,7 +1422,7 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp,
|
||||
|
||||
/* Ignore invalid responses. */
|
||||
smp_read_barrier_depends(); /* see post_one_send */
|
||||
if (cmp_psn(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0)
|
||||
if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
|
||||
goto ack_done;
|
||||
|
||||
/* Ignore duplicate responses. */
|
||||
@ -1595,8 +1431,8 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp,
|
||||
/* Update credits for "ghost" ACKs */
|
||||
if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
|
||||
aeth = be32_to_cpu(ohdr->u.aeth);
|
||||
if ((aeth >> 29) == 0)
|
||||
hfi1_get_credit(qp, aeth);
|
||||
if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
|
||||
rvt_get_credit(qp, aeth);
|
||||
}
|
||||
goto ack_done;
|
||||
}
|
||||
@ -1656,8 +1492,7 @@ read_middle:
|
||||
* We got a response so update the timeout.
|
||||
* 4.096 usec. * (1 << qp->timeout)
|
||||
*/
|
||||
qp->s_flags |= RVT_S_TIMER;
|
||||
mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
|
||||
rvt_mod_retry_timer(qp);
|
||||
if (qp->s_flags & RVT_S_WAIT_ACK) {
|
||||
qp->s_flags &= ~RVT_S_WAIT_ACK;
|
||||
hfi1_schedule_send(qp);
|
||||
@ -1673,7 +1508,7 @@ read_middle:
|
||||
qp->s_rdma_read_len -= pmtu;
|
||||
update_last_psn(qp, psn);
|
||||
spin_unlock_irqrestore(&qp->s_lock, flags);
|
||||
hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0, 0);
|
||||
hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, false, false);
|
||||
goto bail;
|
||||
|
||||
case OP(RDMA_READ_RESPONSE_ONLY):
|
||||
@ -1717,7 +1552,7 @@ read_last:
|
||||
if (unlikely(tlen != qp->s_rdma_read_len))
|
||||
goto ack_len_err;
|
||||
aeth = be32_to_cpu(ohdr->u.aeth);
|
||||
hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0, 0);
|
||||
hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, false, false);
|
||||
WARN_ON(qp->s_rdma_read_sge.num_sge);
|
||||
(void)do_rc_ack(qp, aeth, psn,
|
||||
OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
|
||||
@ -1786,7 +1621,7 @@ static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
|
||||
struct rvt_qp *qp, u32 opcode, u32 psn,
|
||||
int diff, struct hfi1_ctxtdata *rcd)
|
||||
{
|
||||
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
|
||||
struct hfi1_ibport *ibp = rcd_to_iport(rcd);
|
||||
struct rvt_ack_entry *e;
|
||||
unsigned long flags;
|
||||
u8 i, prev;
|
||||
@ -1961,25 +1796,6 @@ send_ack:
|
||||
return 0;
|
||||
}
|
||||
|
||||
void hfi1_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
|
||||
{
|
||||
unsigned long flags;
|
||||
int lastwqe;
|
||||
|
||||
spin_lock_irqsave(&qp->s_lock, flags);
|
||||
lastwqe = rvt_error_qp(qp, err);
|
||||
spin_unlock_irqrestore(&qp->s_lock, flags);
|
||||
|
||||
if (lastwqe) {
|
||||
struct ib_event ev;
|
||||
|
||||
ev.device = qp->ibqp.device;
|
||||
ev.element.qp = &qp->ibqp;
|
||||
ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
|
||||
qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void update_ack_queue(struct rvt_qp *qp, unsigned n)
|
||||
{
|
||||
unsigned next;
|
||||
@ -2095,7 +1911,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
|
||||
void *data = packet->ebuf;
|
||||
u32 tlen = packet->tlen;
|
||||
struct rvt_qp *qp = packet->qp;
|
||||
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
|
||||
struct hfi1_ibport *ibp = rcd_to_iport(rcd);
|
||||
struct ib_other_headers *ohdr = packet->ohdr;
|
||||
u32 bth0, opcode;
|
||||
u32 hdrsize = packet->hlen;
|
||||
@ -2107,7 +1923,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
|
||||
struct ib_reth *reth;
|
||||
unsigned long flags;
|
||||
int ret, is_fecn = 0;
|
||||
int copy_last = 0;
|
||||
bool copy_last = false;
|
||||
u32 rkey;
|
||||
|
||||
lockdep_assert_held(&qp->r_lock);
|
||||
@ -2180,7 +1996,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
|
||||
}
|
||||
|
||||
if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
|
||||
qp_comm_est(qp);
|
||||
rvt_comm_est(qp);
|
||||
|
||||
/* OK, process the packet. */
|
||||
switch (opcode) {
|
||||
@ -2201,7 +2017,7 @@ send_middle:
|
||||
qp->r_rcv_len += pmtu;
|
||||
if (unlikely(qp->r_rcv_len > qp->r_len))
|
||||
goto nack_inv;
|
||||
hfi1_copy_sge(&qp->r_sge, data, pmtu, 1, 0);
|
||||
hfi1_copy_sge(&qp->r_sge, data, pmtu, true, false);
|
||||
break;
|
||||
|
||||
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
|
||||
@ -2241,7 +2057,7 @@ send_last_inv:
|
||||
wc.wc_flags = IB_WC_WITH_INVALIDATE;
|
||||
goto send_last;
|
||||
case OP(RDMA_WRITE_LAST):
|
||||
copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user;
|
||||
copy_last = rvt_is_user_qp(qp);
|
||||
/* fall through */
|
||||
case OP(SEND_LAST):
|
||||
no_immediate_data:
|
||||
@ -2259,7 +2075,7 @@ send_last:
|
||||
wc.byte_len = tlen + qp->r_rcv_len;
|
||||
if (unlikely(wc.byte_len > qp->r_len))
|
||||
goto nack_inv;
|
||||
hfi1_copy_sge(&qp->r_sge, data, tlen, 1, copy_last);
|
||||
hfi1_copy_sge(&qp->r_sge, data, tlen, true, copy_last);
|
||||
rvt_put_ss(&qp->r_sge);
|
||||
qp->r_msn++;
|
||||
if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
|
||||
@ -2297,7 +2113,7 @@ send_last:
|
||||
break;
|
||||
|
||||
case OP(RDMA_WRITE_ONLY):
|
||||
copy_last = 1;
|
||||
copy_last = rvt_is_user_qp(qp);
|
||||
/* fall through */
|
||||
case OP(RDMA_WRITE_FIRST):
|
||||
case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
|
||||
@ -2512,7 +2328,7 @@ rnr_nak:
|
||||
return;
|
||||
|
||||
nack_op_err:
|
||||
hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
|
||||
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
|
||||
qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
|
||||
qp->r_ack_psn = qp->r_psn;
|
||||
/* Queue NAK for later */
|
||||
@ -2522,7 +2338,7 @@ nack_op_err:
|
||||
nack_inv_unlck:
|
||||
spin_unlock_irqrestore(&qp->s_lock, flags);
|
||||
nack_inv:
|
||||
hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
|
||||
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
|
||||
qp->r_nak_state = IB_NAK_INVALID_REQUEST;
|
||||
qp->r_ack_psn = qp->r_psn;
|
||||
/* Queue NAK for later */
|
||||
@ -2532,7 +2348,7 @@ nack_inv:
|
||||
nack_acc_unlck:
|
||||
spin_unlock_irqrestore(&qp->s_lock, flags);
|
||||
nack_acc:
|
||||
hfi1_rc_error(qp, IB_WC_LOC_PROT_ERR);
|
||||
rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
|
||||
qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
|
||||
qp->r_ack_psn = qp->r_psn;
|
||||
send_ack:
|
||||
@ -2547,7 +2363,7 @@ void hfi1_rc_hdrerr(
|
||||
{
|
||||
int has_grh = rcv_flags & HFI1_HAS_GRH;
|
||||
struct ib_other_headers *ohdr;
|
||||
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
|
||||
struct hfi1_ibport *ibp = rcd_to_iport(rcd);
|
||||
int diff;
|
||||
u32 opcode;
|
||||
u32 psn, bth0;
|
||||
|
@ -53,44 +53,6 @@
|
||||
#include "verbs_txreq.h"
|
||||
#include "trace.h"
|
||||
|
||||
/*
|
||||
* Convert the AETH RNR timeout code into the number of microseconds.
|
||||
*/
|
||||
const u32 ib_hfi1_rnr_table[32] = {
|
||||
655360, /* 00: 655.36 */
|
||||
10, /* 01: .01 */
|
||||
20, /* 02 .02 */
|
||||
30, /* 03: .03 */
|
||||
40, /* 04: .04 */
|
||||
60, /* 05: .06 */
|
||||
80, /* 06: .08 */
|
||||
120, /* 07: .12 */
|
||||
160, /* 08: .16 */
|
||||
240, /* 09: .24 */
|
||||
320, /* 0A: .32 */
|
||||
480, /* 0B: .48 */
|
||||
640, /* 0C: .64 */
|
||||
960, /* 0D: .96 */
|
||||
1280, /* 0E: 1.28 */
|
||||
1920, /* 0F: 1.92 */
|
||||
2560, /* 10: 2.56 */
|
||||
3840, /* 11: 3.84 */
|
||||
5120, /* 12: 5.12 */
|
||||
7680, /* 13: 7.68 */
|
||||
10240, /* 14: 10.24 */
|
||||
15360, /* 15: 15.36 */
|
||||
20480, /* 16: 20.48 */
|
||||
30720, /* 17: 30.72 */
|
||||
40960, /* 18: 40.96 */
|
||||
61440, /* 19: 61.44 */
|
||||
81920, /* 1A: 81.92 */
|
||||
122880, /* 1B: 122.88 */
|
||||
163840, /* 1C: 163.84 */
|
||||
245760, /* 1D: 245.76 */
|
||||
327680, /* 1E: 327.68 */
|
||||
491520 /* 1F: 491.52 */
|
||||
};
|
||||
|
||||
/*
|
||||
* Validate a RWQE and fill in the SGE state.
|
||||
* Return 1 if OK.
|
||||
@ -358,10 +320,9 @@ static void ruc_loopback(struct rvt_qp *sqp)
|
||||
u64 sdata;
|
||||
atomic64_t *maddr;
|
||||
enum ib_wc_status send_status;
|
||||
int release;
|
||||
bool release;
|
||||
int ret;
|
||||
int copy_last = 0;
|
||||
u32 to;
|
||||
bool copy_last = false;
|
||||
int local_ops = 0;
|
||||
|
||||
rcu_read_lock();
|
||||
@ -425,7 +386,7 @@ again:
|
||||
memset(&wc, 0, sizeof(wc));
|
||||
send_status = IB_WC_SUCCESS;
|
||||
|
||||
release = 1;
|
||||
release = true;
|
||||
sqp->s_sge.sge = wqe->sg_list[0];
|
||||
sqp->s_sge.sg_list = wqe->sg_list + 1;
|
||||
sqp->s_sge.num_sge = wqe->wr.num_sge;
|
||||
@ -476,7 +437,7 @@ send:
|
||||
/* skip copy_last set and qp_access_flags recheck */
|
||||
goto do_write;
|
||||
case IB_WR_RDMA_WRITE:
|
||||
copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user;
|
||||
copy_last = rvt_is_user_qp(qp);
|
||||
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
|
||||
goto inv_err;
|
||||
do_write:
|
||||
@ -500,7 +461,7 @@ do_write:
|
||||
wqe->rdma_wr.rkey,
|
||||
IB_ACCESS_REMOTE_READ)))
|
||||
goto acc_err;
|
||||
release = 0;
|
||||
release = false;
|
||||
sqp->s_sge.sg_list = NULL;
|
||||
sqp->s_sge.num_sge = 1;
|
||||
qp->r_sge.sge = wqe->sg_list[0];
|
||||
@ -618,8 +579,8 @@ rnr_nak:
|
||||
spin_lock_irqsave(&sqp->s_lock, flags);
|
||||
if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
|
||||
goto clr_busy;
|
||||
to = ib_hfi1_rnr_table[qp->r_min_rnr_timer];
|
||||
hfi1_add_rnr_timer(sqp, to);
|
||||
rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
|
||||
IB_AETH_CREDIT_SHIFT);
|
||||
goto clr_busy;
|
||||
|
||||
op_err:
|
||||
@ -637,7 +598,7 @@ acc_err:
|
||||
wc.status = IB_WC_LOC_PROT_ERR;
|
||||
err:
|
||||
/* responder goes to error state */
|
||||
hfi1_rc_error(qp, wc.status);
|
||||
rvt_rc_error(qp, wc.status);
|
||||
|
||||
serr:
|
||||
spin_lock_irqsave(&sqp->s_lock, flags);
|
||||
|
@ -130,14 +130,14 @@ const char *parse_everbs_hdrs(
|
||||
case OP(RC, ACKNOWLEDGE):
|
||||
trace_seq_printf(p, AETH_PRN, be32_to_cpu(eh->aeth) >> 24,
|
||||
parse_syndrome(be32_to_cpu(eh->aeth) >> 24),
|
||||
be32_to_cpu(eh->aeth) & HFI1_MSN_MASK);
|
||||
be32_to_cpu(eh->aeth) & IB_MSN_MASK);
|
||||
break;
|
||||
/* aeth + atomicacketh */
|
||||
case OP(RC, ATOMIC_ACKNOWLEDGE):
|
||||
trace_seq_printf(p, AETH_PRN " " ATOMICACKETH_PRN,
|
||||
be32_to_cpu(eh->at.aeth) >> 24,
|
||||
parse_syndrome(be32_to_cpu(eh->at.aeth) >> 24),
|
||||
be32_to_cpu(eh->at.aeth) & HFI1_MSN_MASK,
|
||||
be32_to_cpu(eh->at.aeth) & IB_MSN_MASK,
|
||||
ib_u64_get(&eh->at.atomic_ack_eth));
|
||||
break;
|
||||
/* atomiceth */
|
||||
|
@ -296,7 +296,7 @@ bail_no_tx:
|
||||
*/
|
||||
void hfi1_uc_rcv(struct hfi1_packet *packet)
|
||||
{
|
||||
struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
|
||||
struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
|
||||
struct ib_header *hdr = packet->hdr;
|
||||
u32 rcv_flags = packet->rcv_flags;
|
||||
void *data = packet->ebuf;
|
||||
@ -384,7 +384,7 @@ inv:
|
||||
}
|
||||
|
||||
if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
|
||||
qp_comm_est(qp);
|
||||
rvt_comm_est(qp);
|
||||
|
||||
/* OK, process the packet. */
|
||||
switch (opcode) {
|
||||
@ -419,7 +419,7 @@ send_first:
|
||||
qp->r_rcv_len += pmtu;
|
||||
if (unlikely(qp->r_rcv_len > qp->r_len))
|
||||
goto rewind;
|
||||
hfi1_copy_sge(&qp->r_sge, data, pmtu, 0, 0);
|
||||
hfi1_copy_sge(&qp->r_sge, data, pmtu, false, false);
|
||||
break;
|
||||
|
||||
case OP(SEND_LAST_WITH_IMMEDIATE):
|
||||
@ -444,7 +444,7 @@ send_last:
|
||||
if (unlikely(wc.byte_len > qp->r_len))
|
||||
goto rewind;
|
||||
wc.opcode = IB_WC_RECV;
|
||||
hfi1_copy_sge(&qp->r_sge, data, tlen, 0, 0);
|
||||
hfi1_copy_sge(&qp->r_sge, data, tlen, false, false);
|
||||
rvt_put_ss(&qp->s_rdma_read_sge);
|
||||
last_imm:
|
||||
wc.wr_id = qp->r_wr_id;
|
||||
@ -519,7 +519,7 @@ rdma_first:
|
||||
qp->r_rcv_len += pmtu;
|
||||
if (unlikely(qp->r_rcv_len > qp->r_len))
|
||||
goto drop;
|
||||
hfi1_copy_sge(&qp->r_sge, data, pmtu, 1, 0);
|
||||
hfi1_copy_sge(&qp->r_sge, data, pmtu, true, false);
|
||||
break;
|
||||
|
||||
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
|
||||
@ -548,7 +548,7 @@ rdma_last_imm:
|
||||
}
|
||||
wc.byte_len = qp->r_len;
|
||||
wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
|
||||
hfi1_copy_sge(&qp->r_sge, data, tlen, 1, 0);
|
||||
hfi1_copy_sge(&qp->r_sge, data, tlen, true, false);
|
||||
rvt_put_ss(&qp->r_sge);
|
||||
goto last_imm;
|
||||
|
||||
@ -564,7 +564,7 @@ rdma_last:
|
||||
tlen -= (hdrsize + pad + 4);
|
||||
if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
|
||||
goto drop;
|
||||
hfi1_copy_sge(&qp->r_sge, data, tlen, 1, 0);
|
||||
hfi1_copy_sge(&qp->r_sge, data, tlen, true, false);
|
||||
rvt_put_ss(&qp->r_sge);
|
||||
break;
|
||||
|
||||
@ -584,5 +584,5 @@ drop:
|
||||
return;
|
||||
|
||||
op_err:
|
||||
hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
|
||||
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
|
||||
}
|
||||
|
@ -167,7 +167,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
|
||||
|
||||
ret = hfi1_rvt_get_rwqe(qp, 0);
|
||||
if (ret < 0) {
|
||||
hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
|
||||
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
|
||||
goto bail_unlock;
|
||||
}
|
||||
if (!ret) {
|
||||
@ -189,10 +189,10 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
|
||||
|
||||
hfi1_make_grh(ibp, &grh, &grd, 0, 0);
|
||||
hfi1_copy_sge(&qp->r_sge, &grh,
|
||||
sizeof(grh), 1, 0);
|
||||
sizeof(grh), true, false);
|
||||
wc.wc_flags |= IB_WC_GRH;
|
||||
} else {
|
||||
hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
|
||||
rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
|
||||
}
|
||||
ssge.sg_list = swqe->sg_list + 1;
|
||||
ssge.sge = *swqe->sg_list;
|
||||
@ -206,7 +206,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
|
||||
if (len > sge->sge_length)
|
||||
len = sge->sge_length;
|
||||
WARN_ON_ONCE(len == 0);
|
||||
hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, 1, 0);
|
||||
hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, true, false);
|
||||
sge->vaddr += len;
|
||||
sge->length -= len;
|
||||
sge->sge_length -= len;
|
||||
@ -672,7 +672,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
|
||||
u32 src_qp;
|
||||
u16 dlid, pkey;
|
||||
int mgmt_pkey_idx = -1;
|
||||
struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
|
||||
struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
|
||||
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
|
||||
struct ib_header *hdr = packet->hdr;
|
||||
u32 rcv_flags = packet->rcv_flags;
|
||||
@ -796,7 +796,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
|
||||
|
||||
ret = hfi1_rvt_get_rwqe(qp, 0);
|
||||
if (ret < 0) {
|
||||
hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
|
||||
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
|
||||
return;
|
||||
}
|
||||
if (!ret) {
|
||||
@ -812,13 +812,13 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
|
||||
}
|
||||
if (has_grh) {
|
||||
hfi1_copy_sge(&qp->r_sge, &hdr->u.l.grh,
|
||||
sizeof(struct ib_grh), 1, 0);
|
||||
sizeof(struct ib_grh), true, false);
|
||||
wc.wc_flags |= IB_WC_GRH;
|
||||
} else {
|
||||
hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
|
||||
rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
|
||||
}
|
||||
hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
|
||||
1, 0);
|
||||
true, false);
|
||||
rvt_put_ss(&qp->r_sge);
|
||||
if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
|
||||
return;
|
||||
|
@ -45,6 +45,7 @@
|
||||
*
|
||||
*/
|
||||
#include <asm/page.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
#include "user_exp_rcv.h"
|
||||
#include "trace.h"
|
||||
@ -577,16 +578,10 @@ int hfi1_user_exp_rcv_clear(struct file *fp, struct hfi1_tid_info *tinfo)
|
||||
u32 *tidinfo;
|
||||
unsigned tididx;
|
||||
|
||||
tidinfo = kcalloc(tinfo->tidcnt, sizeof(*tidinfo), GFP_KERNEL);
|
||||
if (!tidinfo)
|
||||
return -ENOMEM;
|
||||
|
||||
if (copy_from_user(tidinfo, (void __user *)(unsigned long)
|
||||
tinfo->tidlist, sizeof(tidinfo[0]) *
|
||||
tinfo->tidcnt)) {
|
||||
ret = -EFAULT;
|
||||
goto done;
|
||||
}
|
||||
tidinfo = memdup_user((void __user *)(unsigned long)tinfo->tidlist,
|
||||
sizeof(tidinfo[0]) * tinfo->tidcnt);
|
||||
if (IS_ERR(tidinfo))
|
||||
return PTR_ERR(tidinfo);
|
||||
|
||||
mutex_lock(&uctxt->exp_lock);
|
||||
for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
|
||||
@ -602,7 +597,7 @@ int hfi1_user_exp_rcv_clear(struct file *fp, struct hfi1_tid_info *tinfo)
|
||||
spin_unlock(&fd->tid_lock);
|
||||
tinfo->tidcnt = tididx;
|
||||
mutex_unlock(&uctxt->exp_lock);
|
||||
done:
|
||||
|
||||
kfree(tidinfo);
|
||||
return ret;
|
||||
}
|
||||
|
@ -60,6 +60,7 @@
|
||||
#include <linux/mmu_context.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
#include "hfi.h"
|
||||
#include "sdma.h"
|
||||
@ -725,30 +726,28 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
|
||||
*/
|
||||
if (req_opcode(req->info.ctrl) == EXPECTED) {
|
||||
u16 ntids = iovec[idx].iov_len / sizeof(*req->tids);
|
||||
u32 *tmp;
|
||||
|
||||
if (!ntids || ntids > MAX_TID_PAIR_ENTRIES) {
|
||||
ret = -EINVAL;
|
||||
goto free_req;
|
||||
}
|
||||
req->tids = kcalloc(ntids, sizeof(*req->tids), GFP_KERNEL);
|
||||
if (!req->tids) {
|
||||
ret = -ENOMEM;
|
||||
goto free_req;
|
||||
}
|
||||
|
||||
/*
|
||||
* We have to copy all of the tids because they may vary
|
||||
* in size and, therefore, the TID count might not be
|
||||
* equal to the pkt count. However, there is no way to
|
||||
* tell at this point.
|
||||
*/
|
||||
ret = copy_from_user(req->tids, iovec[idx].iov_base,
|
||||
ntids * sizeof(*req->tids));
|
||||
if (ret) {
|
||||
tmp = memdup_user(iovec[idx].iov_base,
|
||||
ntids * sizeof(*req->tids));
|
||||
if (IS_ERR(tmp)) {
|
||||
ret = PTR_ERR(tmp);
|
||||
SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
|
||||
ntids, ret);
|
||||
ret = -EFAULT;
|
||||
goto free_req;
|
||||
}
|
||||
req->tids = tmp;
|
||||
req->n_tids = ntids;
|
||||
idx++;
|
||||
}
|
||||
|
@ -291,7 +291,7 @@ static void wss_insert(void *address)
|
||||
/*
|
||||
* Is the working set larger than the threshold?
|
||||
*/
|
||||
static inline int wss_exceeds_threshold(void)
|
||||
static inline bool wss_exceeds_threshold(void)
|
||||
{
|
||||
return atomic_read(&wss.total_count) >= wss.threshold;
|
||||
}
|
||||
@ -419,18 +419,19 @@ __be64 ib_hfi1_sys_image_guid;
|
||||
* @ss: the SGE state
|
||||
* @data: the data to copy
|
||||
* @length: the length of the data
|
||||
* @release: boolean to release MR
|
||||
* @copy_last: do a separate copy of the last 8 bytes
|
||||
*/
|
||||
void hfi1_copy_sge(
|
||||
struct rvt_sge_state *ss,
|
||||
void *data, u32 length,
|
||||
int release,
|
||||
int copy_last)
|
||||
bool release,
|
||||
bool copy_last)
|
||||
{
|
||||
struct rvt_sge *sge = &ss->sge;
|
||||
int in_last = 0;
|
||||
int i;
|
||||
int cacheless_copy = 0;
|
||||
bool in_last = false;
|
||||
bool cacheless_copy = false;
|
||||
|
||||
if (sge_copy_mode == COPY_CACHELESS) {
|
||||
cacheless_copy = length >= PAGE_SIZE;
|
||||
@ -454,19 +455,15 @@ void hfi1_copy_sge(
|
||||
if (length > 8) {
|
||||
length -= 8;
|
||||
} else {
|
||||
copy_last = 0;
|
||||
in_last = 1;
|
||||
copy_last = false;
|
||||
in_last = true;
|
||||
}
|
||||
}
|
||||
|
||||
again:
|
||||
while (length) {
|
||||
u32 len = sge->length;
|
||||
u32 len = rvt_get_sge_length(sge, length);
|
||||
|
||||
if (len > length)
|
||||
len = length;
|
||||
if (len > sge->sge_length)
|
||||
len = sge->sge_length;
|
||||
WARN_ON_ONCE(len == 0);
|
||||
if (unlikely(in_last)) {
|
||||
/* enforce byte transfer ordering */
|
||||
@ -477,77 +474,19 @@ again:
|
||||
} else {
|
||||
memcpy(sge->vaddr, data, len);
|
||||
}
|
||||
sge->vaddr += len;
|
||||
sge->length -= len;
|
||||
sge->sge_length -= len;
|
||||
if (sge->sge_length == 0) {
|
||||
if (release)
|
||||
rvt_put_mr(sge->mr);
|
||||
if (--ss->num_sge)
|
||||
*sge = *ss->sg_list++;
|
||||
} else if (sge->length == 0 && sge->mr->lkey) {
|
||||
if (++sge->n >= RVT_SEGSZ) {
|
||||
if (++sge->m >= sge->mr->mapsz)
|
||||
break;
|
||||
sge->n = 0;
|
||||
}
|
||||
sge->vaddr =
|
||||
sge->mr->map[sge->m]->segs[sge->n].vaddr;
|
||||
sge->length =
|
||||
sge->mr->map[sge->m]->segs[sge->n].length;
|
||||
}
|
||||
rvt_update_sge(ss, len, release);
|
||||
data += len;
|
||||
length -= len;
|
||||
}
|
||||
|
||||
if (copy_last) {
|
||||
copy_last = 0;
|
||||
in_last = 1;
|
||||
copy_last = false;
|
||||
in_last = true;
|
||||
length = 8;
|
||||
goto again;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* hfi1_skip_sge - skip over SGE memory
|
||||
* @ss: the SGE state
|
||||
* @length: the number of bytes to skip
|
||||
*/
|
||||
void hfi1_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
|
||||
{
|
||||
struct rvt_sge *sge = &ss->sge;
|
||||
|
||||
while (length) {
|
||||
u32 len = sge->length;
|
||||
|
||||
if (len > length)
|
||||
len = length;
|
||||
if (len > sge->sge_length)
|
||||
len = sge->sge_length;
|
||||
WARN_ON_ONCE(len == 0);
|
||||
sge->vaddr += len;
|
||||
sge->length -= len;
|
||||
sge->sge_length -= len;
|
||||
if (sge->sge_length == 0) {
|
||||
if (release)
|
||||
rvt_put_mr(sge->mr);
|
||||
if (--ss->num_sge)
|
||||
*sge = *ss->sg_list++;
|
||||
} else if (sge->length == 0 && sge->mr->lkey) {
|
||||
if (++sge->n >= RVT_SEGSZ) {
|
||||
if (++sge->m >= sge->mr->mapsz)
|
||||
break;
|
||||
sge->n = 0;
|
||||
}
|
||||
sge->vaddr =
|
||||
sge->mr->map[sge->m]->segs[sge->n].vaddr;
|
||||
sge->length =
|
||||
sge->mr->map[sge->m]->segs[sge->n].length;
|
||||
}
|
||||
length -= len;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Make sure the QP is ready and able to accept the given opcode.
|
||||
*/
|
||||
@ -576,7 +515,7 @@ void hfi1_ib_rcv(struct hfi1_packet *packet)
|
||||
struct ib_header *hdr = packet->hdr;
|
||||
u32 tlen = packet->tlen;
|
||||
struct hfi1_pportdata *ppd = rcd->ppd;
|
||||
struct hfi1_ibport *ibp = &ppd->ibport_data;
|
||||
struct hfi1_ibport *ibp = rcd_to_iport(rcd);
|
||||
struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
|
||||
opcode_handler packet_handler;
|
||||
unsigned long flags;
|
||||
@ -689,27 +628,6 @@ static void mem_timer(unsigned long data)
|
||||
hfi1_qp_wakeup(qp, RVT_S_WAIT_KMEM);
|
||||
}
|
||||
|
||||
void update_sge(struct rvt_sge_state *ss, u32 length)
|
||||
{
|
||||
struct rvt_sge *sge = &ss->sge;
|
||||
|
||||
sge->vaddr += length;
|
||||
sge->length -= length;
|
||||
sge->sge_length -= length;
|
||||
if (sge->sge_length == 0) {
|
||||
if (--ss->num_sge)
|
||||
*sge = *ss->sg_list++;
|
||||
} else if (sge->length == 0 && sge->mr->lkey) {
|
||||
if (++sge->n >= RVT_SEGSZ) {
|
||||
if (++sge->m >= sge->mr->mapsz)
|
||||
return;
|
||||
sge->n = 0;
|
||||
}
|
||||
sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
|
||||
sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* This is called with progress side lock held.
|
||||
*/
|
||||
@ -798,7 +716,7 @@ static noinline int build_verbs_ulp_payload(
|
||||
len);
|
||||
if (ret)
|
||||
goto bail_txadd;
|
||||
update_sge(ss, len);
|
||||
rvt_update_sge(ss, len, false);
|
||||
length -= len;
|
||||
}
|
||||
return ret;
|
||||
@ -1073,7 +991,7 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
|
||||
|
||||
if (slen > len)
|
||||
slen = len;
|
||||
update_sge(ss, slen);
|
||||
rvt_update_sge(ss, slen, false);
|
||||
seg_pio_copy_mid(pbuf, addr, slen);
|
||||
len -= slen;
|
||||
}
|
||||
@ -1618,7 +1536,7 @@ static int cntr_names_initialized;
|
||||
* external strings.
|
||||
*/
|
||||
static int init_cntr_names(const char *names_in,
|
||||
const int names_len,
|
||||
const size_t names_len,
|
||||
int num_extra_names,
|
||||
int *num_cntrs,
|
||||
const char ***cntr_names)
|
||||
@ -1845,6 +1763,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
|
||||
dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu;
|
||||
dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
|
||||
dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
|
||||
dd->verbs_dev.rdi.driver_f.notify_restart_rc = hfi1_restart_rc;
|
||||
dd->verbs_dev.rdi.driver_f.check_send_wqe = hfi1_check_send_wqe;
|
||||
|
||||
/* completeion queue */
|
||||
@ -1910,7 +1829,7 @@ void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
|
||||
|
||||
void hfi1_cnp_rcv(struct hfi1_packet *packet)
|
||||
{
|
||||
struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
|
||||
struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
|
||||
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
|
||||
struct ib_header *hdr = packet->hdr;
|
||||
struct rvt_qp *qp = packet->qp;
|
||||
|
@ -127,7 +127,6 @@ struct hfi1_qp_priv {
|
||||
u8 s_sc; /* SC[0..4] for next packet */
|
||||
u8 r_adefered; /* number of acks defered */
|
||||
struct iowait s_iowait;
|
||||
struct timer_list s_rnr_timer;
|
||||
struct rvt_qp *owner;
|
||||
};
|
||||
|
||||
@ -259,15 +258,6 @@ int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
|
||||
#endif
|
||||
#define PSN_MODIFY_MASK 0xFFFFFF
|
||||
|
||||
/*
|
||||
* Compare the lower 24 bits of the msn values.
|
||||
* Returns an integer <, ==, or > than zero.
|
||||
*/
|
||||
static inline int cmp_msn(u32 a, u32 b)
|
||||
{
|
||||
return (((int)a) - ((int)b)) << 8;
|
||||
}
|
||||
|
||||
/*
|
||||
* Compare two PSNs
|
||||
* Returns an integer <, ==, or > than zero.
|
||||
@ -299,9 +289,7 @@ void hfi1_put_txreq(struct verbs_txreq *tx);
|
||||
int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
|
||||
|
||||
void hfi1_copy_sge(struct rvt_sge_state *ss, void *data, u32 length,
|
||||
int release, int copy_last);
|
||||
|
||||
void hfi1_skip_sge(struct rvt_sge_state *ss, u32 length, int release);
|
||||
bool release, bool copy_last);
|
||||
|
||||
void hfi1_cnp_rcv(struct hfi1_packet *packet);
|
||||
|
||||
@ -319,16 +307,8 @@ u8 ah_to_sc(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
|
||||
|
||||
struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid);
|
||||
|
||||
void hfi1_rc_rnr_retry(unsigned long arg);
|
||||
void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to);
|
||||
void hfi1_rc_timeout(unsigned long arg);
|
||||
void hfi1_del_timers_sync(struct rvt_qp *qp);
|
||||
void hfi1_stop_rc_timers(struct rvt_qp *qp);
|
||||
|
||||
void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr);
|
||||
|
||||
void hfi1_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
|
||||
|
||||
void hfi1_ud_rcv(struct hfi1_packet *packet);
|
||||
|
||||
int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey);
|
||||
@ -342,7 +322,7 @@ int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
|
||||
|
||||
void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
|
||||
int attr_mask, struct ib_udata *udata);
|
||||
|
||||
void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
|
||||
int hfi1_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
|
||||
|
||||
extern const u32 rc_only_opcode;
|
||||
|
@ -32,6 +32,7 @@
|
||||
*/
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/module.h>
|
||||
#include <rdma/ib_addr.h>
|
||||
#include <rdma/ib_smi.h>
|
||||
#include <rdma/ib_user_verbs.h>
|
||||
|
@ -450,6 +450,9 @@ static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
|
||||
u32 cnt = 0, p1, p2, val = 0, err_code;
|
||||
enum i40iw_status_code ret_code;
|
||||
|
||||
*maj_err = 0;
|
||||
*min_err = 0;
|
||||
|
||||
ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
|
||||
&cqp->sdbuf,
|
||||
128,
|
||||
@ -4498,9 +4501,9 @@ void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *inf
|
||||
i40iw_fill_qos_list(info->params->qs_handle_list);
|
||||
|
||||
for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
|
||||
vsi->qos[i].qs_handle =
|
||||
info->params->qs_handle_list[i];
|
||||
i40iw_debug(vsi->dev, I40IW_DEBUG_DCB, "qset[%d]: %d\n", i, vsi->qos[i].qs_handle);
|
||||
vsi->qos[i].qs_handle = info->params->qs_handle_list[i];
|
||||
i40iw_debug(vsi->dev, I40IW_DEBUG_DCB, "qset[%d]: %d\n", i,
|
||||
vsi->qos[i].qs_handle);
|
||||
spin_lock_init(&vsi->qos[i].lock);
|
||||
INIT_LIST_HEAD(&vsi->qos[i].qplist);
|
||||
}
|
||||
@ -4851,46 +4854,46 @@ void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
|
||||
}
|
||||
|
||||
static struct i40iw_cqp_ops iw_cqp_ops = {
|
||||
i40iw_sc_cqp_init,
|
||||
i40iw_sc_cqp_create,
|
||||
i40iw_sc_cqp_post_sq,
|
||||
i40iw_sc_cqp_get_next_send_wqe,
|
||||
i40iw_sc_cqp_destroy,
|
||||
i40iw_sc_poll_for_cqp_op_done
|
||||
.cqp_init = i40iw_sc_cqp_init,
|
||||
.cqp_create = i40iw_sc_cqp_create,
|
||||
.cqp_post_sq = i40iw_sc_cqp_post_sq,
|
||||
.cqp_get_next_send_wqe = i40iw_sc_cqp_get_next_send_wqe,
|
||||
.cqp_destroy = i40iw_sc_cqp_destroy,
|
||||
.poll_for_cqp_op_done = i40iw_sc_poll_for_cqp_op_done
|
||||
};
|
||||
|
||||
static struct i40iw_ccq_ops iw_ccq_ops = {
|
||||
i40iw_sc_ccq_init,
|
||||
i40iw_sc_ccq_create,
|
||||
i40iw_sc_ccq_destroy,
|
||||
i40iw_sc_ccq_create_done,
|
||||
i40iw_sc_ccq_get_cqe_info,
|
||||
i40iw_sc_ccq_arm
|
||||
.ccq_init = i40iw_sc_ccq_init,
|
||||
.ccq_create = i40iw_sc_ccq_create,
|
||||
.ccq_destroy = i40iw_sc_ccq_destroy,
|
||||
.ccq_create_done = i40iw_sc_ccq_create_done,
|
||||
.ccq_get_cqe_info = i40iw_sc_ccq_get_cqe_info,
|
||||
.ccq_arm = i40iw_sc_ccq_arm
|
||||
};
|
||||
|
||||
static struct i40iw_ceq_ops iw_ceq_ops = {
|
||||
i40iw_sc_ceq_init,
|
||||
i40iw_sc_ceq_create,
|
||||
i40iw_sc_cceq_create_done,
|
||||
i40iw_sc_cceq_destroy_done,
|
||||
i40iw_sc_cceq_create,
|
||||
i40iw_sc_ceq_destroy,
|
||||
i40iw_sc_process_ceq
|
||||
.ceq_init = i40iw_sc_ceq_init,
|
||||
.ceq_create = i40iw_sc_ceq_create,
|
||||
.cceq_create_done = i40iw_sc_cceq_create_done,
|
||||
.cceq_destroy_done = i40iw_sc_cceq_destroy_done,
|
||||
.cceq_create = i40iw_sc_cceq_create,
|
||||
.ceq_destroy = i40iw_sc_ceq_destroy,
|
||||
.process_ceq = i40iw_sc_process_ceq
|
||||
};
|
||||
|
||||
static struct i40iw_aeq_ops iw_aeq_ops = {
|
||||
i40iw_sc_aeq_init,
|
||||
i40iw_sc_aeq_create,
|
||||
i40iw_sc_aeq_destroy,
|
||||
i40iw_sc_get_next_aeqe,
|
||||
i40iw_sc_repost_aeq_entries,
|
||||
i40iw_sc_aeq_create_done,
|
||||
i40iw_sc_aeq_destroy_done
|
||||
.aeq_init = i40iw_sc_aeq_init,
|
||||
.aeq_create = i40iw_sc_aeq_create,
|
||||
.aeq_destroy = i40iw_sc_aeq_destroy,
|
||||
.get_next_aeqe = i40iw_sc_get_next_aeqe,
|
||||
.repost_aeq_entries = i40iw_sc_repost_aeq_entries,
|
||||
.aeq_create_done = i40iw_sc_aeq_create_done,
|
||||
.aeq_destroy_done = i40iw_sc_aeq_destroy_done
|
||||
};
|
||||
|
||||
/* iwarp pd ops */
|
||||
static struct i40iw_pd_ops iw_pd_ops = {
|
||||
i40iw_sc_pd_init,
|
||||
.pd_init = i40iw_sc_pd_init,
|
||||
};
|
||||
|
||||
static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
|
||||
@ -4909,53 +4912,51 @@ static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
|
||||
};
|
||||
|
||||
static struct i40iw_priv_cq_ops iw_priv_cq_ops = {
|
||||
i40iw_sc_cq_init,
|
||||
i40iw_sc_cq_create,
|
||||
i40iw_sc_cq_destroy,
|
||||
i40iw_sc_cq_modify,
|
||||
.cq_init = i40iw_sc_cq_init,
|
||||
.cq_create = i40iw_sc_cq_create,
|
||||
.cq_destroy = i40iw_sc_cq_destroy,
|
||||
.cq_modify = i40iw_sc_cq_modify,
|
||||
};
|
||||
|
||||
static struct i40iw_mr_ops iw_mr_ops = {
|
||||
i40iw_sc_alloc_stag,
|
||||
i40iw_sc_mr_reg_non_shared,
|
||||
i40iw_sc_mr_reg_shared,
|
||||
i40iw_sc_dealloc_stag,
|
||||
i40iw_sc_query_stag,
|
||||
i40iw_sc_mw_alloc
|
||||
.alloc_stag = i40iw_sc_alloc_stag,
|
||||
.mr_reg_non_shared = i40iw_sc_mr_reg_non_shared,
|
||||
.mr_reg_shared = i40iw_sc_mr_reg_shared,
|
||||
.dealloc_stag = i40iw_sc_dealloc_stag,
|
||||
.query_stag = i40iw_sc_query_stag,
|
||||
.mw_alloc = i40iw_sc_mw_alloc
|
||||
};
|
||||
|
||||
static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {
|
||||
i40iw_sc_manage_push_page,
|
||||
i40iw_sc_manage_hmc_pm_func_table,
|
||||
i40iw_sc_set_hmc_resource_profile,
|
||||
i40iw_sc_commit_fpm_values,
|
||||
i40iw_sc_query_fpm_values,
|
||||
i40iw_sc_static_hmc_pages_allocated,
|
||||
i40iw_sc_add_arp_cache_entry,
|
||||
i40iw_sc_del_arp_cache_entry,
|
||||
i40iw_sc_query_arp_cache_entry,
|
||||
i40iw_sc_manage_apbvt_entry,
|
||||
i40iw_sc_manage_qhash_table_entry,
|
||||
i40iw_sc_alloc_local_mac_ipaddr_entry,
|
||||
i40iw_sc_add_local_mac_ipaddr_entry,
|
||||
i40iw_sc_del_local_mac_ipaddr_entry,
|
||||
i40iw_sc_cqp_nop,
|
||||
i40iw_sc_commit_fpm_values_done,
|
||||
i40iw_sc_query_fpm_values_done,
|
||||
i40iw_sc_manage_hmc_pm_func_table_done,
|
||||
i40iw_sc_suspend_qp,
|
||||
i40iw_sc_resume_qp
|
||||
.manage_push_page = i40iw_sc_manage_push_page,
|
||||
.manage_hmc_pm_func_table = i40iw_sc_manage_hmc_pm_func_table,
|
||||
.set_hmc_resource_profile = i40iw_sc_set_hmc_resource_profile,
|
||||
.commit_fpm_values = i40iw_sc_commit_fpm_values,
|
||||
.query_fpm_values = i40iw_sc_query_fpm_values,
|
||||
.static_hmc_pages_allocated = i40iw_sc_static_hmc_pages_allocated,
|
||||
.add_arp_cache_entry = i40iw_sc_add_arp_cache_entry,
|
||||
.del_arp_cache_entry = i40iw_sc_del_arp_cache_entry,
|
||||
.query_arp_cache_entry = i40iw_sc_query_arp_cache_entry,
|
||||
.manage_apbvt_entry = i40iw_sc_manage_apbvt_entry,
|
||||
.manage_qhash_table_entry = i40iw_sc_manage_qhash_table_entry,
|
||||
.alloc_local_mac_ipaddr_table_entry = i40iw_sc_alloc_local_mac_ipaddr_entry,
|
||||
.add_local_mac_ipaddr_entry = i40iw_sc_add_local_mac_ipaddr_entry,
|
||||
.del_local_mac_ipaddr_entry = i40iw_sc_del_local_mac_ipaddr_entry,
|
||||
.cqp_nop = i40iw_sc_cqp_nop,
|
||||
.commit_fpm_values_done = i40iw_sc_commit_fpm_values_done,
|
||||
.query_fpm_values_done = i40iw_sc_query_fpm_values_done,
|
||||
.manage_hmc_pm_func_table_done = i40iw_sc_manage_hmc_pm_func_table_done,
|
||||
.update_suspend_qp = i40iw_sc_suspend_qp,
|
||||
.update_resume_qp = i40iw_sc_resume_qp
|
||||
};
|
||||
|
||||
static struct i40iw_hmc_ops iw_hmc_ops = {
|
||||
i40iw_sc_init_iw_hmc,
|
||||
i40iw_sc_parse_fpm_query_buf,
|
||||
i40iw_sc_configure_iw_fpm,
|
||||
i40iw_sc_parse_fpm_commit_buf,
|
||||
i40iw_sc_create_hmc_obj,
|
||||
i40iw_sc_del_hmc_obj,
|
||||
NULL,
|
||||
NULL
|
||||
.init_iw_hmc = i40iw_sc_init_iw_hmc,
|
||||
.parse_fpm_query_buf = i40iw_sc_parse_fpm_query_buf,
|
||||
.configure_iw_fpm = i40iw_sc_configure_iw_fpm,
|
||||
.parse_fpm_commit_buf = i40iw_sc_parse_fpm_commit_buf,
|
||||
.create_hmc_object = i40iw_sc_create_hmc_obj,
|
||||
.del_hmc_object = i40iw_sc_del_hmc_obj
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -913,29 +913,29 @@ enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u32 sge, u32 inline_data
|
||||
}
|
||||
|
||||
static struct i40iw_qp_uk_ops iw_qp_uk_ops = {
|
||||
i40iw_qp_post_wr,
|
||||
i40iw_qp_ring_push_db,
|
||||
i40iw_rdma_write,
|
||||
i40iw_rdma_read,
|
||||
i40iw_send,
|
||||
i40iw_inline_rdma_write,
|
||||
i40iw_inline_send,
|
||||
i40iw_stag_local_invalidate,
|
||||
i40iw_mw_bind,
|
||||
i40iw_post_receive,
|
||||
i40iw_nop
|
||||
.iw_qp_post_wr = i40iw_qp_post_wr,
|
||||
.iw_qp_ring_push_db = i40iw_qp_ring_push_db,
|
||||
.iw_rdma_write = i40iw_rdma_write,
|
||||
.iw_rdma_read = i40iw_rdma_read,
|
||||
.iw_send = i40iw_send,
|
||||
.iw_inline_rdma_write = i40iw_inline_rdma_write,
|
||||
.iw_inline_send = i40iw_inline_send,
|
||||
.iw_stag_local_invalidate = i40iw_stag_local_invalidate,
|
||||
.iw_mw_bind = i40iw_mw_bind,
|
||||
.iw_post_receive = i40iw_post_receive,
|
||||
.iw_post_nop = i40iw_nop
|
||||
};
|
||||
|
||||
static struct i40iw_cq_ops iw_cq_ops = {
|
||||
i40iw_cq_request_notification,
|
||||
i40iw_cq_poll_completion,
|
||||
i40iw_cq_post_entries,
|
||||
i40iw_clean_cq
|
||||
.iw_cq_request_notification = i40iw_cq_request_notification,
|
||||
.iw_cq_poll_completion = i40iw_cq_poll_completion,
|
||||
.iw_cq_post_entries = i40iw_cq_post_entries,
|
||||
.iw_cq_clean = i40iw_clean_cq
|
||||
};
|
||||
|
||||
static struct i40iw_device_uk_ops iw_device_uk_ops = {
|
||||
i40iw_cq_uk_init,
|
||||
i40iw_qp_uk_init,
|
||||
.iwarp_cq_uk_init = i40iw_cq_uk_init,
|
||||
.iwarp_qp_uk_init = i40iw_qp_uk_init,
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -76,10 +76,6 @@ enum {
|
||||
MLX4_IB_LSO_HEADER_SPARE = 128,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX4_IB_IBOE_ETHERTYPE = 0x8915
|
||||
};
|
||||
|
||||
struct mlx4_ib_sqp {
|
||||
struct mlx4_ib_qp qp;
|
||||
int pkey_index;
|
||||
@ -2588,7 +2584,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
|
||||
u16 ether_type;
|
||||
u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
|
||||
|
||||
ether_type = (!is_udp) ? MLX4_IB_IBOE_ETHERTYPE :
|
||||
ether_type = (!is_udp) ? ETH_P_IBOE:
|
||||
(ip_version == 4 ? ETH_P_IP : ETH_P_IPV6);
|
||||
|
||||
mlx->sched_prio = cpu_to_be16(pcp);
|
||||
|
@ -851,20 +851,18 @@ err_uar_table_free:
|
||||
|
||||
static int mthca_enable_msi_x(struct mthca_dev *mdev)
|
||||
{
|
||||
struct msix_entry entries[3];
|
||||
int err;
|
||||
|
||||
entries[0].entry = 0;
|
||||
entries[1].entry = 1;
|
||||
entries[2].entry = 2;
|
||||
|
||||
err = pci_enable_msix_exact(mdev->pdev, entries, ARRAY_SIZE(entries));
|
||||
if (err)
|
||||
err = pci_alloc_irq_vectors(mdev->pdev, 3, 3, PCI_IRQ_MSIX);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = entries[0].vector;
|
||||
mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = entries[1].vector;
|
||||
mdev->eq_table.eq[MTHCA_EQ_CMD ].msi_x_vector = entries[2].vector;
|
||||
mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector =
|
||||
pci_irq_vector(mdev->pdev, 0);
|
||||
mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector =
|
||||
pci_irq_vector(mdev->pdev, 1);
|
||||
mdev->eq_table.eq[MTHCA_EQ_CMD ].msi_x_vector =
|
||||
pci_irq_vector(mdev->pdev, 2);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1018,7 +1016,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
|
||||
err = mthca_setup_hca(mdev);
|
||||
if (err == -EBUSY && (mdev->mthca_flags & MTHCA_FLAG_MSI_X)) {
|
||||
if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
|
||||
pci_disable_msix(pdev);
|
||||
pci_free_irq_vectors(pdev);
|
||||
mdev->mthca_flags &= ~MTHCA_FLAG_MSI_X;
|
||||
|
||||
err = mthca_setup_hca(mdev);
|
||||
@ -1062,7 +1060,7 @@ err_cleanup:
|
||||
|
||||
err_close:
|
||||
if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
|
||||
pci_disable_msix(pdev);
|
||||
pci_free_irq_vectors(pdev);
|
||||
|
||||
mthca_close_hca(mdev);
|
||||
|
||||
@ -1113,7 +1111,7 @@ static void __mthca_remove_one(struct pci_dev *pdev)
|
||||
mthca_cmd_cleanup(mdev);
|
||||
|
||||
if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
|
||||
pci_disable_msix(pdev);
|
||||
pci_free_irq_vectors(pdev);
|
||||
|
||||
ib_dealloc_device(&mdev->ib_dev);
|
||||
pci_release_regions(pdev);
|
||||
|
@ -135,17 +135,17 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
|
||||
/* instance of function pointers for client API */
|
||||
/* set address of this instance to cm_core->cm_ops at cm_core alloc */
|
||||
static const struct nes_cm_ops nes_cm_api = {
|
||||
mini_cm_accelerated,
|
||||
mini_cm_listen,
|
||||
mini_cm_del_listen,
|
||||
mini_cm_connect,
|
||||
mini_cm_close,
|
||||
mini_cm_accept,
|
||||
mini_cm_reject,
|
||||
mini_cm_recv_pkt,
|
||||
mini_cm_dealloc_core,
|
||||
mini_cm_get,
|
||||
mini_cm_set
|
||||
.accelerated = mini_cm_accelerated,
|
||||
.listen = mini_cm_listen,
|
||||
.stop_listener = mini_cm_del_listen,
|
||||
.connect = mini_cm_connect,
|
||||
.close = mini_cm_close,
|
||||
.accept = mini_cm_accept,
|
||||
.reject = mini_cm_reject,
|
||||
.recv_pkt = mini_cm_recv_pkt,
|
||||
.destroy_cm_core = mini_cm_dealloc_core,
|
||||
.get = mini_cm_get,
|
||||
.set = mini_cm_set
|
||||
};
|
||||
|
||||
static struct nes_cm_core *g_cm_core;
|
||||
|
@ -59,7 +59,7 @@ static u16 ocrdma_hdr_type_to_proto_num(int devid, u8 hdr_type)
|
||||
{
|
||||
switch (hdr_type) {
|
||||
case OCRDMA_L3_TYPE_IB_GRH:
|
||||
return (u16)0x8915;
|
||||
return (u16)ETH_P_IBOE;
|
||||
case OCRDMA_L3_TYPE_IPV4:
|
||||
return (u16)0x0800;
|
||||
case OCRDMA_L3_TYPE_IPV6:
|
||||
@ -94,7 +94,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
|
||||
proto_num = ocrdma_hdr_type_to_proto_num(dev->id, ah->hdr_type);
|
||||
if (!proto_num)
|
||||
return -EINVAL;
|
||||
nxthdr = (proto_num == 0x8915) ? 0x1b : 0x11;
|
||||
nxthdr = (proto_num == ETH_P_IBOE) ? 0x1b : 0x11;
|
||||
/* VLAN */
|
||||
if (!vlan_tag || (vlan_tag > 0xFFF))
|
||||
vlan_tag = dev->pvid;
|
||||
|
@ -44,6 +44,7 @@
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/log2.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/if_ether.h>
|
||||
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include <rdma/ib_user_verbs.h>
|
||||
@ -2984,7 +2985,7 @@ static int ocrdma_parse_dcbxcfg_rsp(struct ocrdma_dev *dev, int ptype,
|
||||
OCRDMA_APP_PARAM_APP_PROTO_MASK;
|
||||
|
||||
if (
|
||||
valid && proto == OCRDMA_APP_PROTO_ROCE &&
|
||||
valid && proto == ETH_P_IBOE &&
|
||||
proto_sel == OCRDMA_PROTO_SELECT_L2) {
|
||||
for (slindx = 0; slindx <
|
||||
OCRDMA_MAX_SERVICE_LEVEL_INDEX; slindx++) {
|
||||
|
@ -1901,7 +1901,6 @@ struct ocrdma_eth_vlan {
|
||||
u8 smac[6];
|
||||
__be16 eth_type;
|
||||
__be16 vlan_tag;
|
||||
#define OCRDMA_ROCE_ETH_TYPE 0x8915
|
||||
__be16 roce_eth_type;
|
||||
} __packed;
|
||||
|
||||
@ -2179,10 +2178,6 @@ enum OCRDMA_DCBX_PARAM_TYPE {
|
||||
OCRDMA_PARAMETER_TYPE_PEER = 0x02
|
||||
};
|
||||
|
||||
enum OCRDMA_DCBX_APP_PROTO {
|
||||
OCRDMA_APP_PROTO_ROCE = 0x8915
|
||||
};
|
||||
|
||||
enum OCRDMA_DCBX_PROTO {
|
||||
OCRDMA_PROTO_SELECT_L2 = 0x00,
|
||||
OCRDMA_PROTO_SELECT_L4 = 0x01
|
||||
|
@ -1170,8 +1170,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
|
||||
|
||||
dev->cq_tbl[cq->id] = NULL;
|
||||
indx = ocrdma_get_eq_table_index(dev, cq->eqn);
|
||||
if (indx == -EINVAL)
|
||||
BUG();
|
||||
BUG_ON(indx == -EINVAL);
|
||||
|
||||
eq = &dev->eq_tbl[indx];
|
||||
irq = ocrdma_get_irq(dev, eq);
|
||||
@ -1741,8 +1740,7 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
|
||||
wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
|
||||
OCRDMA_CQE_BUFTAG_SHIFT) &
|
||||
qp->srq->rq.max_wqe_idx;
|
||||
if (wqe_idx < 1)
|
||||
BUG();
|
||||
BUG_ON(wqe_idx < 1);
|
||||
spin_lock_irqsave(&qp->srq->q_lock, flags);
|
||||
ocrdma_hwq_inc_tail(&qp->srq->rq);
|
||||
ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
|
||||
@ -2388,15 +2386,13 @@ static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
|
||||
if (srq->idx_bit_fields[row]) {
|
||||
indx = ffs(srq->idx_bit_fields[row]);
|
||||
indx = (row * 32) + (indx - 1);
|
||||
if (indx >= srq->rq.max_cnt)
|
||||
BUG();
|
||||
BUG_ON(indx >= srq->rq.max_cnt);
|
||||
ocrdma_srq_toggle_bit(srq, indx);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (row == srq->bit_fields_len)
|
||||
BUG();
|
||||
BUG_ON(row == srq->bit_fields_len);
|
||||
return indx + 1; /* Use from index 1 */
|
||||
}
|
||||
|
||||
@ -2754,8 +2750,7 @@ static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
|
||||
srq = get_ocrdma_srq(qp->ibqp.srq);
|
||||
wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
|
||||
OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
|
||||
if (wqe_idx < 1)
|
||||
BUG();
|
||||
BUG_ON(wqe_idx < 1);
|
||||
|
||||
ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
|
||||
spin_lock_irqsave(&srq->q_lock, flags);
|
||||
|
@ -287,7 +287,7 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
|
||||
has_udp = (sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
|
||||
if (!has_udp) {
|
||||
/* RoCE v1 */
|
||||
ether_type = ETH_P_ROCE;
|
||||
ether_type = ETH_P_IBOE;
|
||||
*roce_mode = ROCE_V1;
|
||||
} else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
|
||||
/* RoCE v2 IPv4 */
|
||||
|
@ -37,7 +37,6 @@
|
||||
|
||||
#define QEDR_GSI_MAX_RECV_SGE (1) /* LL2 FW limitation */
|
||||
|
||||
#define ETH_P_ROCE (0x8915)
|
||||
#define QEDR_ROCE_V2_UDP_SPORT (0000)
|
||||
|
||||
static inline u32 qedr_get_ipv4_from_gid(u8 *gid)
|
||||
|
@ -771,8 +771,10 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
|
||||
goto err0;
|
||||
|
||||
q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
|
||||
if (IS_ERR_OR_NULL(q->pbl_tbl))
|
||||
if (IS_ERR(q->pbl_tbl)) {
|
||||
rc = PTR_ERR(q->pbl_tbl);
|
||||
goto err0;
|
||||
}
|
||||
|
||||
qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
|
||||
|
||||
@ -1086,30 +1088,6 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void qedr_cleanup_user_sq(struct qedr_dev *dev, struct qedr_qp *qp)
|
||||
{
|
||||
qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
|
||||
ib_umem_release(qp->usq.umem);
|
||||
}
|
||||
|
||||
static void qedr_cleanup_user_rq(struct qedr_dev *dev, struct qedr_qp *qp)
|
||||
{
|
||||
qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
|
||||
ib_umem_release(qp->urq.umem);
|
||||
}
|
||||
|
||||
static void qedr_cleanup_kernel_sq(struct qedr_dev *dev, struct qedr_qp *qp)
|
||||
{
|
||||
dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
|
||||
kfree(qp->wqe_wr_id);
|
||||
}
|
||||
|
||||
static void qedr_cleanup_kernel_rq(struct qedr_dev *dev, struct qedr_qp *qp)
|
||||
{
|
||||
dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
|
||||
kfree(qp->rqe_wr_id);
|
||||
}
|
||||
|
||||
static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
|
||||
struct ib_qp_init_attr *attrs)
|
||||
{
|
||||
@ -1198,15 +1176,13 @@ static int qedr_copy_qp_uresp(struct qedr_dev *dev,
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void qedr_set_qp_init_params(struct qedr_dev *dev,
|
||||
struct qedr_qp *qp,
|
||||
struct qedr_pd *pd,
|
||||
struct ib_qp_init_attr *attrs)
|
||||
static void qedr_set_common_qp_params(struct qedr_dev *dev,
|
||||
struct qedr_qp *qp,
|
||||
struct qedr_pd *pd,
|
||||
struct ib_qp_init_attr *attrs)
|
||||
{
|
||||
qp->pd = pd;
|
||||
|
||||
spin_lock_init(&qp->q_lock);
|
||||
|
||||
qp->pd = pd;
|
||||
qp->qp_type = attrs->qp_type;
|
||||
qp->max_inline_data = attrs->cap.max_inline_data;
|
||||
qp->sq.max_sges = attrs->cap.max_send_sge;
|
||||
@ -1215,7 +1191,11 @@ static void qedr_set_qp_init_params(struct qedr_dev *dev,
|
||||
qp->sq_cq = get_qedr_cq(attrs->send_cq);
|
||||
qp->rq_cq = get_qedr_cq(attrs->recv_cq);
|
||||
qp->dev = dev;
|
||||
qp->rq.max_sges = attrs->cap.max_recv_sge;
|
||||
|
||||
DP_DEBUG(dev, QEDR_MSG_QP,
|
||||
"RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
|
||||
qp->rq.max_sges, qp->rq_cq->icid);
|
||||
DP_DEBUG(dev, QEDR_MSG_QP,
|
||||
"QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
|
||||
pd->pd_id, qp->qp_type, qp->max_inline_data,
|
||||
@ -1223,95 +1203,149 @@ static void qedr_set_qp_init_params(struct qedr_dev *dev,
|
||||
DP_DEBUG(dev, QEDR_MSG_QP,
|
||||
"SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
|
||||
qp->sq.max_sges, qp->sq_cq->icid);
|
||||
qp->rq.max_sges = attrs->cap.max_recv_sge;
|
||||
DP_DEBUG(dev, QEDR_MSG_QP,
|
||||
"RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
|
||||
qp->rq.max_sges, qp->rq_cq->icid);
|
||||
}
|
||||
|
||||
static inline void
|
||||
qedr_init_qp_user_params(struct qed_rdma_create_qp_in_params *params,
|
||||
struct qedr_create_qp_ureq *ureq)
|
||||
{
|
||||
/* QP handle to be written in CQE */
|
||||
params->qp_handle_lo = ureq->qp_handle_lo;
|
||||
params->qp_handle_hi = ureq->qp_handle_hi;
|
||||
}
|
||||
|
||||
static inline void
|
||||
qedr_init_qp_kernel_doorbell_sq(struct qedr_dev *dev, struct qedr_qp *qp)
|
||||
static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
|
||||
{
|
||||
qp->sq.db = dev->db_addr +
|
||||
DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
|
||||
qp->sq.db_data.data.icid = qp->icid + 1;
|
||||
}
|
||||
|
||||
static inline void
|
||||
qedr_init_qp_kernel_doorbell_rq(struct qedr_dev *dev, struct qedr_qp *qp)
|
||||
{
|
||||
qp->rq.db = dev->db_addr +
|
||||
DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
|
||||
qp->rq.db_data.data.icid = qp->icid;
|
||||
}
|
||||
|
||||
static inline int
|
||||
qedr_init_qp_kernel_params_rq(struct qedr_dev *dev,
|
||||
struct qedr_qp *qp, struct ib_qp_init_attr *attrs)
|
||||
{
|
||||
/* Allocate driver internal RQ array */
|
||||
qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
|
||||
GFP_KERNEL);
|
||||
if (!qp->rqe_wr_id)
|
||||
return -ENOMEM;
|
||||
|
||||
DP_DEBUG(dev, QEDR_MSG_QP, "RQ max_wr set to %d.\n", qp->rq.max_wr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int
|
||||
qedr_init_qp_kernel_params_sq(struct qedr_dev *dev,
|
||||
static inline void
|
||||
qedr_init_common_qp_in_params(struct qedr_dev *dev,
|
||||
struct qedr_pd *pd,
|
||||
struct qedr_qp *qp,
|
||||
struct ib_qp_init_attr *attrs,
|
||||
bool fmr_and_reserved_lkey,
|
||||
struct qed_rdma_create_qp_in_params *params)
|
||||
{
|
||||
u32 temp_max_wr;
|
||||
/* QP handle to be written in an async event */
|
||||
params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
|
||||
params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
|
||||
|
||||
/* Allocate driver internal SQ array */
|
||||
temp_max_wr = attrs->cap.max_send_wr * dev->wq_multiplier;
|
||||
temp_max_wr = min_t(u32, temp_max_wr, dev->attr.max_sqe);
|
||||
|
||||
/* temp_max_wr < attr->max_sqe < u16 so the casting is safe */
|
||||
qp->sq.max_wr = (u16)temp_max_wr;
|
||||
qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
|
||||
GFP_KERNEL);
|
||||
if (!qp->wqe_wr_id)
|
||||
return -ENOMEM;
|
||||
|
||||
DP_DEBUG(dev, QEDR_MSG_QP, "SQ max_wr set to %d.\n", qp->sq.max_wr);
|
||||
|
||||
/* QP handle to be written in CQE */
|
||||
params->qp_handle_lo = lower_32_bits((uintptr_t)qp);
|
||||
params->qp_handle_hi = upper_32_bits((uintptr_t)qp);
|
||||
|
||||
return 0;
|
||||
params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
|
||||
params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
|
||||
params->pd = pd->pd_id;
|
||||
params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
|
||||
params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
|
||||
params->stats_queue = 0;
|
||||
params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
|
||||
params->srq_id = 0;
|
||||
params->use_srq = false;
|
||||
}
|
||||
|
||||
static inline int qedr_init_qp_kernel_sq(struct qedr_dev *dev,
|
||||
struct qedr_qp *qp,
|
||||
struct ib_qp_init_attr *attrs)
|
||||
static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
|
||||
{
|
||||
u32 n_sq_elems, n_sq_entries;
|
||||
DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
|
||||
"qp=%p. "
|
||||
"sq_addr=0x%llx, "
|
||||
"sq_len=%zd, "
|
||||
"rq_addr=0x%llx, "
|
||||
"rq_len=%zd"
|
||||
"\n",
|
||||
qp,
|
||||
qp->usq.buf_addr,
|
||||
qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
|
||||
}
|
||||
|
||||
static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
|
||||
{
|
||||
if (qp->usq.umem)
|
||||
ib_umem_release(qp->usq.umem);
|
||||
qp->usq.umem = NULL;
|
||||
|
||||
if (qp->urq.umem)
|
||||
ib_umem_release(qp->urq.umem);
|
||||
qp->urq.umem = NULL;
|
||||
}
|
||||
|
||||
static int qedr_create_user_qp(struct qedr_dev *dev,
|
||||
struct qedr_qp *qp,
|
||||
struct ib_pd *ibpd,
|
||||
struct ib_udata *udata,
|
||||
struct ib_qp_init_attr *attrs)
|
||||
{
|
||||
struct qed_rdma_create_qp_in_params in_params;
|
||||
struct qed_rdma_create_qp_out_params out_params;
|
||||
struct qedr_pd *pd = get_qedr_pd(ibpd);
|
||||
struct ib_ucontext *ib_ctx = NULL;
|
||||
struct qedr_ucontext *ctx = NULL;
|
||||
struct qedr_create_qp_ureq ureq;
|
||||
int rc = -EINVAL;
|
||||
|
||||
ib_ctx = ibpd->uobject->context;
|
||||
ctx = get_qedr_ucontext(ib_ctx);
|
||||
|
||||
memset(&ureq, 0, sizeof(ureq));
|
||||
rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
|
||||
if (rc) {
|
||||
DP_ERR(dev, "Problem copying data from user space\n");
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* SQ - read access only (0), dma sync not required (0) */
|
||||
rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
|
||||
ureq.sq_len, 0, 0);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
/* RQ - read access only (0), dma sync not required (0) */
|
||||
rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
|
||||
ureq.rq_len, 0, 0);
|
||||
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
memset(&in_params, 0, sizeof(in_params));
|
||||
qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
|
||||
in_params.qp_handle_lo = ureq.qp_handle_lo;
|
||||
in_params.qp_handle_hi = ureq.qp_handle_hi;
|
||||
in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
|
||||
in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
|
||||
in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
|
||||
in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
|
||||
|
||||
qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
|
||||
&in_params, &out_params);
|
||||
|
||||
if (!qp->qed_qp) {
|
||||
rc = -ENOMEM;
|
||||
goto err1;
|
||||
}
|
||||
|
||||
qp->qp_id = out_params.qp_id;
|
||||
qp->icid = out_params.icid;
|
||||
|
||||
rc = qedr_copy_qp_uresp(dev, qp, udata);
|
||||
if (rc)
|
||||
goto err;
|
||||
|
||||
qedr_qp_user_print(dev, qp);
|
||||
|
||||
return 0;
|
||||
err:
|
||||
rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
|
||||
if (rc)
|
||||
DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
|
||||
|
||||
err1:
|
||||
qedr_cleanup_user(dev, qp);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int
|
||||
qedr_roce_create_kernel_qp(struct qedr_dev *dev,
|
||||
struct qedr_qp *qp,
|
||||
struct qed_rdma_create_qp_in_params *in_params,
|
||||
u32 n_sq_elems, u32 n_rq_elems)
|
||||
{
|
||||
struct qed_rdma_create_qp_out_params out_params;
|
||||
int rc;
|
||||
|
||||
/* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
|
||||
* the ring. The ring should allow at least a single WR, even if the
|
||||
* user requested none, due to allocation issues.
|
||||
*/
|
||||
n_sq_entries = attrs->cap.max_send_wr;
|
||||
n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
|
||||
n_sq_entries = max_t(u32, n_sq_entries, 1);
|
||||
n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
|
||||
rc = dev->ops->common->chain_alloc(dev->cdev,
|
||||
QED_CHAIN_USE_TO_PRODUCE,
|
||||
QED_CHAIN_MODE_PBL,
|
||||
@ -1319,31 +1353,13 @@ static inline int qedr_init_qp_kernel_sq(struct qedr_dev *dev,
|
||||
n_sq_elems,
|
||||
QEDR_SQE_ELEMENT_SIZE,
|
||||
&qp->sq.pbl);
|
||||
if (rc) {
|
||||
DP_ERR(dev, "failed to allocate QP %p SQ\n", qp);
|
||||
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
|
||||
DP_DEBUG(dev, QEDR_MSG_SQ,
|
||||
"SQ Pbl base addr = %llx max_send_wr=%d max_wr=%d capacity=%d, rc=%d\n",
|
||||
qed_chain_get_pbl_phys(&qp->sq.pbl), attrs->cap.max_send_wr,
|
||||
n_sq_entries, qed_chain_get_capacity(&qp->sq.pbl), rc);
|
||||
return 0;
|
||||
}
|
||||
in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
|
||||
in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
|
||||
|
||||
static inline int qedr_init_qp_kernel_rq(struct qedr_dev *dev,
|
||||
struct qedr_qp *qp,
|
||||
struct ib_qp_init_attr *attrs)
|
||||
{
|
||||
u32 n_rq_elems, n_rq_entries;
|
||||
int rc;
|
||||
|
||||
/* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
|
||||
* the ring. There ring should allow at least a single WR, even if the
|
||||
* user requested none, due to allocation issues.
|
||||
*/
|
||||
n_rq_entries = max_t(u32, attrs->cap.max_recv_wr, 1);
|
||||
n_rq_elems = n_rq_entries * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
|
||||
rc = dev->ops->common->chain_alloc(dev->cdev,
|
||||
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
|
||||
QED_CHAIN_MODE_PBL,
|
||||
@ -1351,136 +1367,102 @@ static inline int qedr_init_qp_kernel_rq(struct qedr_dev *dev,
|
||||
n_rq_elems,
|
||||
QEDR_RQE_ELEMENT_SIZE,
|
||||
&qp->rq.pbl);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
if (rc) {
|
||||
DP_ERR(dev, "failed to allocate memory for QP %p RQ\n", qp);
|
||||
return -ENOMEM;
|
||||
}
|
||||
in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
|
||||
in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
|
||||
|
||||
DP_DEBUG(dev, QEDR_MSG_RQ,
|
||||
"RQ Pbl base addr = %llx max_recv_wr=%d max_wr=%d capacity=%d, rc=%d\n",
|
||||
qed_chain_get_pbl_phys(&qp->rq.pbl), attrs->cap.max_recv_wr,
|
||||
n_rq_entries, qed_chain_get_capacity(&qp->rq.pbl), rc);
|
||||
qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
|
||||
in_params, &out_params);
|
||||
|
||||
/* n_rq_entries < u16 so the casting is safe */
|
||||
qp->rq.max_wr = (u16)n_rq_entries;
|
||||
if (!qp->qed_qp)
|
||||
return -EINVAL;
|
||||
|
||||
qp->qp_id = out_params.qp_id;
|
||||
qp->icid = out_params.icid;
|
||||
|
||||
qedr_set_roce_db_info(dev, qp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void
|
||||
qedr_init_qp_in_params_sq(struct qedr_dev *dev,
|
||||
struct qedr_pd *pd,
|
||||
struct qedr_qp *qp,
|
||||
struct ib_qp_init_attr *attrs,
|
||||
struct ib_udata *udata,
|
||||
struct qed_rdma_create_qp_in_params *params)
|
||||
static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
|
||||
{
|
||||
/* QP handle to be written in an async event */
|
||||
params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
|
||||
params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
|
||||
dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
|
||||
kfree(qp->wqe_wr_id);
|
||||
|
||||
params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
|
||||
params->fmr_and_reserved_lkey = !udata;
|
||||
params->pd = pd->pd_id;
|
||||
params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
|
||||
params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
|
||||
params->max_sq_sges = 0;
|
||||
params->stats_queue = 0;
|
||||
dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
|
||||
kfree(qp->rqe_wr_id);
|
||||
}
|
||||
|
||||
if (udata) {
|
||||
params->sq_num_pages = qp->usq.pbl_info.num_pbes;
|
||||
params->sq_pbl_ptr = qp->usq.pbl_tbl->pa;
|
||||
} else {
|
||||
params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
|
||||
params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
|
||||
static int qedr_create_kernel_qp(struct qedr_dev *dev,
|
||||
struct qedr_qp *qp,
|
||||
struct ib_pd *ibpd,
|
||||
struct ib_qp_init_attr *attrs)
|
||||
{
|
||||
struct qed_rdma_create_qp_in_params in_params;
|
||||
struct qedr_pd *pd = get_qedr_pd(ibpd);
|
||||
int rc = -EINVAL;
|
||||
u32 n_rq_elems;
|
||||
u32 n_sq_elems;
|
||||
u32 n_sq_entries;
|
||||
|
||||
memset(&in_params, 0, sizeof(in_params));
|
||||
|
||||
/* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
|
||||
* the ring. The ring should allow at least a single WR, even if the
|
||||
* user requested none, due to allocation issues.
|
||||
* We should add an extra WR since the prod and cons indices of
|
||||
* wqe_wr_id are managed in such a way that the WQ is considered full
|
||||
* when (prod+1)%max_wr==cons. We currently don't do that because we
|
||||
* double the number of entries due an iSER issue that pushes far more
|
||||
* WRs than indicated. If we decline its ib_post_send() then we get
|
||||
* error prints in the dmesg we'd like to avoid.
|
||||
*/
|
||||
qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
|
||||
dev->attr.max_sqe);
|
||||
|
||||
qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
|
||||
GFP_KERNEL);
|
||||
if (!qp->wqe_wr_id) {
|
||||
DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
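The long comment above explains how qedr sizes the kernel SQ: the wqe_wr_id
prod/cons indices use the classic one-slot-open convention, where the ring
counts as full when advancing prod would land on cons. A small sketch of that
convention, independent of qedr and for illustration only:

/* Illustrative only: the one-slot-open ring test described in the comment
 * above.  With this convention a ring of 'size' slots holds at most
 * size - 1 outstanding entries, which is why an extra WR would be needed. */
static inline bool ring_full(u32 prod, u32 cons, u32 size)
{
	return ((prod + 1) % size) == cons;
}

static inline bool ring_empty(u32 prod, u32 cons)
{
	return prod == cons;
}
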
static inline void
|
||||
qedr_init_qp_in_params_rq(struct qedr_qp *qp,
|
||||
struct ib_qp_init_attr *attrs,
|
||||
struct ib_udata *udata,
|
||||
struct qed_rdma_create_qp_in_params *params)
|
||||
{
|
||||
params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
|
||||
params->srq_id = 0;
|
||||
params->use_srq = false;
|
||||
/* QP handle to be written in CQE */
|
||||
in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
|
||||
in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
|
||||
|
||||
if (udata) {
|
||||
params->rq_num_pages = qp->urq.pbl_info.num_pbes;
|
||||
params->rq_pbl_ptr = qp->urq.pbl_tbl->pa;
|
||||
} else {
|
||||
params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
|
||||
params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
|
||||
/* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
|
||||
* the ring. There ring should allow at least a single WR, even if the
|
||||
* user requested none, due to allocation issues.
|
||||
*/
|
||||
qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
|
||||
|
||||
/* Allocate driver internal RQ array */
|
||||
qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
|
||||
GFP_KERNEL);
|
||||
if (!qp->rqe_wr_id) {
|
||||
DP_ERR(dev,
|
||||
"create qp: failed RQ shadow memory allocation\n");
|
||||
kfree(qp->wqe_wr_id);
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
|
||||
{
|
||||
DP_DEBUG(dev, QEDR_MSG_QP,
|
||||
"create qp: successfully created user QP. qp=%p, sq_addr=0x%llx, sq_len=%zd, rq_addr=0x%llx, rq_len=%zd\n",
|
||||
qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr,
|
||||
qp->urq.buf_len);
|
||||
}
|
||||
qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
|
||||
|
||||
static inline int qedr_init_user_qp(struct ib_ucontext *ib_ctx,
|
||||
struct qedr_dev *dev,
|
||||
struct qedr_qp *qp,
|
||||
struct qedr_create_qp_ureq *ureq)
|
||||
{
|
||||
int rc;
|
||||
n_sq_entries = attrs->cap.max_send_wr;
|
||||
n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
|
||||
n_sq_entries = max_t(u32, n_sq_entries, 1);
|
||||
n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
|
||||
|
||||
/* SQ - read access only (0), dma sync not required (0) */
|
||||
rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq->sq_addr,
|
||||
ureq->sq_len, 0, 0);
|
||||
n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
|
||||
|
||||
rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
|
||||
n_sq_elems, n_rq_elems);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
/* RQ - read access only (0), dma sync not required (0) */
|
||||
rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq->rq_addr,
|
||||
ureq->rq_len, 0, 0);
|
||||
|
||||
if (rc)
|
||||
qedr_cleanup_user_sq(dev, qp);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static inline int
|
||||
qedr_init_kernel_qp(struct qedr_dev *dev,
|
||||
struct qedr_qp *qp,
|
||||
struct ib_qp_init_attr *attrs,
|
||||
struct qed_rdma_create_qp_in_params *params)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = qedr_init_qp_kernel_sq(dev, qp, attrs);
|
||||
if (rc) {
|
||||
DP_ERR(dev, "failed to init kernel QP %p SQ\n", qp);
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = qedr_init_qp_kernel_params_sq(dev, qp, attrs, params);
|
||||
if (rc) {
|
||||
dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
|
||||
DP_ERR(dev, "failed to init kernel QP %p SQ params\n", qp);
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = qedr_init_qp_kernel_rq(dev, qp, attrs);
|
||||
if (rc) {
|
||||
qedr_cleanup_kernel_sq(dev, qp);
|
||||
DP_ERR(dev, "failed to init kernel QP %p RQ\n", qp);
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = qedr_init_qp_kernel_params_rq(dev, qp, attrs);
|
||||
if (rc) {
|
||||
DP_ERR(dev, "failed to init kernel QP %p RQ params\n", qp);
|
||||
qedr_cleanup_kernel_sq(dev, qp);
|
||||
dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
|
||||
return rc;
|
||||
}
|
||||
qedr_cleanup_kernel(dev, qp);
|
||||
|
||||
return rc;
|
||||
}
|
||||
@ -1490,12 +1472,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
|
||||
struct ib_udata *udata)
|
||||
{
|
||||
struct qedr_dev *dev = get_qedr_dev(ibpd->device);
|
||||
struct qed_rdma_create_qp_out_params out_params;
|
||||
struct qed_rdma_create_qp_in_params in_params;
|
||||
struct qedr_pd *pd = get_qedr_pd(ibpd);
|
||||
struct ib_ucontext *ib_ctx = NULL;
|
||||
struct qedr_ucontext *ctx = NULL;
|
||||
struct qedr_create_qp_ureq ureq;
|
||||
struct qedr_qp *qp;
|
||||
struct ib_qp *ibqp;
|
||||
int rc = 0;
|
||||
@ -1510,101 +1487,42 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
|
||||
if (attrs->srq)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
|
||||
if (!qp)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
DP_DEBUG(dev, QEDR_MSG_QP,
|
||||
"create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
|
||||
"create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
|
||||
udata ? "user library" : "kernel", attrs->event_handler, pd,
|
||||
get_qedr_cq(attrs->send_cq),
|
||||
get_qedr_cq(attrs->send_cq)->icid,
|
||||
get_qedr_cq(attrs->recv_cq),
|
||||
get_qedr_cq(attrs->recv_cq)->icid);
|
||||
|
||||
qedr_set_qp_init_params(dev, qp, pd, attrs);
|
||||
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
|
||||
if (!qp) {
|
||||
DP_ERR(dev, "create qp: failed allocating memory\n");
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
qedr_set_common_qp_params(dev, qp, pd, attrs);
|
||||
|
||||
if (attrs->qp_type == IB_QPT_GSI) {
|
||||
if (udata) {
|
||||
DP_ERR(dev,
|
||||
"create qp: unexpected udata when creating GSI QP\n");
|
||||
goto err0;
|
||||
}
|
||||
ibqp = qedr_create_gsi_qp(dev, attrs, qp);
|
||||
if (IS_ERR(ibqp))
|
||||
kfree(qp);
|
||||
return ibqp;
|
||||
}
|
||||
|
||||
memset(&in_params, 0, sizeof(in_params));
|
||||
if (udata)
|
||||
rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
|
||||
else
|
||||
rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
|
||||
|
||||
if (udata) {
|
||||
if (!(udata && ibpd->uobject && ibpd->uobject->context))
|
||||
goto err0;
|
||||
if (rc)
|
||||
goto err;
|
||||
|
||||
ib_ctx = ibpd->uobject->context;
|
||||
ctx = get_qedr_ucontext(ib_ctx);
|
||||
|
||||
memset(&ureq, 0, sizeof(ureq));
|
||||
if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
|
||||
DP_ERR(dev,
|
||||
"create qp: problem copying data from user space\n");
|
||||
goto err0;
|
||||
}
|
||||
|
||||
rc = qedr_init_user_qp(ib_ctx, dev, qp, &ureq);
|
||||
if (rc)
|
||||
goto err0;
|
||||
|
||||
qedr_init_qp_user_params(&in_params, &ureq);
|
||||
} else {
|
||||
rc = qedr_init_kernel_qp(dev, qp, attrs, &in_params);
|
||||
if (rc)
|
||||
goto err0;
|
||||
}
|
||||
|
||||
qedr_init_qp_in_params_sq(dev, pd, qp, attrs, udata, &in_params);
|
||||
qedr_init_qp_in_params_rq(qp, attrs, udata, &in_params);
|
||||
|
||||
qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
|
||||
&in_params, &out_params);
|
||||
|
||||
if (!qp->qed_qp)
|
||||
goto err1;
|
||||
|
||||
qp->qp_id = out_params.qp_id;
|
||||
qp->icid = out_params.icid;
|
||||
qp->ibqp.qp_num = qp->qp_id;
|
||||
|
||||
if (udata) {
|
||||
rc = qedr_copy_qp_uresp(dev, qp, udata);
|
||||
if (rc)
|
||||
goto err2;
|
||||
|
||||
qedr_qp_user_print(dev, qp);
|
||||
} else {
|
||||
qedr_init_qp_kernel_doorbell_sq(dev, qp);
|
||||
qedr_init_qp_kernel_doorbell_rq(dev, qp);
|
||||
}
|
||||
|
||||
DP_DEBUG(dev, QEDR_MSG_QP, "created %s space QP %p\n",
|
||||
udata ? "user" : "kernel", qp);
|
||||
|
||||
return &qp->ibqp;
|
||||
|
||||
err2:
|
||||
rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
|
||||
if (rc)
|
||||
DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
|
||||
err1:
|
||||
if (udata) {
|
||||
qedr_cleanup_user_sq(dev, qp);
|
||||
qedr_cleanup_user_rq(dev, qp);
|
||||
} else {
|
||||
qedr_cleanup_kernel_sq(dev, qp);
|
||||
qedr_cleanup_kernel_rq(dev, qp);
|
||||
}
|
||||
|
||||
err0:
|
||||
err:
|
||||
kfree(qp);
|
||||
|
||||
return ERR_PTR(-EFAULT);
|
||||
@ -2085,6 +2003,24 @@ err:
|
||||
return rc;
|
||||
}
|
||||
|
||||
int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
if (qp->qp_type != IB_QPT_GSI) {
|
||||
rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
|
||||
if (qp->ibqp.uobject && qp->ibqp.uobject->context)
|
||||
qedr_cleanup_user(dev, qp);
|
||||
else
|
||||
qedr_cleanup_kernel(dev, qp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int qedr_destroy_qp(struct ib_qp *ibqp)
|
||||
{
|
||||
struct qedr_qp *qp = get_qedr_qp(ibqp);
|
||||
@ -2107,21 +2043,10 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
|
||||
qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
|
||||
}
|
||||
|
||||
if (qp->qp_type != IB_QPT_GSI) {
|
||||
rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
|
||||
if (rc)
|
||||
return rc;
|
||||
} else {
|
||||
if (qp->qp_type == IB_QPT_GSI)
|
||||
qedr_destroy_gsi_qp(dev);
|
||||
}
|
||||
|
||||
if (ibqp->uobject && ibqp->uobject->context) {
|
||||
qedr_cleanup_user_sq(dev, qp);
|
||||
qedr_cleanup_user_rq(dev, qp);
|
||||
} else {
|
||||
qedr_cleanup_kernel_sq(dev, qp);
|
||||
qedr_cleanup_kernel_rq(dev, qp);
|
||||
}
|
||||
qedr_free_qp_resources(dev, qp);
|
||||
|
||||
kfree(qp);
|
||||
|
||||
@ -2182,8 +2107,8 @@ static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
|
||||
goto done;
|
||||
|
||||
info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
|
||||
if (!info->pbl_table) {
|
||||
rc = -ENOMEM;
|
||||
if (IS_ERR(info->pbl_table)) {
|
||||
rc = PTR_ERR(info->pbl_table);
|
||||
goto done;
|
||||
}
|
||||
|
||||
@ -2194,7 +2119,7 @@ static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
|
||||
* list and allocating another one
|
||||
*/
|
||||
tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
|
||||
if (!tmp) {
|
||||
if (IS_ERR(tmp)) {
|
||||
DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
|
||||
goto done;
|
||||
}
|
||||
|
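The init_mr_info() hunk above switches from a NULL check to IS_ERR()/PTR_ERR()
because qedr_alloc_pbl_tbl() now reports failures as an ERR_PTR-encoded
pointer. A generic sketch of that convention; struct foo and both helpers are
made up purely for illustration:

#include <linux/err.h>
#include <linux/slab.h>

struct foo {
	int dummy;
};

/* Allocator encodes the errno in the returned pointer instead of NULL. */
static struct foo *foo_alloc(gfp_t gfp)
{
	struct foo *f = kzalloc(sizeof(*f), gfp);

	if (!f)
		return ERR_PTR(-ENOMEM);
	return f;
}

/* Caller side, matching the IS_ERR()/PTR_ERR() pattern above. */
static int foo_use(void)
{
	struct foo *f = foo_alloc(GFP_KERNEL);

	if (IS_ERR(f))
		return PTR_ERR(f);
	kfree(f);
	return 0;
}
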
@ -742,11 +742,7 @@ struct qib_tid_session_member {
|
||||
#define SIZE_OF_CRC 1
|
||||
|
||||
#define QIB_DEFAULT_P_KEY 0xFFFF
|
||||
#define QIB_AETH_CREDIT_SHIFT 24
|
||||
#define QIB_AETH_CREDIT_MASK 0x1F
|
||||
#define QIB_AETH_CREDIT_INVAL 0x1F
|
||||
#define QIB_PSN_MASK 0xFFFFFF
|
||||
#define QIB_MSN_MASK 0xFFFFFF
|
||||
#define QIB_EAGER_TID_ID QLOGIC_IB_I_TID_MASK
|
||||
#define QIB_MULTICAST_QPN 0xFFFFFF
|
||||
|
||||
|
@ -2893,7 +2893,6 @@ static void qib_setup_7322_cleanup(struct qib_devdata *dd)
|
||||
dd->cspec->gpio_mask &= ~mask;
|
||||
qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
|
||||
spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
|
||||
qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -682,13 +682,6 @@ qib_pci_slot_reset(struct pci_dev *pdev)
|
||||
return PCI_ERS_RESULT_CAN_RECOVER;
|
||||
}
|
||||
|
||||
static pci_ers_result_t
|
||||
qib_pci_link_reset(struct pci_dev *pdev)
|
||||
{
|
||||
qib_devinfo(pdev, "QIB link_reset function called, ignored\n");
|
||||
return PCI_ERS_RESULT_CAN_RECOVER;
|
||||
}
|
||||
|
||||
static void
|
||||
qib_pci_resume(struct pci_dev *pdev)
|
||||
{
|
||||
@ -707,7 +700,6 @@ qib_pci_resume(struct pci_dev *pdev)
|
||||
const struct pci_error_handlers qib_pci_err_handler = {
|
||||
.error_detected = qib_pci_error_detected,
|
||||
.mmio_enabled = qib_pci_mmio_enabled,
|
||||
.link_reset = qib_pci_link_reset,
|
||||
.slot_reset = qib_pci_slot_reset,
|
||||
.resume = qib_pci_resume,
|
||||
};
|
||||
|
@@ -61,43 +61,6 @@ static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
	return off;
}

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,      /* 0 */
	1,      /* 1 */
	2,      /* 2 */
	3,      /* 3 */
	4,      /* 4 */
	6,      /* 5 */
	8,      /* 6 */
	12,     /* 7 */
	16,     /* 8 */
	24,     /* 9 */
	32,     /* A */
	48,     /* B */
	64,     /* C */
	96,     /* D */
	128,    /* E */
	192,    /* F */
	256,    /* 10 */
	384,    /* 11 */
	512,    /* 12 */
	768,    /* 13 */
	1024,   /* 14 */
	1536,   /* 15 */
	2048,   /* 16 */
	3072,   /* 17 */
	4096,   /* 18 */
	6144,   /* 19 */
	8192,   /* 1A */
	12288,  /* 1B */
	16384,  /* 1C */
	24576,  /* 1D */
	32768   /* 1E */
};

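credit_table maps the 5-bit AETH credit syndrome to an actual count of receive
WQEs (roughly exponential past 16); this driver-local copy goes away because
rdmavt now owns the conversion. The encoder direction picks the largest code
whose table value does not exceed the credits actually available, which is
what the binary search in qib_compute_aeth() further down does. A linear-scan
sketch of the same lookup, for illustration only:

/* Illustrative only: map an available-credit count to the largest 5-bit
 * code whose table entry does not exceed it.  The driver does this with a
 * binary search over the same monotonic table. */
static u32 credits_to_aeth_code(const u32 *table, u32 navail)
{
	u32 code = 0;
	u32 i;

	for (i = 0; i < 31; i++) {
		if (table[i] <= navail)
			code = i;
		else
			break;
	}
	return code;	/* caller shifts this into the AETH credit field */
}
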
const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
|
||||
[IB_WR_RDMA_WRITE] = {
|
||||
.length = sizeof(struct ib_rdma_wr),
|
||||
@ -354,66 +317,6 @@ u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
|
||||
return ib_mtu_enum_to_int(pmtu);
|
||||
}
|
||||
|
||||
/**
|
||||
* qib_compute_aeth - compute the AETH (syndrome + MSN)
|
||||
* @qp: the queue pair to compute the AETH for
|
||||
*
|
||||
* Returns the AETH.
|
||||
*/
|
||||
__be32 qib_compute_aeth(struct rvt_qp *qp)
|
||||
{
|
||||
u32 aeth = qp->r_msn & QIB_MSN_MASK;
|
||||
|
||||
if (qp->ibqp.srq) {
|
||||
/*
|
||||
* Shared receive queues don't generate credits.
|
||||
* Set the credit field to the invalid value.
|
||||
*/
|
||||
aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
|
||||
} else {
|
||||
u32 min, max, x;
|
||||
u32 credits;
|
||||
struct rvt_rwq *wq = qp->r_rq.wq;
|
||||
u32 head;
|
||||
u32 tail;
|
||||
|
||||
/* sanity check pointers before trusting them */
|
||||
head = wq->head;
|
||||
if (head >= qp->r_rq.size)
|
||||
head = 0;
|
||||
tail = wq->tail;
|
||||
if (tail >= qp->r_rq.size)
|
||||
tail = 0;
|
||||
/*
|
||||
* Compute the number of credits available (RWQEs).
|
||||
* XXX Not holding the r_rq.lock here so there is a small
|
||||
* chance that the pair of reads are not atomic.
|
||||
*/
|
||||
credits = head - tail;
|
||||
if ((int)credits < 0)
|
||||
credits += qp->r_rq.size;
|
||||
/*
|
||||
* Binary search the credit table to find the code to
|
||||
* use.
|
||||
*/
|
||||
min = 0;
|
||||
max = 31;
|
||||
for (;;) {
|
||||
x = (min + max) / 2;
|
||||
if (credit_table[x] == credits)
|
||||
break;
|
||||
if (credit_table[x] > credits)
|
||||
max = x;
|
||||
else if (min == x)
|
||||
break;
|
||||
else
|
||||
min = x;
|
||||
}
|
||||
aeth |= x << QIB_AETH_CREDIT_SHIFT;
|
||||
}
|
||||
return cpu_to_be32(aeth);
|
||||
}
|
||||
|
||||
void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
|
||||
{
|
||||
struct qib_qp_priv *priv;
|
||||
@ -448,7 +351,6 @@ void qib_stop_send_queue(struct rvt_qp *qp)
|
||||
struct qib_qp_priv *priv = qp->priv;
|
||||
|
||||
cancel_work_sync(&priv->s_work);
|
||||
del_timer_sync(&qp->s_timer);
|
||||
}
|
||||
|
||||
void qib_quiesce_qp(struct rvt_qp *qp)
|
||||
@ -473,43 +375,6 @@ void qib_flush_qp_waiters(struct rvt_qp *qp)
|
||||
spin_unlock(&dev->rdi.pending_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* qib_get_credit - flush the send work queue of a QP
|
||||
* @qp: the qp who's send work queue to flush
|
||||
* @aeth: the Acknowledge Extended Transport Header
|
||||
*
|
||||
* The QP s_lock should be held.
|
||||
*/
|
||||
void qib_get_credit(struct rvt_qp *qp, u32 aeth)
|
||||
{
|
||||
u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;
|
||||
|
||||
/*
|
||||
* If the credit is invalid, we can send
|
||||
* as many packets as we like. Otherwise, we have to
|
||||
* honor the credit field.
|
||||
*/
|
||||
if (credit == QIB_AETH_CREDIT_INVAL) {
|
||||
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
|
||||
qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
|
||||
if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
|
||||
qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
|
||||
qib_schedule_send(qp);
|
||||
}
|
||||
}
|
||||
} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
|
||||
/* Compute new LSN (i.e., MSN + credit) */
|
||||
credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
|
||||
if (qib_cmp24(credit, qp->s_lsn) > 0) {
|
||||
qp->s_lsn = credit;
|
||||
if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
|
||||
qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
|
||||
qib_schedule_send(qp);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
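An AETH word carries the 24-bit MSN in its low bits and a 5-bit credit field
above that; qib_get_credit() above treats the all-ones credit code as
"unlimited". Using the QIB_AETH_CREDIT_SHIFT/MASK values from the qib.h hunk
earlier (24 and 0x1F), the decode is a couple of shifts and masks. A hedged
sketch with local copies of those constants:

#define SKETCH_AETH_CREDIT_SHIFT	24
#define SKETCH_AETH_CREDIT_MASK		0x1F
#define SKETCH_AETH_CREDIT_INVAL	0x1F
#define SKETCH_AETH_MSN_MASK		0xFFFFFF

/* Illustrative decode of a host-order AETH word into its two fields,
 * mirroring the arithmetic in qib_get_credit() above. */
static inline u32 sketch_aeth_credit_code(u32 aeth)
{
	return (aeth >> SKETCH_AETH_CREDIT_SHIFT) & SKETCH_AETH_CREDIT_MASK;
}

static inline u32 sketch_aeth_msn(u32 aeth)
{
	return aeth & SKETCH_AETH_MSN_MASK;
}

static inline bool sketch_aeth_unlimited_credit(u32 aeth)
{
	return sketch_aeth_credit_code(aeth) == SKETCH_AETH_CREDIT_INVAL;
}
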
/**
|
||||
* qib_check_send_wqe - validate wr/wqe
|
||||
* @qp - The qp
|
||||
|
@ -485,16 +485,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
|
||||
dd->f_gpio_mod(dd, mask, mask, mask);
|
||||
}
|
||||
|
||||
void qib_qsfp_deinit(struct qib_qsfp_data *qd)
|
||||
{
|
||||
/*
|
||||
* There is nothing to do here for now. our work is scheduled
|
||||
* with queue_work(), and flush_workqueue() from remove_one
|
||||
* will block until all work setup with queue_work()
|
||||
* completes.
|
||||
*/
|
||||
}
|
||||
|
||||
int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
|
||||
{
|
||||
struct qib_qsfp_cache cd;
|
||||
|
@ -186,4 +186,3 @@ extern int qib_refresh_qsfp_cache(struct qib_pportdata *ppd,
|
||||
extern int qib_qsfp_mod_present(struct qib_pportdata *ppd);
|
||||
extern void qib_qsfp_init(struct qib_qsfp_data *qd,
|
||||
void (*fevent)(struct work_struct *));
|
||||
extern void qib_qsfp_deinit(struct qib_qsfp_data *qd);
|
||||
|
@ -38,7 +38,6 @@
|
||||
/* cut down ridiculously long IB macro names */
|
||||
#define OP(x) IB_OPCODE_RC_##x
|
||||
|
||||
static void rc_timeout(unsigned long arg);
|
||||
|
||||
static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
|
||||
u32 psn, u32 pmtu)
|
||||
@ -50,19 +49,10 @@ static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
|
||||
ss->sg_list = wqe->sg_list + 1;
|
||||
ss->num_sge = wqe->wr.num_sge;
|
||||
ss->total_len = wqe->length;
|
||||
qib_skip_sge(ss, len, 0);
|
||||
rvt_skip_sge(ss, len, false);
|
||||
return wqe->length - len;
|
||||
}
|
||||
|
||||
static void start_timer(struct rvt_qp *qp)
|
||||
{
|
||||
qp->s_flags |= RVT_S_TIMER;
|
||||
qp->s_timer.function = rc_timeout;
|
||||
/* 4.096 usec. * (1 << qp->timeout) */
|
||||
qp->s_timer.expires = jiffies + qp->timeout_jiffies;
|
||||
add_timer(&qp->s_timer);
|
||||
}
|
||||
|
||||
/**
|
||||
* qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
|
||||
* @dev: the device for this QP
|
||||
@ -144,7 +134,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
|
||||
qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
|
||||
e->sent = 1;
|
||||
}
|
||||
ohdr->u.aeth = qib_compute_aeth(qp);
|
||||
ohdr->u.aeth = rvt_compute_aeth(qp);
|
||||
hwords++;
|
||||
qp->s_ack_rdma_psn = e->psn;
|
||||
bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
|
||||
@ -153,7 +143,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
|
||||
qp->s_cur_sge = NULL;
|
||||
len = 0;
|
||||
qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
|
||||
ohdr->u.at.aeth = qib_compute_aeth(qp);
|
||||
ohdr->u.at.aeth = rvt_compute_aeth(qp);
|
||||
ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
|
||||
hwords += sizeof(ohdr->u.at) / sizeof(u32);
|
||||
bth2 = e->psn & QIB_PSN_MASK;
|
||||
@ -174,7 +164,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
|
||||
if (len > pmtu)
|
||||
len = pmtu;
|
||||
else {
|
||||
ohdr->u.aeth = qib_compute_aeth(qp);
|
||||
ohdr->u.aeth = rvt_compute_aeth(qp);
|
||||
hwords++;
|
||||
qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
|
||||
e = &qp->s_ack_queue[qp->s_tail_ack_queue];
|
||||
@ -197,11 +187,11 @@ normal:
|
||||
qp->s_cur_sge = NULL;
|
||||
if (qp->s_nak_state)
|
||||
ohdr->u.aeth =
|
||||
cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
|
||||
cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
|
||||
(qp->s_nak_state <<
|
||||
QIB_AETH_CREDIT_SHIFT));
|
||||
IB_AETH_CREDIT_SHIFT));
|
||||
else
|
||||
ohdr->u.aeth = qib_compute_aeth(qp);
|
||||
ohdr->u.aeth = rvt_compute_aeth(qp);
|
||||
hwords++;
|
||||
len = 0;
|
||||
bth0 = OP(ACKNOWLEDGE) << 24;
|
||||
@ -257,7 +247,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
|
||||
goto bail;
|
||||
/* We are in the error state, flush the work request. */
|
||||
smp_read_barrier_depends(); /* see post_one_send() */
|
||||
if (qp->s_last == ACCESS_ONCE(qp->s_head))
|
||||
if (qp->s_last == READ_ONCE(qp->s_head))
|
||||
goto bail;
|
||||
/* If DMAs are in progress, we can't flush immediately. */
|
||||
if (atomic_read(&priv->s_dma_busy)) {
|
||||
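Several hunks in this file also swap ACCESS_ONCE() for READ_ONCE() when
sampling ring indices written by another context (the
smp_read_barrier_depends() notes point at the pairing in post_one_send()). A
minimal sketch of the pattern with a made-up ring; the point is the single,
non-torn load:

#include <linux/compiler.h>	/* READ_ONCE() */
#include <linux/types.h>

struct sketch_ring {
	u32 head;	/* advanced by the posting side */
	u32 tail;	/* advanced by the sending side */
};

/* Illustrative only: sample the producer index exactly once so the
 * compiler can neither re-read nor tear the load while we compare. */
static bool sketch_ring_has_work(const struct sketch_ring *r)
{
	u32 head = READ_ONCE(r->head);

	return r->tail != head;
}
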
@ -303,7 +293,8 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
|
||||
newreq = 0;
|
||||
if (qp->s_cur == qp->s_tail) {
|
||||
/* Check if send work queue is empty. */
|
||||
if (qp->s_tail == qp->s_head)
|
||||
smp_read_barrier_depends(); /* see post_one_send() */
|
||||
if (qp->s_tail == READ_ONCE(qp->s_head))
|
||||
goto bail;
|
||||
/*
|
||||
* If a fence is requested, wait for previous
|
||||
@ -330,7 +321,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
|
||||
case IB_WR_SEND_WITH_IMM:
|
||||
/* If no credit, return. */
|
||||
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
|
||||
qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
|
||||
rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
|
||||
qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
|
||||
goto bail;
|
||||
}
|
||||
@ -361,7 +352,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
|
||||
case IB_WR_RDMA_WRITE_WITH_IMM:
|
||||
/* If no credit, return. */
|
||||
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
|
||||
qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
|
||||
rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
|
||||
qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
|
||||
goto bail;
|
||||
}
|
||||
@ -657,11 +648,11 @@ void qib_send_rc_ack(struct rvt_qp *qp)
|
||||
if (qp->s_mig_state == IB_MIG_MIGRATED)
|
||||
bth0 |= IB_BTH_MIG_REQ;
|
||||
if (qp->r_nak_state)
|
||||
ohdr->u.aeth = cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
|
||||
ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
|
||||
(qp->r_nak_state <<
|
||||
QIB_AETH_CREDIT_SHIFT));
|
||||
IB_AETH_CREDIT_SHIFT));
|
||||
else
|
||||
ohdr->u.aeth = qib_compute_aeth(qp);
|
||||
ohdr->u.aeth = rvt_compute_aeth(qp);
|
||||
lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
|
||||
qp->remote_ah_attr.sl << 4;
|
||||
hdr.lrh[0] = cpu_to_be16(lrh0);
|
||||
@ -836,7 +827,7 @@ done:
|
||||
* Back up requester to resend the last un-ACKed request.
|
||||
* The QP r_lock and s_lock should be held and interrupts disabled.
|
||||
*/
|
||||
static void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
|
||||
void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
|
||||
{
|
||||
struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
|
||||
struct qib_ibport *ibp;
|
||||
@ -868,46 +859,6 @@ static void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
|
||||
reset_psn(qp, psn);
|
||||
}
|
||||
|
||||
/*
|
||||
* This is called from s_timer for missing responses.
|
||||
*/
|
||||
static void rc_timeout(unsigned long arg)
|
||||
{
|
||||
struct rvt_qp *qp = (struct rvt_qp *)arg;
|
||||
struct qib_ibport *ibp;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&qp->r_lock, flags);
|
||||
spin_lock(&qp->s_lock);
|
||||
if (qp->s_flags & RVT_S_TIMER) {
|
||||
ibp = to_iport(qp->ibqp.device, qp->port_num);
|
||||
ibp->rvp.n_rc_timeouts++;
|
||||
qp->s_flags &= ~RVT_S_TIMER;
|
||||
del_timer(&qp->s_timer);
|
||||
qib_restart_rc(qp, qp->s_last_psn + 1, 1);
|
||||
qib_schedule_send(qp);
|
||||
}
|
||||
spin_unlock(&qp->s_lock);
|
||||
spin_unlock_irqrestore(&qp->r_lock, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* This is called from s_timer for RNR timeouts.
|
||||
*/
|
||||
void qib_rc_rnr_retry(unsigned long arg)
|
||||
{
|
||||
struct rvt_qp *qp = (struct rvt_qp *)arg;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&qp->s_lock, flags);
|
||||
if (qp->s_flags & RVT_S_WAIT_RNR) {
|
||||
qp->s_flags &= ~RVT_S_WAIT_RNR;
|
||||
del_timer(&qp->s_timer);
|
||||
qib_schedule_send(qp);
|
||||
}
|
||||
spin_unlock_irqrestore(&qp->s_lock, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set qp->s_sending_psn to the next PSN after the given one.
|
||||
* This would be psn+1 except when RDMA reads are present.
|
||||
@ -944,7 +895,7 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
|
||||
u32 opcode;
|
||||
u32 psn;
|
||||
|
||||
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
|
||||
if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
|
||||
return;
|
||||
|
||||
/* Find out where the BTH is */
|
||||
@ -971,7 +922,7 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
|
||||
if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
|
||||
!(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
|
||||
(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
|
||||
start_timer(qp);
|
||||
rvt_add_retry_timer(qp);
|
||||
|
||||
while (qp->s_last != qp->s_acked) {
|
||||
u32 s_last;
|
||||
@ -1084,12 +1035,6 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
|
||||
u32 ack_psn;
|
||||
int diff;
|
||||
|
||||
/* Remove QP from retry timer */
|
||||
if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
|
||||
qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
|
||||
del_timer(&qp->s_timer);
|
||||
}
|
||||
|
||||
/*
|
||||
* Note that NAKs implicitly ACK outstanding SEND and RDMA write
|
||||
* requests and implicitly NAK RDMA read and atomic requests issued
|
||||
@ -1097,7 +1042,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
|
||||
* request but will include an ACK'ed request(s).
|
||||
*/
|
||||
ack_psn = psn;
|
||||
if (aeth >> 29)
|
||||
if (aeth >> IB_AETH_NAK_SHIFT)
|
||||
ack_psn--;
|
||||
wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
|
||||
ibp = to_iport(qp->ibqp.device, qp->port_num);
|
||||
@ -1177,7 +1122,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
|
||||
break;
|
||||
}
|
||||
|
||||
switch (aeth >> 29) {
|
||||
switch (aeth >> IB_AETH_NAK_SHIFT) {
|
||||
case 0: /* ACK */
|
||||
this_cpu_inc(*ibp->rvp.rc_acks);
|
||||
if (qp->s_acked != qp->s_tail) {
|
||||
@ -1185,27 +1130,30 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
|
||||
* We are expecting more ACKs so
|
||||
* reset the retransmit timer.
|
||||
*/
|
||||
start_timer(qp);
|
||||
rvt_mod_retry_timer(qp);
|
||||
/*
|
||||
* We can stop resending the earlier packets and
|
||||
* continue with the next packet the receiver wants.
|
||||
*/
|
||||
if (qib_cmp24(qp->s_psn, psn) <= 0)
|
||||
reset_psn(qp, psn + 1);
|
||||
} else if (qib_cmp24(qp->s_psn, psn) <= 0) {
|
||||
qp->s_state = OP(SEND_LAST);
|
||||
qp->s_psn = psn + 1;
|
||||
} else {
|
||||
/* No more acks - kill all timers */
|
||||
rvt_stop_rc_timers(qp);
|
||||
if (qib_cmp24(qp->s_psn, psn) <= 0) {
|
||||
qp->s_state = OP(SEND_LAST);
|
||||
qp->s_psn = psn + 1;
|
||||
}
|
||||
}
|
||||
if (qp->s_flags & RVT_S_WAIT_ACK) {
|
||||
qp->s_flags &= ~RVT_S_WAIT_ACK;
|
||||
qib_schedule_send(qp);
|
||||
}
|
||||
qib_get_credit(qp, aeth);
|
||||
rvt_get_credit(qp, aeth);
|
||||
qp->s_rnr_retry = qp->s_rnr_retry_cnt;
|
||||
qp->s_retry = qp->s_retry_cnt;
|
||||
update_last_psn(qp, psn);
|
||||
ret = 1;
|
||||
goto bail;
|
||||
return 1;
|
||||
|
||||
case 1: /* RNR NAK */
|
||||
ibp->rvp.n_rnr_naks++;
|
||||
@ -1228,21 +1176,17 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
|
||||
reset_psn(qp, psn);
|
||||
|
||||
qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
|
||||
qp->s_flags |= RVT_S_WAIT_RNR;
|
||||
qp->s_timer.function = qib_rc_rnr_retry;
|
||||
qp->s_timer.expires = jiffies + usecs_to_jiffies(
|
||||
ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) &
|
||||
QIB_AETH_CREDIT_MASK]);
|
||||
add_timer(&qp->s_timer);
|
||||
goto bail;
|
||||
rvt_stop_rc_timers(qp);
|
||||
rvt_add_rnr_timer(qp, aeth);
|
||||
return 0;
|
||||
|
||||
case 3: /* NAK */
|
||||
if (qp->s_acked == qp->s_tail)
|
||||
goto bail;
|
||||
/* The last valid PSN is the previous PSN. */
|
||||
update_last_psn(qp, psn - 1);
|
||||
switch ((aeth >> QIB_AETH_CREDIT_SHIFT) &
|
||||
QIB_AETH_CREDIT_MASK) {
|
||||
switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
|
||||
IB_AETH_CREDIT_MASK) {
|
||||
case 0: /* PSN sequence error */
|
||||
ibp->rvp.n_seq_naks++;
|
||||
/*
|
||||
@ -1290,6 +1234,7 @@ reserved:
|
||||
}
|
||||
|
||||
bail:
|
||||
rvt_stop_rc_timers(qp);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1303,10 +1248,7 @@ static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
|
||||
struct rvt_swqe *wqe;
|
||||
|
||||
/* Remove QP from retry timer */
|
||||
if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
|
||||
qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
|
||||
del_timer(&qp->s_timer);
|
||||
}
|
||||
rvt_stop_rc_timers(qp);
|
||||
|
||||
wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
|
||||
|
||||
@ -1390,7 +1332,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
|
||||
|
||||
/* Ignore invalid responses. */
|
||||
smp_read_barrier_depends(); /* see post_one_send */
|
||||
if (qib_cmp24(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0)
|
||||
if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0)
|
||||
goto ack_done;
|
||||
|
||||
/* Ignore duplicate responses. */
|
||||
@ -1399,8 +1341,8 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
|
||||
/* Update credits for "ghost" ACKs */
|
||||
if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
|
||||
aeth = be32_to_cpu(ohdr->u.aeth);
|
||||
if ((aeth >> 29) == 0)
|
||||
qib_get_credit(qp, aeth);
|
||||
if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
|
||||
rvt_get_credit(qp, aeth);
|
||||
}
|
||||
goto ack_done;
|
||||
}
|
||||
@ -1461,8 +1403,7 @@ read_middle:
|
||||
* We got a response so update the timeout.
|
||||
* 4.096 usec. * (1 << qp->timeout)
|
||||
*/
|
||||
qp->s_flags |= RVT_S_TIMER;
|
||||
mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
|
||||
rvt_mod_retry_timer(qp);
|
||||
if (qp->s_flags & RVT_S_WAIT_ACK) {
|
||||
qp->s_flags &= ~RVT_S_WAIT_ACK;
|
||||
qib_schedule_send(qp);
|
||||
@ -1764,25 +1705,6 @@ send_ack:
|
||||
return 0;
|
||||
}
|
||||
|
||||
void qib_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
|
||||
{
|
||||
unsigned long flags;
|
||||
int lastwqe;
|
||||
|
||||
spin_lock_irqsave(&qp->s_lock, flags);
|
||||
lastwqe = rvt_error_qp(qp, err);
|
||||
spin_unlock_irqrestore(&qp->s_lock, flags);
|
||||
|
||||
if (lastwqe) {
|
||||
struct ib_event ev;
|
||||
|
||||
ev.device = qp->ibqp.device;
|
||||
ev.element.qp = &qp->ibqp;
|
||||
ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
|
||||
qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n)
|
||||
{
|
||||
unsigned next;
|
||||
@ -1894,17 +1816,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
|
||||
break;
|
||||
}
|
||||
|
||||
if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) {
|
||||
qp->r_flags |= RVT_R_COMM_EST;
|
||||
if (qp->ibqp.event_handler) {
|
||||
struct ib_event ev;
|
||||
|
||||
ev.device = qp->ibqp.device;
|
||||
ev.element.qp = &qp->ibqp;
|
||||
ev.event = IB_EVENT_COMM_EST;
|
||||
qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
|
||||
}
|
||||
}
|
||||
if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
|
||||
rvt_comm_est(qp);
|
||||
|
||||
/* OK, process the packet. */
|
||||
switch (opcode) {
|
||||
@ -2196,7 +2109,7 @@ rnr_nak:
|
||||
return;
|
||||
|
||||
nack_op_err:
|
||||
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
|
||||
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
|
||||
qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
|
||||
qp->r_ack_psn = qp->r_psn;
|
||||
/* Queue NAK for later */
|
||||
@ -2210,7 +2123,7 @@ nack_op_err:
|
||||
nack_inv_unlck:
|
||||
spin_unlock_irqrestore(&qp->s_lock, flags);
|
||||
nack_inv:
|
||||
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
|
||||
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
|
||||
qp->r_nak_state = IB_NAK_INVALID_REQUEST;
|
||||
qp->r_ack_psn = qp->r_psn;
|
||||
/* Queue NAK for later */
|
||||
@ -2224,7 +2137,7 @@ nack_inv:
|
||||
nack_acc_unlck:
|
||||
spin_unlock_irqrestore(&qp->s_lock, flags);
|
||||
nack_acc:
|
||||
qib_rc_error(qp, IB_WC_LOC_PROT_ERR);
|
||||
rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
|
||||
qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
|
||||
qp->r_ack_psn = qp->r_psn;
|
||||
send_ack:
|
||||
|
@@ -37,44 +37,6 @@
 #include "qib.h"
 #include "qib_mad.h"

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
const u32 ib_qib_rnr_table[32] = {
	655360, /* 00: 655.36 */
	10,     /* 01: .01 */
	20,     /* 02 .02 */
	30,     /* 03: .03 */
	40,     /* 04: .04 */
	60,     /* 05: .06 */
	80,     /* 06: .08 */
	120,    /* 07: .12 */
	160,    /* 08: .16 */
	240,    /* 09: .24 */
	320,    /* 0A: .32 */
	480,    /* 0B: .48 */
	640,    /* 0C: .64 */
	960,    /* 0D: .96 */
	1280,   /* 0E: 1.28 */
	1920,   /* 0F: 1.92 */
	2560,   /* 10: 2.56 */
	3840,   /* 11: 3.84 */
	5120,   /* 12: 5.12 */
	7680,   /* 13: 7.68 */
	10240,  /* 14: 10.24 */
	15360,  /* 15: 15.36 */
	20480,  /* 16: 20.48 */
	30720,  /* 17: 30.72 */
	40960,  /* 18: 40.96 */
	61440,  /* 19: 61.44 */
	81920,  /* 1A: 81.92 */
	122880, /* 1B: 122.88 */
	163840, /* 1C: 163.84 */
	245760, /* 1D: 245.76 */
	327680, /* 1E: 327.68 */
	491520  /* 1F: 491.52 */
};

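ib_qib_rnr_table maps the 5-bit RNR-NAK timer code to a delay in microseconds
(code 0 is the largest, 655.36 ms); the table and the open-coded timer arming
disappear here because rvt_add_rnr_timer() now takes the code, shifted into
the AETH credit position, and does the conversion inside rdmavt. A sketch of
the old-style conversion, assuming a 32-entry microsecond table like the one
above:

#include <linux/jiffies.h>
#include <linux/types.h>

/* Illustrative only: turn a 5-bit RNR timer code into a jiffies delay the
 * way the removed qib code did before rvt_add_rnr_timer() took over. */
static unsigned long rnr_code_to_jiffies(const u32 *rnr_usec_table, u8 code)
{
	return usecs_to_jiffies(rnr_usec_table[code & 0x1F]);
}
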
/*
|
||||
* Validate a RWQE and fill in the SGE state.
|
||||
* Return 1 if OK.
|
||||
@ -599,11 +561,8 @@ rnr_nak:
|
||||
spin_lock_irqsave(&sqp->s_lock, flags);
|
||||
if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
|
||||
goto clr_busy;
|
||||
sqp->s_flags |= RVT_S_WAIT_RNR;
|
||||
sqp->s_timer.function = qib_rc_rnr_retry;
|
||||
sqp->s_timer.expires = jiffies +
|
||||
usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
|
||||
add_timer(&sqp->s_timer);
|
||||
rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
|
||||
IB_AETH_CREDIT_SHIFT);
|
||||
goto clr_busy;
|
||||
|
||||
op_err:
|
||||
@ -621,7 +580,7 @@ acc_err:
|
||||
wc.status = IB_WC_LOC_PROT_ERR;
|
||||
err:
|
||||
/* responder goes to error state */
|
||||
qib_rc_error(qp, wc.status);
|
||||
rvt_rc_error(qp, wc.status);
|
||||
|
||||
serr:
|
||||
spin_lock_irqsave(&sqp->s_lock, flags);
|
||||
|
@ -325,17 +325,8 @@ inv:
|
||||
goto inv;
|
||||
}
|
||||
|
||||
if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) {
|
||||
qp->r_flags |= RVT_R_COMM_EST;
|
||||
if (qp->ibqp.event_handler) {
|
||||
struct ib_event ev;
|
||||
|
||||
ev.device = qp->ibqp.device;
|
||||
ev.element.qp = &qp->ibqp;
|
||||
ev.event = IB_EVENT_COMM_EST;
|
||||
qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
|
||||
}
|
||||
}
|
||||
if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
|
||||
rvt_comm_est(qp);
|
||||
|
||||
/* OK, process the packet. */
|
||||
switch (opcode) {
|
||||
@ -527,7 +518,7 @@ drop:
|
||||
return;
|
||||
|
||||
op_err:
|
||||
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
|
||||
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
|
||||
return;
|
||||
|
||||
}
|
||||
|
@ -152,7 +152,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
|
||||
|
||||
ret = qib_get_rwqe(qp, 0);
|
||||
if (ret < 0) {
|
||||
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
|
||||
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
|
||||
goto bail_unlock;
|
||||
}
|
||||
if (!ret) {
|
||||
@ -177,7 +177,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
|
||||
sizeof(grh), 1);
|
||||
wc.wc_flags |= IB_WC_GRH;
|
||||
} else
|
||||
qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
|
||||
rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
|
||||
ssge.sg_list = swqe->sg_list + 1;
|
||||
ssge.sge = *swqe->sg_list;
|
||||
ssge.num_sge = swqe->wr.num_sge;
|
||||
@ -548,7 +548,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
|
||||
|
||||
ret = qib_get_rwqe(qp, 0);
|
||||
if (ret < 0) {
|
||||
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
|
||||
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
|
||||
return;
|
||||
}
|
||||
if (!ret) {
|
||||
@ -567,7 +567,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
|
||||
sizeof(struct ib_grh), 1);
|
||||
wc.wc_flags |= IB_WC_GRH;
|
||||
} else
|
||||
qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
|
||||
rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
|
||||
qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
|
||||
rvt_put_ss(&qp->r_sge);
|
||||
if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
|
||||
|
@ -144,8 +144,8 @@ qib_user_sdma_rb_search(struct rb_root *root, pid_t pid)
|
||||
struct rb_node *node = root->rb_node;
|
||||
|
||||
while (node) {
|
||||
sdma_rb_node = container_of(node,
|
||||
struct qib_user_sdma_rb_node, node);
|
||||
sdma_rb_node = rb_entry(node, struct qib_user_sdma_rb_node,
|
||||
node);
|
||||
if (pid < sdma_rb_node->pid)
|
||||
node = node->rb_left;
|
||||
else if (pid > sdma_rb_node->pid)
|
||||
@ -164,7 +164,7 @@ qib_user_sdma_rb_insert(struct rb_root *root, struct qib_user_sdma_rb_node *new)
|
||||
struct qib_user_sdma_rb_node *got;
|
||||
|
||||
while (*node) {
|
||||
got = container_of(*node, struct qib_user_sdma_rb_node, node);
|
||||
got = rb_entry(*node, struct qib_user_sdma_rb_node, node);
|
||||
parent = *node;
|
||||
if (new->pid < got->pid)
|
||||
node = &((*node)->rb_left);
|
||||
|
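The qib_user_sdma hunks above replace container_of() with rb_entry(), which is
simply the rbtree-flavoured alias for the same macro; the change is cosmetic.
A self-contained sketch of an rbtree search written with rb_entry(); the node
and struct names are illustrative:

#include <linux/rbtree.h>

struct sdma_sketch_node {
	struct rb_node node;
	pid_t pid;
};

/* rb_entry(ptr, type, member) is defined as container_of(ptr, type, member);
 * using it just states the intent that 'ptr' is an rb_node. */
static struct sdma_sketch_node *sketch_search(struct rb_root *root, pid_t pid)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct sdma_sketch_node *e =
			rb_entry(n, struct sdma_sketch_node, node);

		if (pid < e->pid)
			n = n->rb_left;
		else if (pid > e->pid)
			n = n->rb_right;
		else
			return e;
	}
	return NULL;
}
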
@ -129,78 +129,16 @@ void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release)
|
||||
struct rvt_sge *sge = &ss->sge;
|
||||
|
||||
while (length) {
|
||||
u32 len = sge->length;
|
||||
u32 len = rvt_get_sge_length(sge, length);
|
||||
|
||||
if (len > length)
|
||||
len = length;
|
||||
if (len > sge->sge_length)
|
||||
len = sge->sge_length;
|
||||
BUG_ON(len == 0);
|
||||
WARN_ON_ONCE(len == 0);
|
||||
memcpy(sge->vaddr, data, len);
|
||||
sge->vaddr += len;
|
||||
sge->length -= len;
|
||||
sge->sge_length -= len;
|
||||
if (sge->sge_length == 0) {
|
||||
if (release)
|
||||
rvt_put_mr(sge->mr);
|
||||
if (--ss->num_sge)
|
||||
*sge = *ss->sg_list++;
|
||||
} else if (sge->length == 0 && sge->mr->lkey) {
|
||||
if (++sge->n >= RVT_SEGSZ) {
|
||||
if (++sge->m >= sge->mr->mapsz)
|
||||
break;
|
||||
sge->n = 0;
|
||||
}
|
||||
sge->vaddr =
|
||||
sge->mr->map[sge->m]->segs[sge->n].vaddr;
|
||||
sge->length =
|
||||
sge->mr->map[sge->m]->segs[sge->n].length;
|
||||
}
|
||||
rvt_update_sge(ss, len, release);
|
||||
data += len;
|
||||
length -= len;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
|
||||
* @ss: the SGE state
|
||||
* @length: the number of bytes to skip
|
||||
*/
|
||||
void qib_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
|
||||
{
|
||||
struct rvt_sge *sge = &ss->sge;
|
||||
|
||||
while (length) {
|
||||
u32 len = sge->length;
|
||||
|
||||
if (len > length)
|
||||
len = length;
|
||||
if (len > sge->sge_length)
|
||||
len = sge->sge_length;
|
||||
BUG_ON(len == 0);
|
||||
sge->vaddr += len;
|
||||
sge->length -= len;
|
||||
sge->sge_length -= len;
|
||||
if (sge->sge_length == 0) {
|
||||
if (release)
|
||||
rvt_put_mr(sge->mr);
|
||||
if (--ss->num_sge)
|
||||
*sge = *ss->sg_list++;
|
||||
} else if (sge->length == 0 && sge->mr->lkey) {
|
||||
if (++sge->n >= RVT_SEGSZ) {
|
||||
if (++sge->m >= sge->mr->mapsz)
|
||||
break;
|
||||
sge->n = 0;
|
||||
}
|
||||
sge->vaddr =
|
||||
sge->mr->map[sge->m]->segs[sge->n].vaddr;
|
||||
sge->length =
|
||||
sge->mr->map[sge->m]->segs[sge->n].length;
|
||||
}
|
||||
length -= len;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Count the number of DMA descriptors needed to send length bytes of data.
|
||||
* Don't modify the qib_sge_state to get the count.
|
||||
@ -468,27 +406,6 @@ static void mem_timer(unsigned long data)
|
||||
}
|
||||
}
|
||||
|
||||
static void update_sge(struct rvt_sge_state *ss, u32 length)
|
||||
{
|
||||
struct rvt_sge *sge = &ss->sge;
|
||||
|
||||
sge->vaddr += length;
|
||||
sge->length -= length;
|
||||
sge->sge_length -= length;
|
||||
if (sge->sge_length == 0) {
|
||||
if (--ss->num_sge)
|
||||
*sge = *ss->sg_list++;
|
||||
} else if (sge->length == 0 && sge->mr->lkey) {
|
||||
if (++sge->n >= RVT_SEGSZ) {
|
||||
if (++sge->m >= sge->mr->mapsz)
|
||||
return;
|
||||
sge->n = 0;
|
||||
}
|
||||
sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
|
||||
sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef __LITTLE_ENDIAN
|
||||
static inline u32 get_upper_bits(u32 data, u32 shift)
|
||||
{
|
||||
@ -646,11 +563,11 @@ static void copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
|
||||
data = clear_upper_bytes(v, extra, 0);
|
||||
}
|
||||
}
|
||||
update_sge(ss, len);
|
||||
rvt_update_sge(ss, len, false);
|
||||
length -= len;
|
||||
}
|
||||
/* Update address before sending packet. */
|
||||
update_sge(ss, length);
|
||||
rvt_update_sge(ss, length, false);
|
||||
if (flush_wc) {
|
||||
/* must flush early everything before trigger word */
|
||||
qib_flush_wc();
|
||||
@ -1069,7 +986,7 @@ static int qib_verbs_send_pio(struct rvt_qp *qp, struct ib_header *ibhdr,
|
||||
u32 *addr = (u32 *) ss->sge.vaddr;
|
||||
|
||||
/* Update address before sending packet. */
|
||||
update_sge(ss, len);
|
||||
rvt_update_sge(ss, len, false);
|
||||
if (flush_wc) {
|
||||
qib_pio_copy(piobuf, addr, dwords - 1);
|
||||
/* must flush early everything before trigger word */
|
||||
@ -1659,6 +1576,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
|
||||
dd->verbs_dev.rdi.driver_f.stop_send_queue = qib_stop_send_queue;
|
||||
dd->verbs_dev.rdi.driver_f.flush_qp_waiters = qib_flush_qp_waiters;
|
||||
dd->verbs_dev.rdi.driver_f.notify_error_qp = qib_notify_error_qp;
|
||||
dd->verbs_dev.rdi.driver_f.notify_restart_rc = qib_restart_rc;
|
||||
dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = qib_mtu_to_path_mtu;
|
||||
dd->verbs_dev.rdi.driver_f.mtu_from_qp = qib_mtu_from_qp;
|
||||
dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = qib_get_pmtu_from_attr;
|
||||
|
@ -270,8 +270,6 @@ int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
|
||||
int qib_get_counters(struct qib_pportdata *ppd,
|
||||
struct qib_verbs_counters *cntrs);
|
||||
|
||||
__be32 qib_compute_aeth(struct rvt_qp *qp);
|
||||
|
||||
/*
|
||||
* Functions provided by qib driver for rdmavt to use
|
||||
*/
|
||||
@ -281,7 +279,7 @@ void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
|
||||
void qib_notify_qp_reset(struct rvt_qp *qp);
|
||||
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
|
||||
enum ib_qp_type type, u8 port, gfp_t gfp);
|
||||
|
||||
void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
|
||||
struct qib_qp_iter;
|
||||
@ -294,8 +292,6 @@ void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter);
|
||||
|
||||
#endif
|
||||
|
||||
void qib_get_credit(struct rvt_qp *qp, u32 aeth);
|
||||
|
||||
unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);
|
||||
|
||||
void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);
|
||||
@ -308,8 +304,6 @@ int qib_verbs_send(struct rvt_qp *qp, struct ib_header *hdr,
|
||||
void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length,
|
||||
int release);
|
||||
|
||||
void qib_skip_sge(struct rvt_sge_state *ss, u32 length, int release);
|
||||
|
||||
void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
|
||||
int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
|
||||
|
||||
@ -326,8 +320,6 @@ void qib_rc_rnr_retry(unsigned long arg);
|
||||
|
||||
void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr);
|
||||
|
||||
void qib_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
|
||||
|
||||
int qib_post_ud_send(struct rvt_qp *qp, struct ib_send_wr *wr);
|
||||
|
||||
void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
|
||||
|
@ -34,7 +34,6 @@
|
||||
#ifndef USNIC_CMN_PKT_HDR_H
|
||||
#define USNIC_CMN_PKT_HDR_H
|
||||
|
||||
#define USNIC_ROCE_ETHERTYPE (0x8915)
|
||||
#define USNIC_ROCE_GRH_VER (8)
|
||||
#define USNIC_PROTO_VER (1)
|
||||
#define USNIC_ROCE_GRH_VER_SHIFT (4)
|
||||
|
@ -36,6 +36,7 @@
|
||||
|
||||
#include <linux/if.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/if_ether.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/in.h>
|
||||
|
||||
@ -97,7 +98,7 @@ static inline void usnic_fwd_init_usnic_filter(struct filter *filter,
|
||||
uint32_t usnic_id)
|
||||
{
|
||||
filter->type = FILTER_USNIC_ID;
|
||||
filter->u.usnic.ethtype = USNIC_ROCE_ETHERTYPE;
|
||||
filter->u.usnic.ethtype = ETH_P_IBOE;
|
||||
filter->u.usnic.flags = FILTER_FIELD_USNIC_ETHTYPE |
|
||||
FILTER_FIELD_USNIC_ID |
|
||||
FILTER_FIELD_USNIC_PROTO;
|
||||
|
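This hunk is part of the ETH_P_IBOE cleanup: usnic drops its private
USNIC_ROCE_ETHERTYPE (0x8915, removed in the header hunk above) in favour of
the shared define from the kernel's if_ether.h UAPI header, which carries the
same RoCE v1 ethertype. For reference, the shared define is believed to be:

#define ETH_P_IBOE	0x8915	/* RoCE v1 / InfiniBand over Ethernet ethertype */
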
@ -196,13 +196,7 @@ struct pvrdma_dev {
|
||||
spinlock_t cmd_lock; /* Command lock. */
|
||||
struct semaphore cmd_sema;
|
||||
struct completion cmd_done;
|
||||
struct {
|
||||
enum pvrdma_intr_type type; /* Intr type */
|
||||
struct msix_entry msix_entry[PVRDMA_MAX_INTERRUPTS];
|
||||
irq_handler_t handler[PVRDMA_MAX_INTERRUPTS];
|
||||
u8 enabled[PVRDMA_MAX_INTERRUPTS];
|
||||
u8 size;
|
||||
} intr;
|
||||
unsigned int nr_vectors;
|
||||
|
||||
/* RDMA-related device information. */
|
||||
union ib_gid *sgid_tbl;
|
||||
|
@ -373,7 +373,7 @@ retry:
|
||||
wc->sl = cqe->sl;
|
||||
wc->dlid_path_bits = cqe->dlid_path_bits;
|
||||
wc->port_num = cqe->port_num;
|
||||
wc->vendor_err = 0;
|
||||
wc->vendor_err = cqe->vendor_err;
|
||||
|
||||
/* Update shared ring state */
|
||||
pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
|
||||
|
@ -149,12 +149,6 @@ enum pvrdma_intr_cause {
|
||||
PVRDMA_INTR_CAUSE_CQ = (1 << PVRDMA_INTR_VECTOR_CQ),
|
||||
};
|
||||
|
||||
enum pvrdma_intr_type {
|
||||
PVRDMA_INTR_TYPE_INTX, /* Legacy. */
|
||||
PVRDMA_INTR_TYPE_MSI, /* MSI. */
|
||||
PVRDMA_INTR_TYPE_MSIX, /* MSI-X. */
|
||||
};
|
||||
|
||||
enum pvrdma_gos_bits {
|
||||
PVRDMA_GOS_BITS_UNK, /* Unknown. */
|
||||
PVRDMA_GOS_BITS_32, /* 32-bit. */
|
||||
|
@ -282,7 +282,7 @@ static irqreturn_t pvrdma_intr0_handler(int irq, void *dev_id)

dev_dbg(&dev->pdev->dev, "interrupt 0 (response) handler\n");

if (dev->intr.type != PVRDMA_INTR_TYPE_MSIX) {
if (!dev->pdev->msix_enabled) {
/* Legacy intr */
icr = pvrdma_read_reg(dev, PVRDMA_REG_ICR);
if (icr == 0)
@ -489,31 +489,13 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}

static void pvrdma_disable_msi_all(struct pvrdma_dev *dev)
{
if (dev->intr.type == PVRDMA_INTR_TYPE_MSIX)
pci_disable_msix(dev->pdev);
else if (dev->intr.type == PVRDMA_INTR_TYPE_MSI)
pci_disable_msi(dev->pdev);
}

static void pvrdma_free_irq(struct pvrdma_dev *dev)
{
int i;

dev_dbg(&dev->pdev->dev, "freeing interrupts\n");

if (dev->intr.type == PVRDMA_INTR_TYPE_MSIX) {
for (i = 0; i < dev->intr.size; i++) {
if (dev->intr.enabled[i]) {
free_irq(dev->intr.msix_entry[i].vector, dev);
dev->intr.enabled[i] = 0;
}
}
} else if (dev->intr.type == PVRDMA_INTR_TYPE_INTX ||
dev->intr.type == PVRDMA_INTR_TYPE_MSI) {
free_irq(dev->pdev->irq, dev);
}
for (i = 0; i < dev->nr_vectors; i++)
free_irq(pci_irq_vector(dev->pdev, i), dev);
}
|
||||
|
||||
static void pvrdma_enable_intrs(struct pvrdma_dev *dev)
|
||||
@ -528,126 +510,48 @@ static void pvrdma_disable_intrs(struct pvrdma_dev *dev)
|
||||
pvrdma_write_reg(dev, PVRDMA_REG_IMR, ~0);
|
||||
}
|
||||
|
||||
static int pvrdma_enable_msix(struct pci_dev *pdev, struct pvrdma_dev *dev)
|
||||
{
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
for (i = 0; i < PVRDMA_MAX_INTERRUPTS; i++) {
|
||||
dev->intr.msix_entry[i].entry = i;
|
||||
dev->intr.msix_entry[i].vector = i;
|
||||
|
||||
switch (i) {
|
||||
case 0:
|
||||
/* CMD ring handler */
|
||||
dev->intr.handler[i] = pvrdma_intr0_handler;
|
||||
break;
|
||||
case 1:
|
||||
/* Async event ring handler */
|
||||
dev->intr.handler[i] = pvrdma_intr1_handler;
|
||||
break;
|
||||
default:
|
||||
/* Completion queue handler */
|
||||
dev->intr.handler[i] = pvrdma_intrx_handler;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
ret = pci_enable_msix(pdev, dev->intr.msix_entry,
|
||||
PVRDMA_MAX_INTERRUPTS);
|
||||
if (!ret) {
|
||||
dev->intr.type = PVRDMA_INTR_TYPE_MSIX;
|
||||
dev->intr.size = PVRDMA_MAX_INTERRUPTS;
|
||||
} else if (ret > 0) {
|
||||
ret = pci_enable_msix(pdev, dev->intr.msix_entry, ret);
|
||||
if (!ret) {
|
||||
dev->intr.type = PVRDMA_INTR_TYPE_MSIX;
|
||||
dev->intr.size = ret;
|
||||
} else {
|
||||
dev->intr.size = 0;
|
||||
}
|
||||
}
|
||||
|
||||
dev_dbg(&pdev->dev, "using interrupt type %d, size %d\n",
|
||||
dev->intr.type, dev->intr.size);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int pvrdma_alloc_intrs(struct pvrdma_dev *dev)
|
||||
{
|
||||
int ret = 0;
|
||||
int i;
|
||||
struct pci_dev *pdev = dev->pdev;
|
||||
int ret = 0, i;
|
||||
|
||||
if (pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX) &&
|
||||
pvrdma_enable_msix(dev->pdev, dev)) {
|
||||
/* Try MSI */
|
||||
ret = pci_enable_msi(dev->pdev);
|
||||
if (!ret) {
|
||||
dev->intr.type = PVRDMA_INTR_TYPE_MSI;
|
||||
} else {
|
||||
/* Legacy INTR */
|
||||
dev->intr.type = PVRDMA_INTR_TYPE_INTX;
|
||||
}
|
||||
ret = pci_alloc_irq_vectors(pdev, 1, PVRDMA_MAX_INTERRUPTS,
|
||||
PCI_IRQ_MSIX);
|
||||
if (ret < 0) {
|
||||
ret = pci_alloc_irq_vectors(pdev, 1, 1,
|
||||
PCI_IRQ_MSI | PCI_IRQ_LEGACY);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
dev->nr_vectors = ret;
|
||||
|
||||
ret = request_irq(pci_irq_vector(dev->pdev, 0), pvrdma_intr0_handler,
|
||||
pdev->msix_enabled ? 0 : IRQF_SHARED, DRV_NAME, dev);
|
||||
if (ret) {
|
||||
dev_err(&dev->pdev->dev,
|
||||
"failed to request interrupt 0\n");
|
||||
goto out_free_vectors;
|
||||
}
|
||||
|
||||
/* Request First IRQ */
|
||||
switch (dev->intr.type) {
|
||||
case PVRDMA_INTR_TYPE_INTX:
|
||||
case PVRDMA_INTR_TYPE_MSI:
|
||||
ret = request_irq(dev->pdev->irq, pvrdma_intr0_handler,
|
||||
IRQF_SHARED, DRV_NAME, dev);
|
||||
for (i = 1; i < dev->nr_vectors; i++) {
|
||||
ret = request_irq(pci_irq_vector(dev->pdev, i),
|
||||
i == 1 ? pvrdma_intr1_handler :
|
||||
pvrdma_intrx_handler,
|
||||
0, DRV_NAME, dev);
|
||||
if (ret) {
|
||||
dev_err(&dev->pdev->dev,
|
||||
"failed to request interrupt\n");
|
||||
goto disable_msi;
|
||||
}
|
||||
break;
|
||||
case PVRDMA_INTR_TYPE_MSIX:
|
||||
ret = request_irq(dev->intr.msix_entry[0].vector,
|
||||
pvrdma_intr0_handler, 0, DRV_NAME, dev);
|
||||
if (ret) {
|
||||
dev_err(&dev->pdev->dev,
|
||||
"failed to request interrupt 0\n");
|
||||
goto disable_msi;
|
||||
}
|
||||
dev->intr.enabled[0] = 1;
|
||||
break;
|
||||
default:
|
||||
/* Not reached */
|
||||
break;
|
||||
}
|
||||
|
||||
/* For MSIX: request intr for each vector */
|
||||
if (dev->intr.size > 1) {
|
||||
ret = request_irq(dev->intr.msix_entry[1].vector,
|
||||
pvrdma_intr1_handler, 0, DRV_NAME, dev);
|
||||
if (ret) {
|
||||
dev_err(&dev->pdev->dev,
|
||||
"failed to request interrupt 1\n");
|
||||
goto free_irq;
|
||||
}
|
||||
dev->intr.enabled[1] = 1;
|
||||
|
||||
for (i = 2; i < dev->intr.size; i++) {
|
||||
ret = request_irq(dev->intr.msix_entry[i].vector,
|
||||
pvrdma_intrx_handler, 0,
|
||||
DRV_NAME, dev);
|
||||
if (ret) {
|
||||
dev_err(&dev->pdev->dev,
|
||||
"failed to request interrupt %d\n", i);
|
||||
goto free_irq;
|
||||
}
|
||||
dev->intr.enabled[i] = 1;
|
||||
"failed to request interrupt %d\n", i);
|
||||
goto free_irqs;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
free_irq:
|
||||
pvrdma_free_irq(dev);
|
||||
disable_msi:
|
||||
pvrdma_disable_msi_all(dev);
|
||||
free_irqs:
|
||||
while (--i >= 0)
|
||||
free_irq(pci_irq_vector(dev->pdev, i), dev);
|
||||
out_free_vectors:
|
||||
pci_free_irq_vectors(pdev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1091,7 +995,7 @@ err_free_uar_table:
|
||||
pvrdma_uar_table_cleanup(dev);
|
||||
err_free_intrs:
|
||||
pvrdma_free_irq(dev);
|
||||
pvrdma_disable_msi_all(dev);
|
||||
pci_free_irq_vectors(pdev);
|
||||
err_free_cq_ring:
|
||||
pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
|
||||
err_free_async_ring:
|
||||
@ -1141,7 +1045,7 @@ static void pvrdma_pci_remove(struct pci_dev *pdev)
|
||||
|
||||
pvrdma_disable_intrs(dev);
|
||||
pvrdma_free_irq(dev);
|
||||
pvrdma_disable_msi_all(dev);
|
||||
pci_free_irq_vectors(pdev);
|
||||
|
||||
/* Deactivate pvrdma device */
|
||||
pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_RESET);
|
||||
|
@ -151,7 +151,7 @@ static int pvrdma_set_rq_size(struct pvrdma_dev *dev,
|
||||
}
|
||||
|
||||
static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap,
|
||||
enum ib_qp_type type, struct pvrdma_qp *qp)
|
||||
struct pvrdma_qp *qp)
|
||||
{
|
||||
if (req_cap->max_send_wr > dev->dsr->caps.max_qp_wr ||
|
||||
req_cap->max_send_sge > dev->dsr->caps.max_sge) {
|
||||
@ -276,8 +276,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
|
||||
qp->is_kernel = true;
|
||||
|
||||
ret = pvrdma_set_sq_size(to_vdev(pd->device),
|
||||
&init_attr->cap,
|
||||
init_attr->qp_type, qp);
|
||||
&init_attr->cap, qp);
|
||||
if (ret)
|
||||
goto err_qp;
|
||||
|
||||
|
@ -7,7 +7,7 @@
#
obj-$(CONFIG_INFINIBAND_RDMAVT) += rdmavt.o

rdmavt-y := vt.o ah.o cq.o dma.o mad.o mcast.o mmap.o mr.o pd.o qp.o srq.o \
trace.o
rdmavt-y := vt.o ah.o cq.o dma.o mad.o mcast.o mmap.o mr.o pd.o qp.o \
rc.o srq.o trace.o

CFLAGS_trace.o = -I$(src)

@ -120,10 +120,19 @@ static void rvt_deinit_mregion(struct rvt_mregion *mr)
|
||||
mr->mapsz = 0;
|
||||
while (i)
|
||||
kfree(mr->map[--i]);
|
||||
percpu_ref_exit(&mr->refcount);
|
||||
}
|
||||
|
||||
static void __rvt_mregion_complete(struct percpu_ref *ref)
|
||||
{
|
||||
struct rvt_mregion *mr = container_of(ref, struct rvt_mregion,
|
||||
refcount);
|
||||
|
||||
complete(&mr->comp);
|
||||
}
|
||||
|
||||
static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
|
||||
int count)
|
||||
int count, unsigned int percpu_flags)
|
||||
{
|
||||
int m, i = 0;
|
||||
struct rvt_dev_info *dev = ib_to_rvt(pd->device);
|
||||
@ -133,19 +142,23 @@ static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
|
||||
for (; i < m; i++) {
|
||||
mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL,
|
||||
dev->dparms.node);
|
||||
if (!mr->map[i]) {
|
||||
rvt_deinit_mregion(mr);
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (!mr->map[i])
|
||||
goto bail;
|
||||
mr->mapsz++;
|
||||
}
|
||||
init_completion(&mr->comp);
|
||||
/* count returning the ptr to user */
|
||||
atomic_set(&mr->refcount, 1);
|
||||
if (percpu_ref_init(&mr->refcount, &__rvt_mregion_complete,
|
||||
percpu_flags, GFP_KERNEL))
|
||||
goto bail;
|
||||
|
||||
atomic_set(&mr->lkey_invalid, 0);
|
||||
mr->pd = pd;
|
||||
mr->max_segs = count;
|
||||
return 0;
|
||||
bail:
|
||||
rvt_deinit_mregion(mr);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -180,8 +193,7 @@ static int rvt_alloc_lkey(struct rvt_mregion *mr, int dma_region)
|
||||
if (!tmr) {
|
||||
rcu_assign_pointer(dev->dma_mr, mr);
|
||||
mr->lkey_published = 1;
|
||||
} else {
|
||||
rvt_put_mr(mr);
|
||||
rvt_get_mr(mr);
|
||||
}
|
||||
goto success;
|
||||
}
|
||||
@ -239,11 +251,14 @@ static void rvt_free_lkey(struct rvt_mregion *mr)
|
||||
int freed = 0;
|
||||
|
||||
spin_lock_irqsave(&rkt->lock, flags);
|
||||
if (!mr->lkey_published)
|
||||
goto out;
|
||||
if (lkey == 0) {
|
||||
RCU_INIT_POINTER(dev->dma_mr, NULL);
|
||||
if (!lkey) {
|
||||
if (mr->lkey_published) {
|
||||
RCU_INIT_POINTER(dev->dma_mr, NULL);
|
||||
rvt_put_mr(mr);
|
||||
}
|
||||
} else {
|
||||
if (!mr->lkey_published)
|
||||
goto out;
|
||||
r = lkey >> (32 - dev->dparms.lkey_table_size);
|
||||
RCU_INIT_POINTER(rkt->table[r], NULL);
|
||||
}
|
||||
@ -253,7 +268,7 @@ out:
|
||||
spin_unlock_irqrestore(&rkt->lock, flags);
|
||||
if (freed) {
|
||||
synchronize_rcu();
|
||||
rvt_put_mr(mr);
|
||||
percpu_ref_kill(&mr->refcount);
|
||||
}
|
||||
}
|
||||
|
||||
@ -269,7 +284,7 @@ static struct rvt_mr *__rvt_alloc_mr(int count, struct ib_pd *pd)
|
||||
if (!mr)
|
||||
goto bail;
|
||||
|
||||
rval = rvt_init_mregion(&mr->mr, pd, count);
|
||||
rval = rvt_init_mregion(&mr->mr, pd, count, 0);
|
||||
if (rval)
|
||||
goto bail;
|
||||
/*
|
||||
@ -294,8 +309,8 @@ bail:
|
||||
|
||||
static void __rvt_free_mr(struct rvt_mr *mr)
|
||||
{
|
||||
rvt_deinit_mregion(&mr->mr);
|
||||
rvt_free_lkey(&mr->mr);
|
||||
rvt_deinit_mregion(&mr->mr);
|
||||
kfree(mr);
|
||||
}
|
||||
|
||||
@ -323,7 +338,7 @@ struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc)
|
||||
goto bail;
|
||||
}
|
||||
|
||||
rval = rvt_init_mregion(&mr->mr, pd, 0);
|
||||
rval = rvt_init_mregion(&mr->mr, pd, 0, 0);
|
||||
if (rval) {
|
||||
ret = ERR_PTR(rval);
|
||||
goto bail;
|
||||
@ -445,8 +460,8 @@ int rvt_dereg_mr(struct ib_mr *ibmr)
|
||||
timeout = wait_for_completion_timeout(&mr->mr.comp, 5 * HZ);
|
||||
if (!timeout) {
|
||||
rvt_pr_err(rdi,
|
||||
"rvt_dereg_mr timeout mr %p pd %p refcount %u\n",
|
||||
mr, mr->mr.pd, atomic_read(&mr->mr.refcount));
|
||||
"rvt_dereg_mr timeout mr %p pd %p\n",
|
||||
mr, mr->mr.pd);
|
||||
rvt_get_mr(&mr->mr);
|
||||
ret = -EBUSY;
|
||||
goto out;
|
||||
@ -623,7 +638,8 @@ struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
|
||||
if (!fmr)
|
||||
goto bail;
|
||||
|
||||
rval = rvt_init_mregion(&fmr->mr, pd, fmr_attr->max_pages);
|
||||
rval = rvt_init_mregion(&fmr->mr, pd, fmr_attr->max_pages,
|
||||
PERCPU_REF_INIT_ATOMIC);
|
||||
if (rval)
|
||||
goto bail;
|
||||
|
||||
@ -674,11 +690,12 @@ int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
|
||||
struct rvt_fmr *fmr = to_ifmr(ibfmr);
|
||||
struct rvt_lkey_table *rkt;
|
||||
unsigned long flags;
|
||||
int m, n, i;
|
||||
int m, n;
|
||||
unsigned long i;
|
||||
u32 ps;
|
||||
struct rvt_dev_info *rdi = ib_to_rvt(ibfmr->device);
|
||||
|
||||
i = atomic_read(&fmr->mr.refcount);
|
||||
i = atomic_long_read(&fmr->mr.refcount.count);
|
||||
if (i > 2)
|
||||
return -EBUSY;
|
||||
|
||||
|
@ -90,7 +90,7 @@ struct ib_pd *rvt_alloc_pd(struct ib_device *ibdev,
|
||||
spin_unlock(&dev->n_pds_lock);
|
||||
|
||||
/* ib_alloc_pd() will initialize pd->ibpd. */
|
||||
pd->user = udata ? 1 : 0;
|
||||
pd->user = !!udata;
|
||||
|
||||
ret = &pd->ibpd;
|
||||
|
||||
|
@ -51,10 +51,51 @@
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/slab.h>
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include <rdma/ib_hdrs.h>
|
||||
#include "qp.h"
|
||||
#include "vt.h"
|
||||
#include "trace.h"
|
||||
|
||||
static void rvt_rc_timeout(unsigned long arg);
|
||||
|
||||
/*
|
||||
* Convert the AETH RNR timeout code into the number of microseconds.
|
||||
*/
|
||||
static const u32 ib_rvt_rnr_table[32] = {
|
||||
655360, /* 00: 655.36 */
|
||||
10, /* 01: .01 */
|
||||
20, /* 02 .02 */
|
||||
30, /* 03: .03 */
|
||||
40, /* 04: .04 */
|
||||
60, /* 05: .06 */
|
||||
80, /* 06: .08 */
|
||||
120, /* 07: .12 */
|
||||
160, /* 08: .16 */
|
||||
240, /* 09: .24 */
|
||||
320, /* 0A: .32 */
|
||||
480, /* 0B: .48 */
|
||||
640, /* 0C: .64 */
|
||||
960, /* 0D: .96 */
|
||||
1280, /* 0E: 1.28 */
|
||||
1920, /* 0F: 1.92 */
|
||||
2560, /* 10: 2.56 */
|
||||
3840, /* 11: 3.84 */
|
||||
5120, /* 12: 5.12 */
|
||||
7680, /* 13: 7.68 */
|
||||
10240, /* 14: 10.24 */
|
||||
15360, /* 15: 15.36 */
|
||||
20480, /* 16: 20.48 */
|
||||
30720, /* 17: 30.72 */
|
||||
40960, /* 18: 40.96 */
|
||||
61440, /* 19: 61.44 */
|
||||
81920, /* 1A: 81.92 */
|
||||
122880, /* 1B: 122.88 */
|
||||
163840, /* 1C: 163.84 */
|
||||
245760, /* 1D: 245.76 */
|
||||
327680, /* 1E: 327.68 */
|
||||
491520 /* 1F: 491.52 */
|
||||
};
|
||||
|
||||
/*
|
||||
* Note that it is OK to post send work requests in the SQE and ERR
|
||||
* states; rvt_do_send() will process them and generate error
|
||||
@ -200,7 +241,8 @@ int rvt_driver_qp_init(struct rvt_dev_info *rdi)
|
||||
if (!rdi->driver_f.free_all_qps ||
|
||||
!rdi->driver_f.qp_priv_alloc ||
|
||||
!rdi->driver_f.qp_priv_free ||
|
||||
!rdi->driver_f.notify_qp_reset)
|
||||
!rdi->driver_f.notify_qp_reset ||
|
||||
!rdi->driver_f.notify_restart_rc)
|
||||
return -EINVAL;
|
||||
|
||||
/* allocate parent object */
|
||||
@ -587,6 +629,7 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
|
||||
|
||||
/* Let drivers flush their waitlist */
|
||||
rdi->driver_f.flush_qp_waiters(qp);
|
||||
rvt_stop_rc_timers(qp);
|
||||
qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
|
||||
spin_unlock(&qp->s_lock);
|
||||
spin_unlock(&qp->s_hlock);
|
||||
@ -594,7 +637,7 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
|
||||
|
||||
/* Stop the send queue and the retry timer */
|
||||
rdi->driver_f.stop_send_queue(qp);
|
||||
|
||||
rvt_del_timers_sync(qp);
|
||||
/* Wait for things to stop */
|
||||
rdi->driver_f.quiesce_qp(qp);
|
||||
|
||||
@ -730,6 +773,11 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
|
||||
if (!qp->s_ack_queue)
|
||||
goto bail_qp;
|
||||
}
|
||||
/* initialize timers needed for rc qp */
|
||||
setup_timer(&qp->s_timer, rvt_rc_timeout, (unsigned long)qp);
|
||||
hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
|
||||
HRTIMER_MODE_REL);
|
||||
qp->s_rnr_timer.function = rvt_rc_rnr_retry;
|
||||
|
||||
/*
|
||||
* Driver needs to set up it's private QP structure and do any
|
||||
@ -1868,3 +1916,184 @@ int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* qp_comm_est - handle trap with QP established
|
||||
* @qp: the QP
|
||||
*/
|
||||
void rvt_comm_est(struct rvt_qp *qp)
|
||||
{
|
||||
qp->r_flags |= RVT_R_COMM_EST;
|
||||
if (qp->ibqp.event_handler) {
|
||||
struct ib_event ev;
|
||||
|
||||
ev.device = qp->ibqp.device;
|
||||
ev.element.qp = &qp->ibqp;
|
||||
ev.event = IB_EVENT_COMM_EST;
|
||||
qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(rvt_comm_est);
|
||||
|
||||
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
|
||||
{
|
||||
unsigned long flags;
|
||||
int lastwqe;
|
||||
|
||||
spin_lock_irqsave(&qp->s_lock, flags);
|
||||
lastwqe = rvt_error_qp(qp, err);
|
||||
spin_unlock_irqrestore(&qp->s_lock, flags);
|
||||
|
||||
if (lastwqe) {
|
||||
struct ib_event ev;
|
||||
|
||||
ev.device = qp->ibqp.device;
|
||||
ev.element.qp = &qp->ibqp;
|
||||
ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
|
||||
qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(rvt_rc_error);
|
||||
|
||||
/*
|
||||
* rvt_rnr_tbl_to_usec - return index into ib_rvt_rnr_table
|
||||
* @index - the index
|
||||
* return usec from an index into ib_rvt_rnr_table
|
||||
*/
|
||||
unsigned long rvt_rnr_tbl_to_usec(u32 index)
|
||||
{
|
||||
return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)];
|
||||
}
|
||||
EXPORT_SYMBOL(rvt_rnr_tbl_to_usec);
|
||||
|
||||
static inline unsigned long rvt_aeth_to_usec(u32 aeth)
|
||||
{
|
||||
return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
|
||||
IB_AETH_CREDIT_MASK];
|
||||
}
|
||||
|
||||
/*
|
||||
* rvt_add_retry_timer - add/start a retry timer
|
||||
* @qp - the QP
|
||||
* add a retry timer on the QP
|
||||
*/
|
||||
void rvt_add_retry_timer(struct rvt_qp *qp)
|
||||
{
|
||||
struct ib_qp *ibqp = &qp->ibqp;
|
||||
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
|
||||
|
||||
lockdep_assert_held(&qp->s_lock);
|
||||
qp->s_flags |= RVT_S_TIMER;
|
||||
/* 4.096 usec. * (1 << qp->timeout) */
|
||||
qp->s_timer.expires = jiffies + qp->timeout_jiffies +
|
||||
rdi->busy_jiffies;
|
||||
add_timer(&qp->s_timer);
|
||||
}
|
||||
EXPORT_SYMBOL(rvt_add_retry_timer);
|
||||
|
||||
/**
|
||||
* rvt_add_rnr_timer - add/start an rnr timer
|
||||
* @qp - the QP
|
||||
* @aeth - aeth of RNR timeout, simulated aeth for loopback
|
||||
* add an rnr timer on the QP
|
||||
*/
|
||||
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
|
||||
{
|
||||
u32 to;
|
||||
|
||||
lockdep_assert_held(&qp->s_lock);
|
||||
qp->s_flags |= RVT_S_WAIT_RNR;
|
||||
to = rvt_aeth_to_usec(aeth);
|
||||
hrtimer_start(&qp->s_rnr_timer,
|
||||
ns_to_ktime(1000 * to), HRTIMER_MODE_REL);
|
||||
}
|
||||
EXPORT_SYMBOL(rvt_add_rnr_timer);
|
||||
|
||||
/**
|
||||
* rvt_stop_rc_timers - stop all timers
|
||||
* @qp - the QP
|
||||
* stop any pending timers
|
||||
*/
|
||||
void rvt_stop_rc_timers(struct rvt_qp *qp)
|
||||
{
|
||||
lockdep_assert_held(&qp->s_lock);
|
||||
/* Remove QP from all timers */
|
||||
if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
|
||||
qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
|
||||
del_timer(&qp->s_timer);
|
||||
hrtimer_try_to_cancel(&qp->s_rnr_timer);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(rvt_stop_rc_timers);
|
||||
|
||||
/**
|
||||
* rvt_stop_rnr_timer - stop an rnr timer
|
||||
* @qp - the QP
|
||||
*
|
||||
* stop an rnr timer and return if the timer
|
||||
* had been pending.
|
||||
*/
|
||||
static int rvt_stop_rnr_timer(struct rvt_qp *qp)
|
||||
{
|
||||
int rval = 0;
|
||||
|
||||
lockdep_assert_held(&qp->s_lock);
|
||||
/* Remove QP from rnr timer */
|
||||
if (qp->s_flags & RVT_S_WAIT_RNR) {
|
||||
qp->s_flags &= ~RVT_S_WAIT_RNR;
|
||||
rval = hrtimer_try_to_cancel(&qp->s_rnr_timer);
|
||||
}
|
||||
return rval;
|
||||
}
|
||||
|
||||
/**
|
||||
* rvt_del_timers_sync - wait for any timeout routines to exit
|
||||
* @qp - the QP
|
||||
*/
|
||||
void rvt_del_timers_sync(struct rvt_qp *qp)
|
||||
{
|
||||
del_timer_sync(&qp->s_timer);
|
||||
hrtimer_cancel(&qp->s_rnr_timer);
|
||||
}
|
||||
EXPORT_SYMBOL(rvt_del_timers_sync);
|
||||
|
||||
/**
|
||||
* This is called from s_timer for missing responses.
|
||||
*/
|
||||
static void rvt_rc_timeout(unsigned long arg)
|
||||
{
|
||||
struct rvt_qp *qp = (struct rvt_qp *)arg;
|
||||
struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&qp->r_lock, flags);
|
||||
spin_lock(&qp->s_lock);
|
||||
if (qp->s_flags & RVT_S_TIMER) {
|
||||
qp->s_flags &= ~RVT_S_TIMER;
|
||||
del_timer(&qp->s_timer);
|
||||
if (rdi->driver_f.notify_restart_rc)
|
||||
rdi->driver_f.notify_restart_rc(qp,
|
||||
qp->s_last_psn + 1,
|
||||
1);
|
||||
rdi->driver_f.schedule_send(qp);
|
||||
}
|
||||
spin_unlock(&qp->s_lock);
|
||||
spin_unlock_irqrestore(&qp->r_lock, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* This is called from s_timer for RNR timeouts.
|
||||
*/
|
||||
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
|
||||
{
|
||||
struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
|
||||
struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&qp->s_lock, flags);
|
||||
rvt_stop_rnr_timer(qp);
|
||||
rdi->driver_f.schedule_send(qp);
|
||||
spin_unlock_irqrestore(&qp->s_lock, flags);
|
||||
return HRTIMER_NORESTART;
|
||||
}
|
||||
EXPORT_SYMBOL(rvt_rc_rnr_retry);
|
||||
|
drivers/infiniband/sw/rdmavt/rc.c (new file, 189 lines)
@ -0,0 +1,189 @@
|
||||
/*
|
||||
* Copyright(c) 2016 Intel Corporation.
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
* redistributing this file, you may do so under either license.
|
||||
*
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* - Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* - Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <rdma/rdma_vt.h>
|
||||
#include <rdma/ib_hdrs.h>
|
||||
|
||||
/*
|
||||
* Convert the AETH credit code into the number of credits.
|
||||
*/
|
||||
static const u16 credit_table[31] = {
|
||||
0, /* 0 */
|
||||
1, /* 1 */
|
||||
2, /* 2 */
|
||||
3, /* 3 */
|
||||
4, /* 4 */
|
||||
6, /* 5 */
|
||||
8, /* 6 */
|
||||
12, /* 7 */
|
||||
16, /* 8 */
|
||||
24, /* 9 */
|
||||
32, /* A */
|
||||
48, /* B */
|
||||
64, /* C */
|
||||
96, /* D */
|
||||
128, /* E */
|
||||
192, /* F */
|
||||
256, /* 10 */
|
||||
384, /* 11 */
|
||||
512, /* 12 */
|
||||
768, /* 13 */
|
||||
1024, /* 14 */
|
||||
1536, /* 15 */
|
||||
2048, /* 16 */
|
||||
3072, /* 17 */
|
||||
4096, /* 18 */
|
||||
6144, /* 19 */
|
||||
8192, /* 1A */
|
||||
12288, /* 1B */
|
||||
16384, /* 1C */
|
||||
24576, /* 1D */
|
||||
32768 /* 1E */
|
||||
};
|
||||
|
||||
/**
|
||||
* rvt_compute_aeth - compute the AETH (syndrome + MSN)
|
||||
* @qp: the queue pair to compute the AETH for
|
||||
*
|
||||
* Returns the AETH.
|
||||
*/
|
||||
__be32 rvt_compute_aeth(struct rvt_qp *qp)
|
||||
{
|
||||
u32 aeth = qp->r_msn & IB_MSN_MASK;
|
||||
|
||||
if (qp->ibqp.srq) {
|
||||
/*
|
||||
* Shared receive queues don't generate credits.
|
||||
* Set the credit field to the invalid value.
|
||||
*/
|
||||
aeth |= IB_AETH_CREDIT_INVAL << IB_AETH_CREDIT_SHIFT;
|
||||
} else {
|
||||
u32 min, max, x;
|
||||
u32 credits;
|
||||
struct rvt_rwq *wq = qp->r_rq.wq;
|
||||
u32 head;
|
||||
u32 tail;
|
||||
|
||||
/* sanity check pointers before trusting them */
|
||||
head = wq->head;
|
||||
if (head >= qp->r_rq.size)
|
||||
head = 0;
|
||||
tail = wq->tail;
|
||||
if (tail >= qp->r_rq.size)
|
||||
tail = 0;
|
||||
/*
|
||||
* Compute the number of credits available (RWQEs).
|
||||
* There is a small chance that the pair of reads are
|
||||
* not atomic, which is OK, since the fuzziness is
|
||||
* resolved as further ACKs go out.
|
||||
*/
|
||||
credits = head - tail;
|
||||
if ((int)credits < 0)
|
||||
credits += qp->r_rq.size;
|
||||
/*
|
||||
* Binary search the credit table to find the code to
|
||||
* use.
|
||||
*/
|
||||
min = 0;
|
||||
max = 31;
|
||||
for (;;) {
|
||||
x = (min + max) / 2;
|
||||
if (credit_table[x] == credits)
|
||||
break;
|
||||
if (credit_table[x] > credits) {
|
||||
max = x;
|
||||
} else {
|
||||
if (min == x)
|
||||
break;
|
||||
min = x;
|
||||
}
|
||||
}
|
||||
aeth |= x << IB_AETH_CREDIT_SHIFT;
|
||||
}
|
||||
return cpu_to_be32(aeth);
|
||||
}
|
||||
EXPORT_SYMBOL(rvt_compute_aeth);
|
||||
|
||||
/**
|
||||
* rvt_get_credit - flush the send work queue of a QP
|
||||
* @qp: the qp who's send work queue to flush
|
||||
* @aeth: the Acknowledge Extended Transport Header
|
||||
*
|
||||
* The QP s_lock should be held.
|
||||
*/
|
||||
void rvt_get_credit(struct rvt_qp *qp, u32 aeth)
|
||||
{
|
||||
struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
|
||||
u32 credit = (aeth >> IB_AETH_CREDIT_SHIFT) & IB_AETH_CREDIT_MASK;
|
||||
|
||||
lockdep_assert_held(&qp->s_lock);
|
||||
/*
|
||||
* If the credit is invalid, we can send
|
||||
* as many packets as we like. Otherwise, we have to
|
||||
* honor the credit field.
|
||||
*/
|
||||
if (credit == IB_AETH_CREDIT_INVAL) {
|
||||
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
|
||||
qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
|
||||
if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
|
||||
qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
|
||||
rdi->driver_f.schedule_send(qp);
|
||||
}
|
||||
}
|
||||
} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
|
||||
/* Compute new LSN (i.e., MSN + credit) */
|
||||
credit = (aeth + credit_table[credit]) & IB_MSN_MASK;
|
||||
if (rvt_cmp_msn(credit, qp->s_lsn) > 0) {
|
||||
qp->s_lsn = credit;
|
||||
if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
|
||||
qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
|
||||
rdi->driver_f.schedule_send(qp);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(rvt_get_credit);
|
@ -178,7 +178,7 @@ static int rxe_init_ports(struct rxe_dev *rxe)
|
||||
return -ENOMEM;
|
||||
|
||||
port->pkey_tbl[0] = 0xffff;
|
||||
port->port_guid = rxe->ifc_ops->port_guid(rxe);
|
||||
port->port_guid = rxe_port_guid(rxe);
|
||||
|
||||
spin_lock_init(&port->port_lock);
|
||||
|
||||
|
@ -254,7 +254,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
|
||||
}
|
||||
break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
WARN_ON_ONCE(1);
|
||||
}
|
||||
|
||||
/* Check operation validity. */
|
||||
@ -412,13 +412,21 @@ static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
|
||||
* ---------8<---------8<-------------
|
||||
* ...Note that if a completion error occurs, a Work Completion
|
||||
* will always be generated, even if the signaling
|
||||
* indicator requests an Unsignaled Completion.
|
||||
* ---------8<---------8<-------------
|
||||
*/
|
||||
static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
|
||||
{
|
||||
struct rxe_cqe cqe;
|
||||
|
||||
if ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
|
||||
(wqe->wr.send_flags & IB_SEND_SIGNALED) ||
|
||||
(qp->req.state == QP_STATE_ERROR)) {
|
||||
wqe->status != IB_WC_SUCCESS) {
|
||||
make_send_cqe(qp, wqe, &cqe);
|
||||
advance_consumer(qp->sq.queue);
|
||||
rxe_cq_post(qp->scq, &cqe, 0);
|
||||
@ -503,6 +511,26 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
|
||||
return COMPST_GET_WQE;
|
||||
}
|
||||
|
||||
static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
struct rxe_send_wqe *wqe;
|
||||
|
||||
while ((skb = skb_dequeue(&qp->resp_pkts))) {
|
||||
rxe_drop_ref(qp);
|
||||
kfree_skb(skb);
|
||||
}
|
||||
|
||||
while ((wqe = queue_head(qp->sq.queue))) {
|
||||
if (notify) {
|
||||
wqe->status = IB_WC_WR_FLUSH_ERR;
|
||||
do_complete(qp, wqe);
|
||||
} else {
|
||||
advance_consumer(qp->sq.queue);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int rxe_completer(void *arg)
|
||||
{
|
||||
struct rxe_qp *qp = (struct rxe_qp *)arg;
|
||||
@ -513,47 +541,10 @@ int rxe_completer(void *arg)
|
||||
|
||||
rxe_add_ref(qp);
|
||||
|
||||
if (!qp->valid) {
|
||||
while ((skb = skb_dequeue(&qp->resp_pkts))) {
|
||||
rxe_drop_ref(qp);
|
||||
kfree_skb(skb);
|
||||
}
|
||||
skb = NULL;
|
||||
pkt = NULL;
|
||||
|
||||
while (queue_head(qp->sq.queue))
|
||||
advance_consumer(qp->sq.queue);
|
||||
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (qp->req.state == QP_STATE_ERROR) {
|
||||
while ((skb = skb_dequeue(&qp->resp_pkts))) {
|
||||
rxe_drop_ref(qp);
|
||||
kfree_skb(skb);
|
||||
}
|
||||
skb = NULL;
|
||||
pkt = NULL;
|
||||
|
||||
while ((wqe = queue_head(qp->sq.queue))) {
|
||||
wqe->status = IB_WC_WR_FLUSH_ERR;
|
||||
do_complete(qp, wqe);
|
||||
}
|
||||
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (qp->req.state == QP_STATE_RESET) {
|
||||
while ((skb = skb_dequeue(&qp->resp_pkts))) {
|
||||
rxe_drop_ref(qp);
|
||||
kfree_skb(skb);
|
||||
}
|
||||
skb = NULL;
|
||||
pkt = NULL;
|
||||
|
||||
while (queue_head(qp->sq.queue))
|
||||
advance_consumer(qp->sq.queue);
|
||||
|
||||
if (!qp->valid || qp->req.state == QP_STATE_ERROR ||
|
||||
qp->req.state == QP_STATE_RESET) {
|
||||
rxe_drain_resp_pkts(qp, qp->valid &&
|
||||
qp->req.state == QP_STATE_ERROR);
|
||||
goto exit;
|
||||
}
|
||||
|
||||
@ -639,6 +630,7 @@ int rxe_completer(void *arg)
|
||||
if (pkt) {
|
||||
rxe_drop_ref(pkt->qp);
|
||||
kfree_skb(skb);
|
||||
skb = NULL;
|
||||
}
|
||||
goto done;
|
||||
|
||||
@ -662,6 +654,7 @@ int rxe_completer(void *arg)
|
||||
qp->qp_timeout_jiffies)
|
||||
mod_timer(&qp->retrans_timer,
|
||||
jiffies + qp->qp_timeout_jiffies);
|
||||
WARN_ON_ONCE(skb);
|
||||
goto exit;
|
||||
|
||||
case COMPST_ERROR_RETRY:
|
||||
@ -674,8 +667,10 @@ int rxe_completer(void *arg)
|
||||
*/
|
||||
|
||||
/* there is nothing to retry in this case */
|
||||
if (!wqe || (wqe->state == wqe_state_posted))
|
||||
if (!wqe || (wqe->state == wqe_state_posted)) {
|
||||
WARN_ON_ONCE(skb);
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (qp->comp.retry_cnt > 0) {
|
||||
if (qp->comp.retry_cnt != 7)
|
||||
@ -697,8 +692,10 @@ int rxe_completer(void *arg)
|
||||
if (pkt) {
|
||||
rxe_drop_ref(pkt->qp);
|
||||
kfree_skb(skb);
|
||||
skb = NULL;
|
||||
}
|
||||
|
||||
WARN_ON_ONCE(skb);
|
||||
goto exit;
|
||||
|
||||
} else {
|
||||
@ -718,6 +715,9 @@ int rxe_completer(void *arg)
|
||||
mod_timer(&qp->rnr_nak_timer,
|
||||
jiffies + rnrnak_jiffies(aeth_syn(pkt)
|
||||
& ~AETH_TYPE_MASK));
|
||||
rxe_drop_ref(pkt->qp);
|
||||
kfree_skb(skb);
|
||||
skb = NULL;
|
||||
goto exit;
|
||||
} else {
|
||||
wqe->status = IB_WC_RNR_RETRY_EXC_ERR;
|
||||
@ -726,14 +726,17 @@ int rxe_completer(void *arg)
|
||||
break;
|
||||
|
||||
case COMPST_ERROR:
|
||||
WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
|
||||
do_complete(qp, wqe);
|
||||
rxe_qp_error(qp);
|
||||
|
||||
if (pkt) {
|
||||
rxe_drop_ref(pkt->qp);
|
||||
kfree_skb(skb);
|
||||
skb = NULL;
|
||||
}
|
||||
|
||||
WARN_ON_ONCE(skb);
|
||||
goto exit;
|
||||
}
|
||||
}
|
||||
@ -742,6 +745,7 @@ exit:
|
||||
/* we come here if we are done with processing and want the task to
|
||||
* exit from the loop calling us
|
||||
*/
|
||||
WARN_ON_ONCE(skb);
|
||||
rxe_drop_ref(qp);
|
||||
return -EAGAIN;
|
||||
|
||||
@ -749,6 +753,7 @@ done:
|
||||
/* we come here if we have processed a packet we want the task to call
|
||||
* us again to see if there is anything else to do
|
||||
*/
|
||||
WARN_ON_ONCE(skb);
|
||||
rxe_drop_ref(qp);
|
||||
return 0;
|
||||
}
|
||||
|
@ -156,9 +156,9 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void rxe_cq_cleanup(void *arg)
|
||||
void rxe_cq_cleanup(struct rxe_pool_entry *arg)
|
||||
{
|
||||
struct rxe_cq *cq = arg;
|
||||
struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);
|
||||
|
||||
if (cq->queue)
|
||||
rxe_queue_cleanup(cq->queue);
|
||||
|
@ -53,8 +53,16 @@ struct rxe_pkt_info {
|
||||
};
|
||||
|
||||
/* Macros should be used only for received skb */
|
||||
#define SKB_TO_PKT(skb) ((struct rxe_pkt_info *)(skb)->cb)
|
||||
#define PKT_TO_SKB(pkt) container_of((void *)(pkt), struct sk_buff, cb)
|
||||
static inline struct rxe_pkt_info *SKB_TO_PKT(struct sk_buff *skb)
|
||||
{
|
||||
BUILD_BUG_ON(sizeof(struct rxe_pkt_info) > sizeof(skb->cb));
|
||||
return (void *)skb->cb;
|
||||
}
|
||||
|
||||
static inline struct sk_buff *PKT_TO_SKB(struct rxe_pkt_info *pkt)
|
||||
{
|
||||
return container_of((void *)pkt, struct sk_buff, cb);
|
||||
}
|
||||
|
||||
/*
|
||||
* IBA header types and methods
|
||||
|
@ -64,7 +64,7 @@ int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe, struct ib_udata *udata);
|
||||
|
||||
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);
|
||||
|
||||
void rxe_cq_cleanup(void *arg);
|
||||
void rxe_cq_cleanup(struct rxe_pool_entry *arg);
|
||||
|
||||
/* rxe_mcast.c */
|
||||
int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
|
||||
@ -78,7 +78,7 @@ int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
|
||||
|
||||
void rxe_drop_all_mcast_groups(struct rxe_qp *qp);
|
||||
|
||||
void rxe_mc_cleanup(void *arg);
|
||||
void rxe_mc_cleanup(struct rxe_pool_entry *arg);
|
||||
|
||||
/* rxe_mmap.c */
|
||||
struct rxe_mmap_info {
|
||||
@ -137,10 +137,26 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);
|
||||
int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
|
||||
u64 *page, int num_pages, u64 iova);
|
||||
|
||||
void rxe_mem_cleanup(void *arg);
|
||||
void rxe_mem_cleanup(struct rxe_pool_entry *arg);
|
||||
|
||||
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
|
||||
|
||||
/* rxe_net.c */
|
||||
int rxe_loopback(struct sk_buff *skb);
|
||||
int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
|
||||
struct sk_buff *skb);
|
||||
__be64 rxe_port_guid(struct rxe_dev *rxe);
|
||||
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
|
||||
int paylen, struct rxe_pkt_info *pkt);
|
||||
int rxe_prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
|
||||
struct sk_buff *skb, u32 *crc);
|
||||
enum rdma_link_layer rxe_link_layer(struct rxe_dev *rxe, unsigned int port_num);
|
||||
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
|
||||
struct device *rxe_dma_device(struct rxe_dev *rxe);
|
||||
__be64 rxe_node_guid(struct rxe_dev *rxe);
|
||||
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid);
|
||||
int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid);
|
||||
|
||||
/* rxe_qp.c */
|
||||
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);
|
||||
|
||||
@ -162,7 +178,7 @@ void rxe_qp_error(struct rxe_qp *qp);
|
||||
|
||||
void rxe_qp_destroy(struct rxe_qp *qp);
|
||||
|
||||
void rxe_qp_cleanup(void *arg);
|
||||
void rxe_qp_cleanup(struct rxe_pool_entry *arg);
|
||||
|
||||
static inline int qp_num(struct rxe_qp *qp)
|
||||
{
|
||||
@ -225,6 +241,7 @@ extern struct ib_dma_mapping_ops rxe_dma_mapping_ops;
|
||||
|
||||
void rxe_release(struct kref *kref);
|
||||
|
||||
void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify);
|
||||
int rxe_completer(void *arg);
|
||||
int rxe_requester(void *arg);
|
||||
int rxe_responder(void *arg);
|
||||
@ -256,9 +273,9 @@ static inline int rxe_xmit_packet(struct rxe_dev *rxe, struct rxe_qp *qp,
|
||||
|
||||
if (pkt->mask & RXE_LOOPBACK_MASK) {
|
||||
memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
|
||||
err = rxe->ifc_ops->loopback(skb);
|
||||
err = rxe_loopback(skb);
|
||||
} else {
|
||||
err = rxe->ifc_ops->send(rxe, pkt, skb);
|
||||
err = rxe_send(rxe, pkt, skb);
|
||||
}
|
||||
|
||||
if (err) {
|
||||
|
@ -61,7 +61,7 @@ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
|
||||
|
||||
rxe_add_key(grp, mgid);
|
||||
|
||||
err = rxe->ifc_ops->mcast_add(rxe, mgid);
|
||||
err = rxe_mcast_add(rxe, mgid);
|
||||
if (err)
|
||||
goto err2;
|
||||
|
||||
@ -180,11 +180,11 @@ void rxe_drop_all_mcast_groups(struct rxe_qp *qp)
|
||||
}
|
||||
}
|
||||
|
||||
void rxe_mc_cleanup(void *arg)
|
||||
void rxe_mc_cleanup(struct rxe_pool_entry *arg)
|
||||
{
|
||||
struct rxe_mc_grp *grp = arg;
|
||||
struct rxe_mc_grp *grp = container_of(arg, typeof(*grp), pelem);
|
||||
struct rxe_dev *rxe = grp->rxe;
|
||||
|
||||
rxe_drop_key(grp);
|
||||
rxe->ifc_ops->mcast_delete(rxe, &grp->mgid);
|
||||
rxe_mcast_delete(rxe, &grp->mgid);
|
||||
}
|
||||
|
@ -91,9 +91,9 @@ static void rxe_mem_init(int access, struct rxe_mem *mem)
|
||||
mem->map_shift = ilog2(RXE_BUF_PER_MAP);
|
||||
}
|
||||
|
||||
void rxe_mem_cleanup(void *arg)
|
||||
void rxe_mem_cleanup(struct rxe_pool_entry *arg)
|
||||
{
|
||||
struct rxe_mem *mem = arg;
|
||||
struct rxe_mem *mem = container_of(arg, typeof(*mem), pelem);
|
||||
int i;
|
||||
|
||||
if (mem->umem)
|
||||
@ -125,7 +125,7 @@ static int rxe_mem_alloc(struct rxe_dev *rxe, struct rxe_mem *mem, int num_buf)
|
||||
goto err2;
|
||||
}
|
||||
|
||||
WARN_ON(!is_power_of_2(RXE_BUF_PER_MAP));
|
||||
BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
|
||||
|
||||
mem->map_shift = ilog2(RXE_BUF_PER_MAP);
|
||||
mem->map_mask = RXE_BUF_PER_MAP - 1;
|
||||
@ -191,7 +191,7 @@ int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
|
||||
goto err1;
|
||||
}
|
||||
|
||||
WARN_ON(!is_power_of_2(umem->page_size));
|
||||
WARN_ON_ONCE(!is_power_of_2(umem->page_size));
|
||||
|
||||
mem->page_shift = ilog2(umem->page_size);
|
||||
mem->page_mask = umem->page_size - 1;
|
||||
@ -377,7 +377,7 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
|
||||
return 0;
|
||||
}
|
||||
|
||||
WARN_ON(!mem->map);
|
||||
WARN_ON_ONCE(!mem->map);
|
||||
|
||||
err = mem_check_range(mem, iova, length);
|
||||
if (err) {
|
||||
|
@ -102,17 +102,17 @@ static __be64 rxe_mac_to_eui64(struct net_device *ndev)
|
||||
return eui64;
|
||||
}
|
||||
|
||||
static __be64 node_guid(struct rxe_dev *rxe)
|
||||
__be64 rxe_node_guid(struct rxe_dev *rxe)
|
||||
{
|
||||
return rxe_mac_to_eui64(rxe->ndev);
|
||||
}
|
||||
|
||||
static __be64 port_guid(struct rxe_dev *rxe)
|
||||
__be64 rxe_port_guid(struct rxe_dev *rxe)
|
||||
{
|
||||
return rxe_mac_to_eui64(rxe->ndev);
|
||||
}
|
||||
|
||||
static struct device *dma_device(struct rxe_dev *rxe)
|
||||
struct device *rxe_dma_device(struct rxe_dev *rxe)
|
||||
{
|
||||
struct net_device *ndev;
|
||||
|
||||
@ -124,7 +124,7 @@ static struct device *dma_device(struct rxe_dev *rxe)
|
||||
return ndev->dev.parent;
|
||||
}
|
||||
|
||||
static int mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
|
||||
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
|
||||
{
|
||||
int err;
|
||||
unsigned char ll_addr[ETH_ALEN];
|
||||
@ -135,7 +135,7 @@ static int mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
|
||||
return err;
|
||||
}
|
||||
|
||||
static int mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
|
||||
int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
|
||||
{
|
||||
int err;
|
||||
unsigned char ll_addr[ETH_ALEN];
|
||||
@ -243,8 +243,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
|
||||
{
|
||||
int err;
|
||||
struct socket *sock;
|
||||
struct udp_port_cfg udp_cfg = {0};
|
||||
struct udp_tunnel_sock_cfg tnl_cfg = {0};
|
||||
struct udp_port_cfg udp_cfg = { };
|
||||
struct udp_tunnel_sock_cfg tnl_cfg = { };
|
||||
|
||||
if (ipv6) {
|
||||
udp_cfg.family = AF_INET6;
|
||||
@ -397,8 +397,8 @@ static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
|
||||
struct sk_buff *skb, u32 *crc)
|
||||
int rxe_prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
|
||||
struct sk_buff *skb, u32 *crc)
|
||||
{
|
||||
int err = 0;
|
||||
struct rxe_av *av = rxe_get_av(pkt);
|
||||
@ -424,8 +424,7 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
|
||||
rxe_run_task(&qp->req.task, 1);
|
||||
}
|
||||
|
||||
static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
|
||||
struct sk_buff *skb)
|
||||
int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
|
||||
{
|
||||
struct sk_buff *nskb;
|
||||
struct rxe_av *av;
|
||||
@ -461,7 +460,7 @@ static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int loopback(struct sk_buff *skb)
|
||||
int rxe_loopback(struct sk_buff *skb)
|
||||
{
|
||||
return rxe_rcv(skb);
|
||||
}
|
||||
@ -471,8 +470,8 @@ static inline int addr_same(struct rxe_dev *rxe, struct rxe_av *av)
|
||||
return rxe->port.port_guid == av->grh.dgid.global.interface_id;
|
||||
}
|
||||
|
||||
static struct sk_buff *init_packet(struct rxe_dev *rxe, struct rxe_av *av,
|
||||
int paylen, struct rxe_pkt_info *pkt)
|
||||
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
|
||||
int paylen, struct rxe_pkt_info *pkt)
|
||||
{
|
||||
unsigned int hdr_len;
|
||||
struct sk_buff *skb;
|
||||
@ -511,31 +510,16 @@ static struct sk_buff *init_packet(struct rxe_dev *rxe, struct rxe_av *av,
|
||||
* this is required by rxe_cfg to match rxe devices in
|
||||
* /sys/class/infiniband up with their underlying ethernet devices
|
||||
*/
|
||||
static char *parent_name(struct rxe_dev *rxe, unsigned int port_num)
|
||||
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num)
|
||||
{
|
||||
return rxe->ndev->name;
|
||||
}
|
||||
|
||||
static enum rdma_link_layer link_layer(struct rxe_dev *rxe,
|
||||
unsigned int port_num)
|
||||
enum rdma_link_layer rxe_link_layer(struct rxe_dev *rxe, unsigned int port_num)
|
||||
{
|
||||
return IB_LINK_LAYER_ETHERNET;
|
||||
}
|
||||
|
||||
static struct rxe_ifc_ops ifc_ops = {
|
||||
.node_guid = node_guid,
|
||||
.port_guid = port_guid,
|
||||
.dma_device = dma_device,
|
||||
.mcast_add = mcast_add,
|
||||
.mcast_delete = mcast_delete,
|
||||
.prepare = prepare,
|
||||
.send = send,
|
||||
.loopback = loopback,
|
||||
.init_packet = init_packet,
|
||||
.parent_name = parent_name,
|
||||
.link_layer = link_layer,
|
||||
};
|
||||
|
||||
struct rxe_dev *rxe_net_add(struct net_device *ndev)
|
||||
{
|
||||
int err;
|
||||
@ -545,7 +529,6 @@ struct rxe_dev *rxe_net_add(struct net_device *ndev)
|
||||
if (!rxe)
|
||||
return NULL;
|
||||
|
||||
rxe->ifc_ops = &ifc_ops;
|
||||
rxe->ndev = ndev;
|
||||
|
||||
err = rxe_add(rxe, ndev->mtu);
|
||||
@ -658,7 +641,7 @@ struct notifier_block rxe_net_notifier = {
|
||||
.notifier_call = rxe_notify,
|
||||
};
|
||||
|
||||
int rxe_net_ipv4_init(void)
|
||||
static int rxe_net_ipv4_init(void)
|
||||
{
|
||||
recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
|
||||
htons(ROCE_V2_UDP_DPORT), false);
|
||||
@ -671,7 +654,7 @@ int rxe_net_ipv4_init(void)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int rxe_net_ipv6_init(void)
|
||||
static int rxe_net_ipv6_init(void)
|
||||
{
|
||||
#if IS_ENABLED(CONFIG_IPV6)
|
||||
|
||||
|
@ -102,7 +102,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
|
||||
},
|
||||
};
|
||||
|
||||
static inline char *pool_name(struct rxe_pool *pool)
|
||||
static inline const char *pool_name(struct rxe_pool *pool)
|
||||
{
|
||||
return rxe_type_info[pool->type].name;
|
||||
}
|
||||
@ -112,13 +112,6 @@ static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
|
||||
return rxe_type_info[pool->type].cache;
|
||||
}
|
||||
|
||||
static inline enum rxe_elem_type rxe_type(void *arg)
|
||||
{
|
||||
struct rxe_pool_entry *elem = arg;
|
||||
|
||||
return elem->pool->type;
|
||||
}
|
||||
|
||||
int rxe_cache_init(void)
|
||||
{
|
||||
int err;
|
||||
@ -273,6 +266,7 @@ static u32 alloc_index(struct rxe_pool *pool)
|
||||
if (index >= range)
|
||||
index = find_first_zero_bit(pool->table, range);
|
||||
|
||||
WARN_ON_ONCE(index >= range);
|
||||
set_bit(index, pool->table);
|
||||
pool->last = index;
|
||||
return index + pool->min_index;
|
||||
@ -461,7 +455,7 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
|
||||
|
||||
out:
|
||||
spin_unlock_irqrestore(&pool->pool_lock, flags);
|
||||
return node ? (void *)elem : NULL;
|
||||
return node ? elem : NULL;
|
||||
}
|
||||
|
||||
void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
|
||||
@ -497,5 +491,5 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
|
||||
|
||||
out:
|
||||
spin_unlock_irqrestore(&pool->pool_lock, flags);
|
||||
return node ? ((void *)elem) : NULL;
|
||||
return node ? elem : NULL;
|
||||
}
|
||||
|
@ -57,10 +57,12 @@ enum rxe_elem_type {
|
||||
RXE_NUM_TYPES, /* keep me last */
|
||||
};
|
||||
|
||||
struct rxe_pool_entry;
|
||||
|
||||
struct rxe_type_info {
|
||||
char *name;
|
||||
const char *name;
|
||||
size_t size;
|
||||
void (*cleanup)(void *obj);
|
||||
void (*cleanup)(struct rxe_pool_entry *obj);
|
||||
enum rxe_pool_flags flags;
|
||||
u32 max_index;
|
||||
u32 min_index;
|
||||
@ -91,7 +93,7 @@ struct rxe_pool {
|
||||
spinlock_t pool_lock; /* pool spinlock */
|
||||
size_t elem_size;
|
||||
struct kref ref_cnt;
|
||||
void (*cleanup)(void *obj);
|
||||
void (*cleanup)(struct rxe_pool_entry *obj);
|
||||
enum rxe_pool_state state;
|
||||
enum rxe_pool_flags flags;
|
||||
enum rxe_elem_type type;
|
||||
|
@ -273,13 +273,8 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
rxe_init_task(rxe, &qp->comp.task, qp,
rxe_completer, "comp");

init_timer(&qp->rnr_nak_timer);
qp->rnr_nak_timer.function = rnr_nak_timer;
qp->rnr_nak_timer.data = (unsigned long)qp;

init_timer(&qp->retrans_timer);
qp->retrans_timer.function = retransmit_timer;
qp->retrans_timer.data = (unsigned long)qp;
setup_timer(&qp->rnr_nak_timer, rnr_nak_timer, (unsigned long)qp);
setup_timer(&qp->retrans_timer, retransmit_timer, (unsigned long)qp);
qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */

return 0;

@ -824,9 +819,9 @@ void rxe_qp_destroy(struct rxe_qp *qp)
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(void *arg)
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
struct rxe_qp *qp = arg;
struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

rxe_drop_all_mcast_groups(qp);
||||
|
||||
|
@ -389,7 +389,7 @@ int rxe_rcv(struct sk_buff *skb)
calc_icrc = rxe_icrc_hdr(pkt, skb);
calc_icrc = crc32_le(calc_icrc, (u8 *)payload_addr(pkt),
payload_size(pkt));
calc_icrc = cpu_to_be32(~calc_icrc);
calc_icrc = (__force u32)cpu_to_be32(~calc_icrc);
if (unlikely(calc_icrc != pack_icrc)) {
if (skb->protocol == htons(ETH_P_IPV6))
pr_warn_ratelimited("bad ICRC from %pI6c\n",