RDMA for v6.7

The usual collection of patches:
 
 - Bug fixes for hns, mlx5, and hfi1
 
 - Hardening patches for size_*, counted_by, strscpy
 
 - rts fixes from static analysis
 
 - Dump SRQ objects in rdma netlink, with hns support
 
 - Fix a performance regression in mlx5 MR deregistration
 
 - New XDR (200Gb/lane) link speed
 
 - SRQ record doorbell latency optimization for hns
 
 - IPSEC support for mlx5 multi-port mode
 
 - ibv_rereg_mr() support for irdma
 
 - Affiliated event support for bnxt_re
 
 - Opt out for the spec compliant qkey security enforcement as we
   discovered SW that breaks under enforcement
 
 - Comment and trivial updates
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQRRRCHOFoQz/8F5bUaFwuHvBreFYQUCZUEvXwAKCRCFwuHvBreF
 YQ/iAPkBl7vCH+oFGJKnh9/pUjKrWwRYBkGNgNesH7lX4Q/cIAEA+R3bAI2d4O4i
 tMrRRowTnxohbVRebuy4Pfsoo6m9dA4=
 =TxMJ
 -----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma updates from Jason Gunthorpe:
 "Nothing exciting this cycle, most of the diffstat is changing SPDX
  'or' to 'OR'.

  Summary:

   - Bugfixes for hns, mlx5, and hfi1

   - Hardening patches for size_*, counted_by, strscpy

   - rts fixes from static analysis

   - Dump SRQ objects in rdma netlink, with hns support

   - Fix a performance regression in mlx5 MR deregistration

   - New XDR (200Gb/lane) link speed

   - SRQ record doorbell latency optimization for hns

   - IPSEC support for mlx5 multi-port mode

   - ibv_rereg_mr() support for irdma

   - Affiliated event support for bnxt_re

   - Opt out for the spec compliant qkey security enforcement as we
     discovered SW that breaks under enforcement

   - Comment and trivial updates"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (50 commits)
  IB/mlx5: Fix init stage error handling to avoid double free of same QP and UAF
  RDMA/mlx5: Fix mkey cache WQ flush
  RDMA/hfi1: Workaround truncation compilation error
  IB/hfi1: Fix potential deadlock on &irq_src_lock and &dd->uctxt_lock
  RDMA/core: Remove NULL check before dev_{put, hold}
  RDMA/hfi1: Remove redundant assignment to pointer ppd
  RDMA/mlx5: Change the key being sent for MPV device affiliation
  RDMA/bnxt_re: Fix clang -Wimplicit-fallthrough in bnxt_re_handle_cq_async_error()
  RDMA/hns: Fix init failure of RoCE VF and HIP08
  RDMA/hns: Fix unnecessary port_num transition in HW stats allocation
  RDMA/hns: The UD mode can only be configured with DCQCN
  RDMA/hns: Add check for SL
  RDMA/hns: Fix signed-unsigned mixed comparisons
  RDMA/hns: Fix uninitialized ucmd in hns_roce_create_qp_common()
  RDMA/hns: Fix printing level of asynchronous events
  RDMA/core: Add support to set privileged QKEY parameter
  RDMA/bnxt_re: Do not report SRQ error in srq notification
  RDMA/bnxt_re: Report async events and errors
  RDMA/bnxt_re: Update HW interface headers
  IB/mlx5: Fix rdma counter binding for RAW QP
  ...
Linus Torvalds 2023-11-02 15:20:30 -10:00
commit 43468456c9
194 changed files with 1178 additions and 498 deletions

View File

@ -46,7 +46,7 @@
struct ib_pkey_cache {
int table_len;
u16 table[];
u16 table[] __counted_by(table_len);
};
struct ib_update_work {
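
The __counted_by() annotation ties a flexible array's bounds to its length member, so CONFIG_FORTIFY_SOURCE and CONFIG_UBSAN_BOUNDS can check accesses at run time. A minimal sketch of the pattern (hypothetical names, not the rdma code):

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct pkey_table {
		int table_len;
		u16 table[] __counted_by(table_len);	/* bounds come from table_len */
	};

	static struct pkey_table *pkey_table_alloc(int n)
	{
		/* struct_size() is sizeof(*t) + n * sizeof(u16), saturating to
		 * SIZE_MAX on overflow so the allocation fails instead of wrapping. */
		struct pkey_table *t = kzalloc(struct_size(t, table, n), GFP_KERNEL);

		if (t)
			t->table_len = n;	/* must be set before t->table[] is used */
		return t;
	}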

View File

@ -373,4 +373,5 @@ void rdma_umap_priv_init(struct rdma_umap_priv *priv,
void ib_cq_pool_cleanup(struct ib_device *dev);
bool rdma_nl_get_privileged_qkey(void);
#endif /* _CORE_PRIV_H */

View File

@ -804,7 +804,7 @@ static int alloc_port_data(struct ib_device *device)
* empty slots at the beginning.
*/
pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata,
rdma_end_port(device) + 1),
size_add(rdma_end_port(device), 1)),
GFP_KERNEL);
if (!pdata_rcu)
return -ENOMEM;
@ -2651,6 +2651,8 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, fill_res_mr_entry_raw);
SET_DEVICE_OP(dev_ops, fill_res_qp_entry);
SET_DEVICE_OP(dev_ops, fill_res_qp_entry_raw);
SET_DEVICE_OP(dev_ops, fill_res_srq_entry);
SET_DEVICE_OP(dev_ops, fill_res_srq_entry_raw);
SET_DEVICE_OP(dev_ops, fill_stat_mr_entry);
SET_DEVICE_OP(dev_ops, get_dev_fw_str);
SET_DEVICE_OP(dev_ops, get_dma_mr);
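
The size_add()/size_mul()/size_sub() conversions above (and in the sa_query, sysfs and user_mad hunks below) come from <linux/overflow.h>: they saturate at SIZE_MAX instead of wrapping, so a huge or attacker-influenced count can never fold into a small allocation; kzalloc(SIZE_MAX, ...) simply fails. Illustrative values:

	#include <linux/overflow.h>

	/* Plain size_t arithmetic wraps: */
	size_t bad  = (SIZE_MAX - 1) + 2;		/* 0 -> under-allocation */

	/* The saturating helpers pin the result at SIZE_MAX instead: */
	size_t good = size_add(SIZE_MAX - 1, 2);	/* SIZE_MAX -> kzalloc() fails */
	size_t prod = size_mul(SIZE_MAX / 2, 4);	/* SIZE_MAX, not wrapped */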

View File

@ -102,8 +102,7 @@ static struct net_device *rdma_get_xmit_slave_udp(struct ib_device *device,
void rdma_lag_put_ah_roce_slave(struct net_device *xmit_slave)
{
if (xmit_slave)
dev_put(xmit_slave);
dev_put(xmit_slave);
}
struct net_device *rdma_lag_get_ah_roce_slave(struct ib_device *device,

View File

@ -43,6 +43,13 @@
#include "restrack.h"
#include "uverbs.h"
/*
* This determines whether a non-privileged user is allowed to specify a
* controlled QKEY or not, when true non-privileged user is allowed to specify
* a controlled QKEY.
*/
static bool privileged_qkey;
typedef int (*res_fill_func_t)(struct sk_buff*, bool,
struct rdma_restrack_entry*, uint32_t);
@ -156,6 +163,7 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK] = { .type = NLA_U8 },
[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX] = { .type = NLA_U32 },
[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC] = { .type = NLA_U8 },
[RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE] = { .type = NLA_U8 },
};
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
@ -237,6 +245,12 @@ int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);
bool rdma_nl_get_privileged_qkey(void)
{
return privileged_qkey || capable(CAP_NET_RAW);
}
EXPORT_SYMBOL(rdma_nl_get_privileged_qkey);
static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
@ -357,8 +371,7 @@ static int fill_port_info(struct sk_buff *msg,
}
out:
if (netdev)
dev_put(netdev);
dev_put(netdev);
return ret;
}
@ -818,6 +831,7 @@ static int fill_res_srq_entry(struct sk_buff *msg, bool has_cap_net_admin,
struct rdma_restrack_entry *res, uint32_t port)
{
struct ib_srq *srq = container_of(res, struct ib_srq, res);
struct ib_device *dev = srq->device;
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SRQN, srq->res.id))
goto err;
@ -837,12 +851,29 @@ static int fill_res_srq_entry(struct sk_buff *msg, bool has_cap_net_admin,
if (fill_res_srq_qps(msg, srq))
goto err;
return fill_res_name_pid(msg, res);
if (fill_res_name_pid(msg, res))
goto err;
if (dev->ops.fill_res_srq_entry)
return dev->ops.fill_res_srq_entry(msg, srq);
return 0;
err:
return -EMSGSIZE;
}
static int fill_res_srq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
struct rdma_restrack_entry *res, uint32_t port)
{
struct ib_srq *srq = container_of(res, struct ib_srq, res);
struct ib_device *dev = srq->device;
if (!dev->ops.fill_res_srq_entry_raw)
return -EINVAL;
return dev->ops.fill_res_srq_entry_raw(msg, srq);
}
static int fill_stat_counter_mode(struct sk_buff *msg,
struct rdma_counter *counter)
{
@ -1652,6 +1683,7 @@ RES_GET_FUNCS(mr_raw, RDMA_RESTRACK_MR);
RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER);
RES_GET_FUNCS(ctx, RDMA_RESTRACK_CTX);
RES_GET_FUNCS(srq, RDMA_RESTRACK_SRQ);
RES_GET_FUNCS(srq_raw, RDMA_RESTRACK_SRQ);
static LIST_HEAD(link_ops);
static DECLARE_RWSEM(link_ops_rwsem);
@ -1882,6 +1914,12 @@ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
return err;
}
err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE,
(u8)privileged_qkey);
if (err) {
nlmsg_free(msg);
return err;
}
/*
* Copy-on-fork is supported.
* See commits:
@ -1898,18 +1936,11 @@ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
}
static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
static int nldev_set_sys_set_netns_doit(struct nlattr *tb[])
{
struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
u8 enable;
int err;
err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
nldev_policy, extack);
if (err || !tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
return -EINVAL;
enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]);
/* Only 0 and 1 are supported */
if (enable > 1)
@ -1919,6 +1950,40 @@ static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
return err;
}
static int nldev_set_sys_set_pqkey_doit(struct nlattr *tb[])
{
u8 enable;
enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE]);
/* Only 0 and 1 are supported */
if (enable > 1)
return -EINVAL;
privileged_qkey = enable;
return 0;
}
static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
int err;
err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
nldev_policy, extack);
if (err)
return -EINVAL;
if (tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
return nldev_set_sys_set_netns_doit(tb);
if (tb[RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE])
return nldev_set_sys_set_pqkey_doit(tb);
return -EINVAL;
}
static int nldev_stat_set_mode_doit(struct sk_buff *msg,
struct netlink_ext_ack *extack,
struct nlattr *tb[],
@ -2558,6 +2623,11 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
.dump = nldev_res_get_mr_raw_dumpit,
.flags = RDMA_NL_ADMIN_PERM,
},
[RDMA_NLDEV_CMD_RES_SRQ_GET_RAW] = {
.doit = nldev_res_get_srq_raw_doit,
.dump = nldev_res_get_srq_raw_dumpit,
.flags = RDMA_NL_ADMIN_PERM,
},
[RDMA_NLDEV_CMD_STAT_GET_STATUS] = {
.doit = nldev_stat_get_counter_status_doit,
},
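
With the split above, RDMA_NLDEV_CMD_SYS_SET parses once and dispatches to the netns-mode or privileged-qkey handler depending on which attribute is present. The privileged-qkey knob defaults to 0 (spec-compliant enforcement); recent iproute2 exposes it along the lines of "rdma system set privileged-qkey on" and reports it in "rdma system show", though the exact tool syntax is an assumption; check your iproute2 version.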

View File

@ -666,7 +666,7 @@ void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
factor = 1;
/*
* If the devices needs MRs to perform RDMA READ or WRITE operations,
* If the device needs MRs to perform RDMA READ or WRITE operations,
* we'll need two additional MRs for the registrations and the
* invalidation.
*/

View File

@ -2159,7 +2159,9 @@ static int ib_sa_add_one(struct ib_device *device)
s = rdma_start_port(device);
e = rdma_end_port(device);
sa_dev = kzalloc(struct_size(sa_dev, port, e - s + 1), GFP_KERNEL);
sa_dev = kzalloc(struct_size(sa_dev, port,
size_add(size_sub(e, s), 1)),
GFP_KERNEL);
if (!sa_dev)
return -ENOMEM;

View File

@ -342,6 +342,10 @@ static ssize_t rate_show(struct ib_device *ibdev, u32 port_num,
speed = " NDR";
rate = 1000;
break;
case IB_SPEED_XDR:
speed = " XDR";
rate = 2000;
break;
case IB_SPEED_SDR:
default: /* default to SDR for invalid rates */
speed = " SDR";
@ -903,7 +907,7 @@ alloc_hw_stats_device(struct ib_device *ibdev)
* Two extra attribue elements here, one for the lifespan entry and
* one to NULL terminate the list for the sysfs core code
*/
data = kzalloc(struct_size(data, attrs, stats->num_counters + 1),
data = kzalloc(struct_size(data, attrs, size_add(stats->num_counters, 1)),
GFP_KERNEL);
if (!data)
goto err_free_stats;
@ -1009,7 +1013,7 @@ alloc_hw_stats_port(struct ib_port *port, struct attribute_group *group)
* Two extra attribue elements here, one for the lifespan entry and
* one to NULL terminate the list for the sysfs core code
*/
data = kzalloc(struct_size(data, attrs, stats->num_counters + 1),
data = kzalloc(struct_size(data, attrs, size_add(stats->num_counters, 1)),
GFP_KERNEL);
if (!data)
goto err_free_stats;
@ -1140,7 +1144,7 @@ static int setup_gid_attrs(struct ib_port *port,
int ret;
gid_attr_group = kzalloc(struct_size(gid_attr_group, attrs_list,
attr->gid_tbl_len * 2),
size_mul(attr->gid_tbl_len, 2)),
GFP_KERNEL);
if (!gid_attr_group)
return -ENOMEM;
@ -1205,8 +1209,8 @@ static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num,
int ret;
p = kvzalloc(struct_size(p, attrs_list,
attr->gid_tbl_len + attr->pkey_tbl_len),
GFP_KERNEL);
size_add(attr->gid_tbl_len, attr->pkey_tbl_len)),
GFP_KERNEL);
if (!p)
return ERR_PTR(-ENOMEM);
p->ibdev = device;

View File

@ -1378,7 +1378,9 @@ static int ib_umad_add_one(struct ib_device *device)
s = rdma_start_port(device);
e = rdma_end_port(device);
umad_dev = kzalloc(struct_size(umad_dev, ports, e - s + 1), GFP_KERNEL);
umad_dev = kzalloc(struct_size(umad_dev, ports,
size_add(size_sub(e, s), 1)),
GFP_KERNEL);
if (!umad_dev)
return -ENOMEM;

View File

@ -1851,7 +1851,8 @@ static int modify_qp(struct uverbs_attr_bundle *attrs,
if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
attr->path_mig_state = cmd->base.path_mig_state;
if (cmd->base.attr_mask & IB_QP_QKEY) {
if (cmd->base.qkey & IB_QP_SET_QKEY && !capable(CAP_NET_RAW)) {
if (cmd->base.qkey & IB_QP_SET_QKEY &&
!rdma_nl_get_privileged_qkey()) {
ret = -EPERM;
goto release_qp;
}
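
Userspace hits this check through ibv_modify_qp(): per the IB spec, a QKEY with its most significant bit set is a "controlled" QKEY, and setting one now needs CAP_NET_RAW unless the global opt-out is enabled. A minimal sketch (the QKEY value is arbitrary; error handling trimmed):

	#include <stdio.h>
	#include <string.h>
	#include <infiniband/verbs.h>

	/* qp is an already-created UD QP. */
	static int set_controlled_qkey(struct ibv_qp *qp)
	{
		struct ibv_qp_attr attr = {
			.qp_state   = IBV_QPS_INIT,
			.pkey_index = 0,
			.port_num   = 1,
			.qkey       = 0x80010000,	/* MSB set => controlled QKEY */
		};
		int ret = ibv_modify_qp(qp, &attr, IBV_QP_STATE | IBV_QP_PKEY_INDEX |
						   IBV_QP_PORT | IBV_QP_QKEY);

		if (ret)	/* EPERM when enforcement applies */
			fprintf(stderr, "modify_qp: %s\n", strerror(ret));
		return ret;
	}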

View File

@ -203,6 +203,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)(
copy_port_attr_to_resp(&attr, &resp.legacy_resp, ib_dev, port_num);
resp.port_cap_flags2 = attr.port_cap_flags2;
resp.active_speed_ex = attr.active_speed;
return uverbs_copy_to_struct_or_zero(attrs, UVERBS_ATTR_QUERY_PORT_RESP,
&resp, sizeof(resp));
@ -461,7 +462,7 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ATTR_PTR_OUT(
UVERBS_ATTR_QUERY_PORT_RESP,
UVERBS_ATTR_STRUCT(struct ib_uverbs_query_port_resp_ex,
reserved),
active_speed_ex),
UA_MANDATORY));
DECLARE_UVERBS_NAMED_METHOD(

View File

@ -147,6 +147,7 @@ __attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
case IB_RATE_50_GBPS: return 20;
case IB_RATE_400_GBPS: return 160;
case IB_RATE_600_GBPS: return 240;
case IB_RATE_800_GBPS: return 320;
default: return -1;
}
}
@ -176,6 +177,7 @@ __attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
case 20: return IB_RATE_50_GBPS;
case 160: return IB_RATE_400_GBPS;
case 240: return IB_RATE_600_GBPS;
case 320: return IB_RATE_800_GBPS;
default: return IB_RATE_PORT_CURRENT;
}
}
@ -205,6 +207,7 @@ __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
case IB_RATE_50_GBPS: return 53125;
case IB_RATE_400_GBPS: return 425000;
case IB_RATE_600_GBPS: return 637500;
case IB_RATE_800_GBPS: return 850000;
default: return -1;
}
}
@ -366,7 +369,7 @@ void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
EXPORT_SYMBOL(rdma_copy_ah_attr);
/**
* rdma_replace_ah_attr - Replace valid ah_attr with new new one.
* rdma_replace_ah_attr - Replace valid ah_attr with new one.
* @old: Pointer to existing ah_attr which needs to be replaced.
* old is assumed to be valid or zero'd
* @new: Pointer to the new ah_attr.
@ -744,7 +747,7 @@ EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
/* Resolve destination mac address and hop limit for unicast destination
* GID entry, considering the source GID entry as well.
* ah_attribute must have have valid port_num, sgid_index.
* ah_attribute must have valid port_num, sgid_index.
*/
static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
struct rdma_ah_attr *ah_attr)
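
A consistency check on the new entries above: ib_rate_to_mult() counts in 2.5 Gb/s SDR-lane units, so IB_RATE_800_GBPS maps to 800 / 2.5 = 320, and ib_rate_to_mbps() tracks the raw signaling rate, 850,000 Mb/s for 800 Gb/s, matching the existing 425,000 Mb/s for 400 Gb/s (both carry the ~6.25% encoding/FEC overhead visible in the 50 Gb/s -> 53,125 Mb/s entry). Likewise the sysfs rate table earlier in this series counts in 0.1 Gb/s-per-lane units, so XDR's rate = 2000 is the 200 Gb/lane named in the summary.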

View File

@ -970,6 +970,9 @@ static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
struct bnxt_re_qp *qp)
{
struct bnxt_re_srq *srq = container_of(qp->qplib_qp.srq, struct bnxt_re_srq,
qplib_srq);
struct creq_qp_error_notification *err_event;
struct ib_event event = {};
unsigned int flags;
@ -980,14 +983,147 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
bnxt_re_unlock_cqs(qp, flags);
}
if (qp->qplib_qp.srq) {
event.device = &qp->rdev->ibdev;
event.element.qp = &qp->ib_qp;
event.event = IB_EVENT_QP_LAST_WQE_REACHED;
event.device = &qp->rdev->ibdev;
event.element.qp = &qp->ib_qp;
event.event = IB_EVENT_QP_FATAL;
err_event = (struct creq_qp_error_notification *)qp_event;
switch (err_event->req_err_state_reason) {
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_OPCODE_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TIMEOUT_RETRY_LIMIT:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RNR_TIMEOUT_RETRY_LIMIT:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_2:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_3:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_READ_RESP:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_BIND:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_FAST_REG:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_INVALIDATE:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETRAN_LOCAL_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_AV_DOMAIN_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PROD_WQE_MSMTCH_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PSN_RANGE_CHECK_ERROR:
event.event = IB_EVENT_QP_ACCESS_ERR;
break;
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_1:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_4:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_READ_RESP_LENGTH:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_WQE_FORMAT_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ORRQ_FORMAT_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_AVID_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_SERV_TYPE_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_OP_ERROR:
event.event = IB_EVENT_QP_REQ_ERR;
break;
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_MEMORY_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_MEMORY_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CMP_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CQ_LOAD_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_PCI_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_PCI_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETX_SETUP_ERROR:
event.event = IB_EVENT_QP_FATAL;
break;
default:
break;
}
if (event.device && qp->ib_qp.event_handler)
switch (err_event->res_err_state_reason) {
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEED_MAX:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PAYLOAD_LENGTH_MISMATCH:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_SEQ_ERROR_RETRY_LIMIT:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_INVALID_R_KEY:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_DOMAIN_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_NO_PERMISSION:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_RANGE_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_INVALID_R_KEY:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_DOMAIN_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_NO_PERMISSION:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_RANGE_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNALIGN_ATOMIC:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_NOT_FOUND:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_INVALID_DUP_RKEY:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_FORMAT_ERROR:
event.event = IB_EVENT_QP_ACCESS_ERR;
break;
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEEDS_WQE:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_WQE_FORMAT_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNSUPPORTED_OPCODE:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_REM_INVALIDATE:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_OPCODE_ERROR:
event.event = IB_EVENT_QP_REQ_ERR;
break;
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_OFLOW:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CMP_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CQ_LOAD_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_PCI_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_PCI_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_MEMORY_ERROR:
event.event = IB_EVENT_QP_FATAL;
break;
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_LOAD_ERROR:
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_ERROR:
if (srq)
event.event = IB_EVENT_SRQ_ERR;
break;
default:
break;
}
if (err_event->res_err_state_reason || err_event->req_err_state_reason) {
ibdev_dbg(&qp->rdev->ibdev,
"%s %s qp_id: %d cons (%d %d) req (%d %d) res (%d %d)\n",
__func__, rdma_is_kernel_res(&qp->ib_qp.res) ? "kernel" : "user",
qp->qplib_qp.id,
err_event->sq_cons_idx,
err_event->rq_cons_idx,
err_event->req_slow_path_state,
err_event->req_err_state_reason,
err_event->res_slow_path_state,
err_event->res_err_state_reason);
} else {
if (srq)
event.event = IB_EVENT_QP_LAST_WQE_REACHED;
}
if (event.event == IB_EVENT_SRQ_ERR && srq->ib_srq.event_handler) {
(*srq->ib_srq.event_handler)(&event,
srq->ib_srq.srq_context);
} else if (event.device && qp->ib_qp.event_handler) {
qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
}
return 0;
}
static int bnxt_re_handle_cq_async_error(void *event, struct bnxt_re_cq *cq)
{
struct creq_cq_error_notification *cqerr;
struct ib_event ibevent = {};
cqerr = event;
switch (cqerr->cq_err_reason) {
case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_INVALID_ERROR:
case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_OVERFLOW_ERROR:
case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_LOAD_ERROR:
case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_INVALID_ERROR:
case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_OVERFLOW_ERROR:
case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_LOAD_ERROR:
ibevent.event = IB_EVENT_CQ_ERR;
break;
default:
break;
}
if (ibevent.event == IB_EVENT_CQ_ERR && cq->ib_cq.event_handler) {
ibevent.element.cq = &cq->ib_cq;
ibevent.device = &cq->rdev->ibdev;
ibdev_dbg(&cq->rdev->ibdev,
"%s err reason %d\n", __func__, cqerr->cq_err_reason);
cq->ib_cq.event_handler(&ibevent, cq->ib_cq.cq_context);
}
return 0;
}
@ -995,6 +1131,10 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async,
void *obj)
{
struct bnxt_qplib_qp *lib_qp;
struct bnxt_qplib_cq *lib_cq;
struct bnxt_re_qp *qp;
struct bnxt_re_cq *cq;
int rc = 0;
u8 event;
@ -1002,11 +1142,19 @@ static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async,
return rc; /* QP was already dead, still return success */
event = affi_async->event;
if (event == CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION) {
struct bnxt_qplib_qp *lib_qp = obj;
struct bnxt_re_qp *qp = container_of(lib_qp, struct bnxt_re_qp,
qplib_qp);
switch (event) {
case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
lib_qp = obj;
qp = container_of(lib_qp, struct bnxt_re_qp, qplib_qp);
rc = bnxt_re_handle_qp_async_event(affi_async, qp);
break;
case CREQ_QP_EVENT_EVENT_CQ_ERROR_NOTIFICATION:
lib_cq = obj;
cq = container_of(lib_cq, struct bnxt_re_cq, qplib_cq);
rc = bnxt_re_handle_cq_async_error(affi_async, cq);
break;
default:
rc = -EINVAL;
}
return rc;
}
@ -1040,13 +1188,10 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
ib_event.device = &srq->rdev->ibdev;
ib_event.element.srq = &srq->ib_srq;
if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED;
else
ib_event.event = IB_EVENT_SRQ_ERR;
if (srq->ib_srq.event_handler) {
/* Lock event_handler? */
if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED;
(*srq->ib_srq.event_handler)(&ib_event,
srq->ib_srq.srq_context);
}
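
These affiliated errors reach consumers as IB async events. In userspace a consumer typically drains them on a dedicated thread; a minimal libibverbs sketch (ctx is an open ibv_context):

	#include <stdio.h>
	#include <infiniband/verbs.h>

	static void drain_async_events(struct ibv_context *ctx)
	{
		struct ibv_async_event ev;

		while (!ibv_get_async_event(ctx, &ev)) {	/* blocks for the next event */
			switch (ev.event_type) {
			case IBV_EVENT_QP_FATAL:
			case IBV_EVENT_QP_REQ_ERR:
			case IBV_EVENT_QP_ACCESS_ERR:
				fprintf(stderr, "QP %u async event %d\n",
					ev.element.qp->qp_num, ev.event_type);
				break;
			case IBV_EVENT_SRQ_ERR:
				fprintf(stderr, "SRQ error\n");
				break;
			default:
				break;
			}
			ibv_ack_async_event(&ev);	/* every event must be acked */
		}
	}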

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
#ifndef __QPLIB_TLV_H__
#define __QPLIB_TLV_H__

View File

@ -2919,6 +2919,35 @@ struct creq_qp_error_notification {
u8 status;
u8 req_slow_path_state;
u8 req_err_state_reason;
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_NO_ERROR 0X0UL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_OPCODE_ERROR 0X1UL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TIMEOUT_RETRY_LIMIT 0X2UL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RNR_TIMEOUT_RETRY_LIMIT 0X3UL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_1 0X4UL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_2 0X5UL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_3 0X6UL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_4 0X7UL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_MEMORY_ERROR 0X8UL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_MEMORY_ERROR 0X9UL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_READ_RESP_LENGTH 0XAUL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_READ_RESP 0XBUL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_BIND 0XCUL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_FAST_REG 0XDUL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_INVALIDATE 0XEUL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CMP_ERROR 0XFUL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETRAN_LOCAL_ERROR 0X10UL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_WQE_FORMAT_ERROR 0X11UL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ORRQ_FORMAT_ERROR 0X12UL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_AVID_ERROR 0X13UL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_AV_DOMAIN_ERROR 0X14UL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CQ_LOAD_ERROR 0X15UL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_SERV_TYPE_ERROR 0X16UL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_OP_ERROR 0X17UL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_PCI_ERROR 0X18UL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_PCI_ERROR 0X19UL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PROD_WQE_MSMTCH_ERROR 0X1AUL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PSN_RANGE_CHECK_ERROR 0X1BUL
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETX_SETUP_ERROR 0X1CUL
__le32 xid;
u8 v;
#define CREQ_QP_ERROR_NOTIFICATION_V 0x1UL
@ -2928,6 +2957,35 @@ struct creq_qp_error_notification {
CREQ_QP_ERROR_NOTIFICATION_EVENT_QP_ERROR_NOTIFICATION
u8 res_slow_path_state;
u8 res_err_state_reason;
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_NO_ERROR 0x0UL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEED_MAX 0x1UL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PAYLOAD_LENGTH_MISMATCH 0x2UL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEEDS_WQE 0x3UL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_OPCODE_ERROR 0x4UL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_SEQ_ERROR_RETRY_LIMIT 0x5UL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_INVALID_R_KEY 0x6UL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_DOMAIN_ERROR 0x7UL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_NO_PERMISSION 0x8UL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_RANGE_ERROR 0x9UL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_INVALID_R_KEY 0xaUL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_DOMAIN_ERROR 0xbUL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_NO_PERMISSION 0xcUL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_RANGE_ERROR 0xdUL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_OFLOW 0xeUL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNSUPPORTED_OPCODE 0xfUL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNALIGN_ATOMIC 0x10UL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_REM_INVALIDATE 0x11UL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_MEMORY_ERROR 0x12UL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_ERROR 0x13UL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CMP_ERROR 0x14UL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_INVALID_DUP_RKEY 0x15UL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_WQE_FORMAT_ERROR 0x16UL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_FORMAT_ERROR 0x17UL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CQ_LOAD_ERROR 0x18UL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_LOAD_ERROR 0x19UL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_PCI_ERROR 0x1bUL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_PCI_ERROR 0x1cUL
#define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_NOT_FOUND 0x1dUL
__le16 sq_cons_idx;
__le16 rq_cons_idx;
};

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015-2017 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
* Copyright(c) 2021 Cornelis Networks.
@ -5334,7 +5334,7 @@ static const char * const cce_misc_names[] = {
static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
{
if (source < ARRAY_SIZE(cce_misc_names))
strncpy(buf, cce_misc_names[source], bsize);
strscpy_pad(buf, cce_misc_names[source], bsize);
else
snprintf(buf, bsize, "Reserved%u",
source + IS_GENERAL_ERR_START);
@ -5374,7 +5374,7 @@ static const char * const various_names[] = {
static char *is_various_name(char *buf, size_t bsize, unsigned int source)
{
if (source < ARRAY_SIZE(various_names))
strncpy(buf, various_names[source], bsize);
strscpy_pad(buf, various_names[source], bsize);
else
snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
return buf;
@ -13185,15 +13185,16 @@ static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
{
u64 reg;
u16 idx = src / BITS_PER_REGISTER;
unsigned long flags;
spin_lock(&dd->irq_src_lock);
spin_lock_irqsave(&dd->irq_src_lock, flags);
reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
if (set)
reg |= bits;
else
reg &= ~bits;
write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
spin_unlock(&dd->irq_src_lock);
spin_unlock_irqrestore(&dd->irq_src_lock, flags);
}
/**
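
Two distinct fixes in this file: strscpy_pad() guarantees NUL termination where strncpy() silently leaves the buffer unterminated when the source fills it (while keeping strncpy's zero-padding of the remainder), and read_mod_write() moves to spin_lock_irqsave() because the lock is also taken from interrupt context, where plain spin_lock() can deadlock if the IRQ fires on the same CPU while the lock is held. The string half, as a sketch:

	#include <linux/string.h>

	char buf[8];

	/* strncpy() copies 8 bytes and stops: buf has no trailing NUL. */
	strncpy(buf, "0123456789", sizeof(buf));

	/* strscpy_pad() always terminates, zero-pads the remainder, and
	 * returns -E2BIG on truncation instead of clipping silently. */
	ssize_t n = strscpy_pad(buf, "0123456789", sizeof(buf));	/* n == -E2BIG */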

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015-2018 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015, 2016, 2018 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015-2020 Intel Corporation.
* Copyright(c) 2021 Cornelis Networks.

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*/
@ -112,7 +112,7 @@ int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind,
unsigned long *size, void **return_data)
{
char prefix_name[64];
char name[64];
char name[128];
int result;
/* create a common prefix */
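
The name buffer doubles because it is assembled from the 64-byte prefix plus the kind string, and -Wformat-truncation reasons about the worst case rather than the typical one. The warning pattern, illustrated (not the hfi1 code):

	#include <stdio.h>

	void demo(const char *kind)
	{
		char prefix[64] = "hfi1";
		char small[64], big[128];

		/* worst case is 63 + 1 + strlen(kind) + 1 bytes: gcc warns */
		snprintf(small, sizeof(small), "%s-%s", prefix, kind);

		/* destination now covers the worst case: no warning */
		snprintf(big, sizeof(big), "%s-%s", prefix, kind);
	}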

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2017 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2017 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2018 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2018 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2015-2020 Intel Corporation.

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015 - 2017 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2020-2023 Cornelis Networks, Inc.
* Copyright(c) 2015-2020 Intel Corporation.

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
* Copyright(c) 2021 Cornelis Networks.
@ -1027,7 +1027,6 @@ static void shutdown_device(struct hfi1_devdata *dd)
msix_clean_up_interrupts(dd);
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
for (i = 0; i < dd->num_rcv_contexts; i++) {
rcd = hfi1_rcd_get_by_index(dd, i);
hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
*/

View File

@ -217,7 +217,7 @@ static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
ret = sdma_txadd_page(dd,
txreq,
skb_frag_page(frag),
frag->bv_offset,
skb_frag_off(frag),
skb_frag_size(frag),
NULL, NULL, NULL);
if (unlikely(ret))
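
skb_frag_off() is the accessor for a fragment's page offset: skb fragments are bio_vecs internally, and reading frag->bv_offset directly bypasses the API, which is what broke here. The accessor trio, for reference (skb is an assumed struct sk_buff * with at least one fragment):

	#include <linux/skbuff.h>

	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];

	struct page *page = skb_frag_page(frag);	/* backing page     */
	unsigned int off  = skb_frag_off(frag);		/* offset into page */
	unsigned int len  = skb_frag_size(frag);	/* fragment length  */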

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015-2018 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015 - 2017 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2016 - 2017 Intel Corporation.

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2016 Intel Corporation.

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*/

View File

@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015 - 2019 Intel Corporation.
*/
#include <linux/bitfield.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
@ -210,12 +211,6 @@ static u32 extract_speed(u16 linkstat)
return speed;
}
/* return the PCIe link speed from the given link status */
static u32 extract_width(u16 linkstat)
{
return (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
}
/* read the link status and set dd->{lbus_width,lbus_speed,lbus_info} */
static void update_lbus_info(struct hfi1_devdata *dd)
{
@ -228,7 +223,7 @@ static void update_lbus_info(struct hfi1_devdata *dd)
return;
}
dd->lbus_width = extract_width(linkstat);
dd->lbus_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat);
dd->lbus_speed = extract_speed(linkstat);
snprintf(dd->lbus_info, sizeof(dd->lbus_info),
"PCIe,%uMHz,x%u", dd->lbus_speed, dd->lbus_width);

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015-2018 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015-2017 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2016 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015-2017 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015 - 2017 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2017 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015, 2016, 2017 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015 - 2017 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015 - 2019 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2015-2018 Intel Corporation.

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2020 - Cornelis Networks, Inc.
* Copyright(c) 2015 - 2017 Intel Corporation.
@ -36,7 +36,7 @@ struct tid_rb_node {
dma_addr_t dma_addr;
bool freed;
unsigned int npages;
struct page *pages[];
struct page *pages[] __counted_by(npages);
};
static inline int num_user_pages(unsigned long addr,

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015-2017 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2020 - 2023 Cornelis Networks, Inc.
* Copyright(c) 2015 - 2018 Intel Corporation.

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2023 - Cornelis Networks, Inc.
* Copyright(c) 2015 - 2018 Intel Corporation.

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2015 - 2018 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2016 - 2018 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2016 - 2018 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2017 - 2020 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2017 - 2020 Intel Corporation.
*/

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2017 - 2018 Intel Corporation.
*/

View File

@ -33,7 +33,9 @@
#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include "hnae3.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"
static inline u16 get_ah_udp_sport(const struct rdma_ah_attr *ah_attr)
{
@ -57,6 +59,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device);
struct hns_roce_ah *ah = to_hr_ah(ibah);
int ret = 0;
u32 max_sl;
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata)
return -EOPNOTSUPP;
@ -70,9 +73,17 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
ah->av.hop_limit = grh->hop_limit;
ah->av.flowlabel = grh->flow_label;
ah->av.udp_sport = get_ah_udp_sport(ah_attr);
ah->av.sl = rdma_ah_get_sl(ah_attr);
ah->av.tclass = get_tclass(grh);
ah->av.sl = rdma_ah_get_sl(ah_attr);
max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
if (unlikely(ah->av.sl > max_sl)) {
ibdev_err_ratelimited(&hr_dev->ib_dev,
"failed to set sl, sl (%u) shouldn't be larger than %u.\n",
ah->av.sl, max_sl);
return -EINVAL;
}
memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);

View File

@ -146,6 +146,7 @@ enum {
HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14),
HNS_ROCE_CAP_FLAG_STASH = BIT(17),
HNS_ROCE_CAP_FLAG_CQE_INLINE = BIT(19),
HNS_ROCE_CAP_FLAG_SRQ_RECORD_DB = BIT(22),
};
#define HNS_ROCE_DB_TYPE_COUNT 2
@ -453,6 +454,8 @@ struct hns_roce_srq {
spinlock_t lock;
struct mutex mutex;
void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
struct hns_roce_db rdb;
u32 cap_flags;
};
struct hns_roce_uar_table {
@ -908,6 +911,7 @@ struct hns_roce_hw {
int (*query_cqc)(struct hns_roce_dev *hr_dev, u32 cqn, void *buffer);
int (*query_qpc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
int (*query_mpt)(struct hns_roce_dev *hr_dev, u32 key, void *buffer);
int (*query_srqc)(struct hns_roce_dev *hr_dev, u32 srqn, void *buffer);
int (*query_hw_counter)(struct hns_roce_dev *hr_dev,
u64 *stats, u32 port, int *hw_counters);
const struct ib_device_ops *hns_roce_dev_ops;
@ -1239,6 +1243,8 @@ int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp);
int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp);
int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr);
int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr);
int hns_roce_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq);
int hns_roce_fill_res_srq_entry_raw(struct sk_buff *msg, struct ib_srq *ib_srq);
struct hns_user_mmap_entry *
hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
size_t length,

View File

@ -270,7 +270,7 @@ static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
int mtu = ib_mtu_enum_to_int(qp->path_mtu);
if (len > qp->max_inline_data || len > mtu) {
if (mtu < 0 || len > qp->max_inline_data || len > mtu) {
ibdev_err(&hr_dev->ib_dev,
"invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
len, qp->max_inline_data, mtu);
@ -941,20 +941,23 @@ static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
idx_que->head++;
}
static void update_srq_db(struct hns_roce_v2_db *db, struct hns_roce_srq *srq)
static void update_srq_db(struct hns_roce_srq *srq)
{
hr_reg_write(db, DB_TAG, srq->srqn);
hr_reg_write(db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
hr_reg_write(db, DB_PI, srq->idx_que.head);
struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
struct hns_roce_v2_db db;
hr_reg_write(&db, DB_TAG, srq->srqn);
hr_reg_write(&db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
hr_reg_write(&db, DB_PI, srq->idx_que.head);
hns_roce_write64(hr_dev, (__le32 *)&db, srq->db_reg);
}
static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
struct hns_roce_srq *srq = to_hr_srq(ibsrq);
struct hns_roce_v2_db srq_db;
unsigned long flags;
int ret = 0;
u32 max_sge;
@ -985,9 +988,11 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
}
if (likely(nreq)) {
update_srq_db(&srq_db, srq);
hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg);
if (srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB)
*srq->rdb.db_record = srq->idx_que.head &
V2_DB_PRODUCER_IDX_M;
else
update_srq_db(srq);
}
spin_unlock_irqrestore(&srq->lock, flags);
@ -4725,6 +4730,9 @@ static int check_cong_type(struct ib_qp *ibqp,
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
if (ibqp->qp_type == IB_QPT_UD)
hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
/* different congestion types match different configurations */
switch (hr_dev->caps.cong_type) {
case CONG_TYPE_DCQCN:
@ -4821,22 +4829,32 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct ib_device *ibdev = &hr_dev->ib_dev;
const struct ib_gid_attr *gid_attr = NULL;
u8 sl = rdma_ah_get_sl(&attr->ah_attr);
int is_roce_protocol;
u16 vlan_id = 0xffff;
bool is_udp = false;
u32 max_sl;
u8 ib_port;
u8 hr_port;
int ret;
max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
if (unlikely(sl > max_sl)) {
ibdev_err_ratelimited(ibdev,
"failed to fill QPC, sl (%u) shouldn't be larger than %u.\n",
sl, max_sl);
return -EINVAL;
}
/*
* If free_mr_en of qp is set, it means that this qp comes from
* free mr. This qp will perform the loopback operation.
* In the loopback scenario, only sl needs to be set.
*/
if (hr_qp->free_mr_en) {
hr_reg_write(context, QPC_SL, rdma_ah_get_sl(&attr->ah_attr));
hr_reg_write(context, QPC_SL, sl);
hr_reg_clear(qpc_mask, QPC_SL);
hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
hr_qp->sl = sl;
return 0;
}
@ -4903,14 +4921,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
ibdev_err(ibdev,
"failed to fill QPC, sl (%u) shouldn't be larger than %d.\n",
hr_qp->sl, MAX_SERVICE_LEVEL);
return -EINVAL;
}
hr_qp->sl = sl;
hr_reg_write(context, QPC_SL, hr_qp->sl);
hr_reg_clear(qpc_mask, QPC_SL);
@ -5272,6 +5283,30 @@ out:
return ret;
}
static int hns_roce_v2_query_srqc(struct hns_roce_dev *hr_dev, u32 srqn,
void *buffer)
{
struct hns_roce_srq_context *context;
struct hns_roce_cmd_mailbox *mailbox;
int ret;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_SRQC,
srqn);
if (ret)
goto out;
memcpy(buffer, context, sizeof(*context));
out:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
return ret;
}
static u8 get_qp_timeout_attr(struct hns_roce_dev *hr_dev,
struct hns_roce_v2_qp_context *context)
{
@ -5606,6 +5641,14 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
hr_reg_write(ctx, SRQC_WQE_BUF_PG_SZ,
to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));
if (srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB) {
hr_reg_enable(ctx, SRQC_DB_RECORD_EN);
hr_reg_write(ctx, SRQC_DB_RECORD_ADDR_L,
lower_32_bits(srq->rdb.dma) >> 1);
hr_reg_write(ctx, SRQC_DB_RECORD_ADDR_H,
upper_32_bits(srq->rdb.dma));
}
return hns_roce_v2_write_srqc_index_queue(srq, ctx);
}
@ -5804,7 +5847,7 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
case HNS_ROCE_EVENT_TYPE_COMM_EST:
break;
case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
ibdev_warn(ibdev, "send queue drained.\n");
ibdev_dbg(ibdev, "send queue drained.\n");
break;
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
ibdev_err(ibdev, "local work queue 0x%x catast error, sub_event type is: %d\n",
@ -5819,10 +5862,10 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
irq_work->queue_num, irq_work->sub_type);
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
ibdev_warn(ibdev, "SRQ limit reach.\n");
ibdev_dbg(ibdev, "SRQ limit reach.\n");
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
ibdev_warn(ibdev, "SRQ last wqe reach.\n");
ibdev_dbg(ibdev, "SRQ last wqe reach.\n");
break;
case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
ibdev_err(ibdev, "SRQ catas error.\n");
@ -6632,6 +6675,7 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.query_cqc = hns_roce_v2_query_cqc,
.query_qpc = hns_roce_v2_query_qpc,
.query_mpt = hns_roce_v2_query_mpt,
.query_srqc = hns_roce_v2_query_srqc,
.query_hw_counter = hns_roce_hw_v2_query_counter,
.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
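
The latency win behind the SRQ record doorbell: with HNS_ROCE_SRQ_CAP_RECORD_DB negotiated, posting a receive only updates a producer index in a DMA-coherent page that the device reads (*srq->rdb.db_record above), instead of issuing an MMIO doorbell write (hns_roce_write64()) across PCIe for every post. Userspace requests the mode through req_cap_flags in the create-SRQ command, and the kernel confirms it via cap_flags in the response.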

View File

@ -547,17 +547,12 @@ static struct rdma_hw_stats *hns_roce_alloc_hw_port_stats(
struct ib_device *device, u32 port_num)
{
struct hns_roce_dev *hr_dev = to_hr_dev(device);
u32 port = port_num - 1;
if (port > hr_dev->caps.num_ports) {
if (port_num > hr_dev->caps.num_ports) {
ibdev_err(device, "invalid port num.\n");
return NULL;
}
if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 ||
hr_dev->is_vf)
return NULL;
return rdma_alloc_hw_stats_struct(hns_roce_port_stats_descs,
ARRAY_SIZE(hns_roce_port_stats_descs),
RDMA_HW_STATS_DEFAULT_LIFESPAN);
@ -577,10 +572,6 @@ static int hns_roce_get_hw_stats(struct ib_device *device,
if (port > hr_dev->caps.num_ports)
return -EINVAL;
if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 ||
hr_dev->is_vf)
return -EOPNOTSUPP;
ret = hr_dev->hw->query_hw_counter(hr_dev, stats->value, port,
&num_counters);
if (ret) {
@ -634,8 +625,6 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.query_pkey = hns_roce_query_pkey,
.query_port = hns_roce_query_port,
.reg_user_mr = hns_roce_reg_user_mr,
.alloc_hw_port_stats = hns_roce_alloc_hw_port_stats,
.get_hw_stats = hns_roce_get_hw_stats,
INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
@ -644,6 +633,11 @@ static const struct ib_device_ops hns_roce_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
};
static const struct ib_device_ops hns_roce_dev_hw_stats_ops = {
.alloc_hw_port_stats = hns_roce_alloc_hw_port_stats,
.get_hw_stats = hns_roce_get_hw_stats,
};
static const struct ib_device_ops hns_roce_dev_mr_ops = {
.rereg_user_mr = hns_roce_rereg_user_mr,
};
@ -681,6 +675,8 @@ static const struct ib_device_ops hns_roce_dev_restrack_ops = {
.fill_res_qp_entry_raw = hns_roce_fill_res_qp_entry_raw,
.fill_res_mr_entry = hns_roce_fill_res_mr_entry,
.fill_res_mr_entry_raw = hns_roce_fill_res_mr_entry_raw,
.fill_res_srq_entry = hns_roce_fill_res_srq_entry,
.fill_res_srq_entry_raw = hns_roce_fill_res_srq_entry_raw,
};
static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
@ -720,6 +716,10 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
ib_set_device_ops(ib_dev, &hns_roce_dev_xrcd_ops);
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09 &&
!hr_dev->is_vf)
ib_set_device_ops(ib_dev, &hns_roce_dev_hw_stats_ops);
ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
ib_set_device_ops(ib_dev, &hns_roce_dev_restrack_ops);
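
Moving the HIP08/VF checks out of the callbacks and into registration means alloc_hw_port_stats()/get_hw_stats() are simply never installed on hardware without counters; per the "Fix init failure of RoCE VF and HIP08" entry in the shortlog, returning NULL from alloc_hw_port_stats() was apparently breaking device registration on those configurations, whereas an absent op is handled cleanly by the core.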

View File

@ -1064,7 +1064,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
{
struct hns_roce_ib_create_qp_resp resp = {};
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_ib_create_qp ucmd;
struct hns_roce_ib_create_qp ucmd = {};
int ret;
mutex_init(&hr_qp->mutex);

View File

@ -160,3 +160,52 @@ int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
return ret;
}
int hns_roce_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq)
{
struct hns_roce_srq *hr_srq = to_hr_srq(ib_srq);
struct nlattr *table_attr;
table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
return -EMSGSIZE;
if (rdma_nl_put_driver_u32_hex(msg, "srqn", hr_srq->srqn))
goto err;
if (rdma_nl_put_driver_u32_hex(msg, "wqe_cnt", hr_srq->wqe_cnt))
goto err;
if (rdma_nl_put_driver_u32_hex(msg, "max_gs", hr_srq->max_gs))
goto err;
if (rdma_nl_put_driver_u32_hex(msg, "xrcdn", hr_srq->xrcdn))
goto err;
nla_nest_end(msg, table_attr);
return 0;
err:
nla_nest_cancel(msg, table_attr);
return -EMSGSIZE;
}
int hns_roce_fill_res_srq_entry_raw(struct sk_buff *msg, struct ib_srq *ib_srq)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
struct hns_roce_srq *hr_srq = to_hr_srq(ib_srq);
struct hns_roce_srq_context context;
int ret;
if (!hr_dev->hw->query_srqc)
return -EINVAL;
ret = hr_dev->hw->query_srqc(hr_dev, hr_srq->srqn, &context);
if (ret)
return ret;
ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);
return ret;
}
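
With query_srqc() wired into the raw restrack hook, SRQ state becomes visible through rdma netlink's resource tracker. iproute2's rdma tool should expose this along the lines of "rdma res show srq", with the raw context dump gated behind RDMA_NL_ADMIN_PERM for debugging; the exact tool syntax is an assumption, so check your iproute2 version.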

View File

@ -5,6 +5,7 @@
#include <linux/pci.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
@ -387,6 +388,79 @@ static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
free_srq_idx(hr_dev, srq);
}
static int get_srq_ucmd(struct hns_roce_srq *srq, struct ib_udata *udata,
struct hns_roce_ib_create_srq *ucmd)
{
struct ib_device *ibdev = srq->ibsrq.device;
int ret;
ret = ib_copy_from_udata(ucmd, udata, min(udata->inlen, sizeof(*ucmd)));
if (ret) {
ibdev_err(ibdev, "failed to copy SRQ udata, ret = %d.\n", ret);
return ret;
}
return 0;
}
static void free_srq_db(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
struct ib_udata *udata)
{
struct hns_roce_ucontext *uctx;
if (!(srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB))
return;
srq->cap_flags &= ~HNS_ROCE_SRQ_CAP_RECORD_DB;
if (udata) {
uctx = rdma_udata_to_drv_context(udata,
struct hns_roce_ucontext,
ibucontext);
hns_roce_db_unmap_user(uctx, &srq->rdb);
} else {
hns_roce_free_db(hr_dev, &srq->rdb);
}
}
static int alloc_srq_db(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
struct ib_udata *udata,
struct hns_roce_ib_create_srq_resp *resp)
{
struct hns_roce_ib_create_srq ucmd = {};
struct hns_roce_ucontext *uctx;
int ret;
if (udata) {
ret = get_srq_ucmd(srq, udata, &ucmd);
if (ret)
return ret;
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ_RECORD_DB) &&
(ucmd.req_cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB)) {
uctx = rdma_udata_to_drv_context(udata,
struct hns_roce_ucontext, ibucontext);
ret = hns_roce_db_map_user(uctx, ucmd.db_addr,
&srq->rdb);
if (ret)
return ret;
srq->cap_flags |= HNS_ROCE_RSP_SRQ_CAP_RECORD_DB;
}
} else {
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ_RECORD_DB) {
ret = hns_roce_alloc_db(hr_dev, &srq->rdb, 1);
if (ret)
return ret;
*srq->rdb.db_record = 0;
srq->cap_flags |= HNS_ROCE_RSP_SRQ_CAP_RECORD_DB;
}
srq->db_reg = hr_dev->reg_base + SRQ_DB_REG;
}
return 0;
}
int hns_roce_create_srq(struct ib_srq *ib_srq,
struct ib_srq_init_attr *init_attr,
struct ib_udata *udata)
@ -407,15 +481,20 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
if (ret)
return ret;
ret = alloc_srqn(hr_dev, srq);
ret = alloc_srq_db(hr_dev, srq, udata, &resp);
if (ret)
goto err_srq_buf;
ret = alloc_srqn(hr_dev, srq);
if (ret)
goto err_srq_db;
ret = alloc_srqc(hr_dev, srq);
if (ret)
goto err_srqn;
if (udata) {
resp.cap_flags = srq->cap_flags;
resp.srqn = srq->srqn;
if (ib_copy_to_udata(udata, &resp,
min(udata->outlen, sizeof(resp)))) {
@ -424,7 +503,6 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
}
}
srq->db_reg = hr_dev->reg_base + SRQ_DB_REG;
srq->event = hns_roce_ib_srq_event;
refcount_set(&srq->refcount, 1);
init_completion(&srq->free);
@ -435,6 +513,8 @@ err_srqc:
free_srqc(hr_dev, srq);
err_srqn:
free_srqn(hr_dev, srq);
err_srq_db:
free_srq_db(hr_dev, srq, udata);
err_srq_buf:
free_srq_buf(hr_dev, srq);
@ -448,6 +528,7 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
free_srqc(hr_dev, srq);
free_srqn(hr_dev, srq);
free_srq_db(hr_dev, srq, udata);
free_srq_buf(hr_dev, srq);
return 0;
}

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "main.h"
#include "trace.h"

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_CM_H
#define IRDMA_CM_H

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include <linux/etherdevice.h>

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_DEFS_H
#define IRDMA_DEFS_H

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "hmc.h"

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2015 - 2020 Intel Corporation */
#ifndef IRDMA_HMC_H
#define IRDMA_HMC_H

Some files were not shown because too many files have changed in this diff.