22664c06e9
Previously, when destroying a QP/RQ, the result of the firmware
destruction function was ignored, so upper layers were never informed of
a failure. This could cause various problems: an upper layer that is
unaware of the failure keeps operating on the assumption that the
related QP/RQ was successfully destroyed when it actually was not, which
can lead to the kernel WARN below.

Now the correct firmware destruction status is returned to the upper
layers. For an RQ the caller is mlx5_ib_destroy_wq(), which was already
capable of handling RQ destruction failure; for a QP it is
destroy_qp_common(), which now actually warns upon QP destruction
failure.

WARNING: CPU: 3 PID: 995 at drivers/infiniband/core/rdma_core.c:940 uverbs_destroy_ufile_hw+0xcb/0xe0 [ib_uverbs]
Modules linked in: xt_conntrack xt_MASQUERADE nf_conntrack_netlink nfnetlink xt_addrtype iptable_nat nf_nat br_netfilter rpcrdma rdma_ucm ib_iser libiscsi scsi_transport_iscsi rdma_cm ib_umad ib_ipoib iw_cm ib_cm mlx5_ib ib_uverbs ib_core overlay mlx5_core fuse
CPU: 3 PID: 995 Comm: python3 Not tainted 5.16.0-rc5+ #1
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
RIP: 0010:uverbs_destroy_ufile_hw+0xcb/0xe0 [ib_uverbs]
Code: 41 5c 41 5d 41 5e e9 44 34 f0 e0 48 89 df e8 4c 77 ff ff 49 8b 86 10 01 00 00 48 85 c0 74 a1 4c 89 e7 ff d0 eb 9a 0f 0b eb c1 <0f> 0b be 04 00 00 00 48 89 df e8 b6 f6 ff ff e9 75 ff ff ff 90 0f
RSP: 0018:ffff8881533e3e78 EFLAGS: 00010287
RAX: ffff88811b2cf3e0 RBX: ffff888106209700 RCX: 0000000000000000
RDX: ffff888106209780 RSI: ffff8881533e3d30 RDI: ffff888109b101a0
RBP: 0000000000000001 R08: ffff888127cb381c R09: 0de9890000000009
R10: ffff888127cb3800 R11: 0000000000000000 R12: ffff888106209780
R13: ffff888106209750 R14: ffff888100f20660 R15: 0000000000000000
FS:  00007f8be353b740(0000) GS:ffff88852c980000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007f8bd5b117c0 CR3: 000000012cd8a004 CR4: 0000000000370ea0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
 <TASK>
 ib_uverbs_close+0x1a/0x90 [ib_uverbs]
 __fput+0x82/0x230
 task_work_run+0x59/0x90
 exit_to_user_mode_prepare+0x138/0x140
 syscall_exit_to_user_mode+0x1d/0x50
 ? __x64_sys_close+0xe/0x40
 do_syscall_64+0x4a/0x90
 entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x7f8be3ae0abb
Code: 03 00 00 00 0f 05 48 3d 00 f0 ff ff 77 41 c3 48 83 ec 18 89 7c 24 0c e8 83 43 f9 ff 8b 7c 24 0c 41 89 c0 b8 03 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 35 44 89 c7 89 44 24 0c e8 c1 43 f9 ff 8b 44
RSP: 002b:00007ffdb51909c0 EFLAGS: 00000293 ORIG_RAX: 0000000000000003
RAX: 0000000000000000 RBX: 0000557bb7f7c020 RCX: 00007f8be3ae0abb
RDX: 0000557bb7c74010 RSI: 0000557bb7f14ca0 RDI: 0000000000000005
RBP: 0000557bb7fbd598 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000293 R12: 0000557bb7fbd5b8
R13: 0000557bb7fbd5a8 R14: 0000000000001000 R15: 0000557bb7f7c020
 </TASK>

Signed-off-by: Patrisious Haddad <phaddad@nvidia.com>
Link: https://lore.kernel.org/r/c6df677f931d18090bafbe7f7dbb9524047b7d9b.1685953497.git.leon@kernel.org
Signed-off-by: Leon Romanovsky <leon@kernel.org>
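
To illustrate the new contract, a minimal, hypothetical sketch of a
caller acting on the firmware status that mlx5_core_destroy_qp() now
returns (the helper name and WARN message below are made up; this is not
the actual destroy_qp_common() body):

	/* Hypothetical caller: act on the propagated firmware status. */
	static int destroy_qp_example(struct mlx5_ib_dev *dev,
				      struct mlx5_core_qp *mqp)
	{
		int err = mlx5_core_destroy_qp(dev, mqp);

		/* Previously this status was silently dropped. */
		WARN_ONCE(err, "failed to destroy QP 0x%x, err %d\n",
			  mqp->qpn, err);
		return err;
	}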
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
 */

#include <linux/gfp.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/driver.h>
#include "mlx5_ib.h"
#include "qp.h"

static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
			       struct mlx5_core_dct *dct);

static struct mlx5_core_rsc_common *
mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
{
	struct mlx5_core_rsc_common *common;
	unsigned long flags;

	spin_lock_irqsave(&table->lock, flags);

	common = radix_tree_lookup(&table->tree, rsn);
	if (common)
		refcount_inc(&common->refcount);

	spin_unlock_irqrestore(&table->lock, flags);

	return common;
}

void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
	if (refcount_dec_and_test(&common->refcount))
		complete(&common->free);
}

static u64 qp_allowed_event_types(void)
{
	u64 mask;

	mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
	       BIT(MLX5_EVENT_TYPE_COMM_EST) |
	       BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
	       BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
	       BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
	       BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
	       BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);

	return mask;
}

static u64 rq_allowed_event_types(void)
{
	u64 mask;

	mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);

	return mask;
}

static u64 sq_allowed_event_types(void)
{
	return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
}

static u64 dct_allowed_event_types(void)
{
	return BIT(MLX5_EVENT_TYPE_DCT_DRAINED);
}

static bool is_event_type_allowed(int rsc_type, int event_type)
{
	switch (rsc_type) {
	case MLX5_EVENT_QUEUE_TYPE_QP:
		return BIT(event_type) & qp_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_RQ:
		return BIT(event_type) & rq_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_SQ:
		return BIT(event_type) & sq_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_DCT:
		return BIT(event_type) & dct_allowed_event_types();
	default:
		WARN(1, "Event arrived for unknown resource type");
		return false;
	}
}

static int dct_event_notifier(struct mlx5_ib_dev *dev, struct mlx5_eqe *eqe)
{
	struct mlx5_core_dct *dct;
	unsigned long flags;
	u32 qpn;

	qpn = be32_to_cpu(eqe->data.dct.dctn) & 0xFFFFFF;
	xa_lock_irqsave(&dev->qp_table.dct_xa, flags);
	dct = xa_load(&dev->qp_table.dct_xa, qpn);
	if (dct)
		complete(&dct->drained);
	xa_unlock_irqrestore(&dev->qp_table.dct_xa, flags);
	return NOTIFY_OK;
}

static int rsc_event_notifier(struct notifier_block *nb,
			      unsigned long type, void *data)
{
	struct mlx5_ib_dev *dev =
		container_of(nb, struct mlx5_ib_dev, qp_table.nb);
	struct mlx5_core_rsc_common *common;
	struct mlx5_eqe *eqe = data;
	u8 event_type = (u8)type;
	struct mlx5_core_qp *qp;
	u32 rsn;

	switch (event_type) {
	case MLX5_EVENT_TYPE_DCT_DRAINED:
		return dct_event_notifier(dev, eqe);
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
		rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
		break;
	default:
		return NOTIFY_DONE;
	}

	common = mlx5_get_rsc(&dev->qp_table, rsn);
	if (!common)
		return NOTIFY_OK;

	if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type))
		goto out;

	switch (common->res) {
	case MLX5_RES_QP:
	case MLX5_RES_RQ:
	case MLX5_RES_SQ:
		qp = (struct mlx5_core_qp *)common;
		qp->event(qp, event_type);
		/* Need to put resource in event handler */
		return NOTIFY_OK;
	default:
		break;
	}
out:
	mlx5_core_put_rsc(common);

	return NOTIFY_OK;
}

static int create_resource_common(struct mlx5_ib_dev *dev,
				  struct mlx5_core_qp *qp, int rsc_type)
{
	struct mlx5_qp_table *table = &dev->qp_table;
	int err;

	qp->common.res = rsc_type;
	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree,
				qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
				qp);
	spin_unlock_irq(&table->lock);
	if (err)
		return err;

	refcount_set(&qp->common.refcount, 1);
	init_completion(&qp->common.free);
	qp->pid = current->pid;

	return 0;
}
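
/*
 * Remove the resource from the radix tree, drop the tree's reference and
 * wait for concurrent event handlers, which take their own reference via
 * mlx5_get_rsc(), to release theirs as well.
 */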
static void destroy_resource_common(struct mlx5_ib_dev *dev,
				    struct mlx5_core_qp *qp)
{
	struct mlx5_qp_table *table = &dev->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&table->lock, flags);
	radix_tree_delete(&table->tree,
			  qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
	spin_unlock_irqrestore(&table->lock, flags);
	mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
	wait_for_completion(&qp->common.free);
}

static int _mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
				  struct mlx5_core_dct *dct)
{
	u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {};
	struct mlx5_core_qp *qp = &dct->mqp;

	MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
	MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
	MLX5_SET(destroy_dct_in, in, uid, qp->uid);
	return mlx5_cmd_exec_in(dev->mdev, destroy_dct, in);
}

int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
			 u32 *in, int inlen, u32 *out, int outlen)
{
	struct mlx5_core_qp *qp = &dct->mqp;
	int err;

	init_completion(&dct->drained);
	MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);

	err = mlx5_cmd_do(dev->mdev, in, inlen, out, outlen);
	if (err)
		return err;

	qp->qpn = MLX5_GET(create_dct_out, out, dctn);
	qp->uid = MLX5_GET(create_dct_in, in, uid);
	err = xa_err(xa_store_irq(&dev->qp_table.dct_xa, qp->qpn, dct, GFP_KERNEL));
	if (err)
		goto err_cmd;

	return 0;
err_cmd:
	_mlx5_core_destroy_dct(dev, dct);
	return err;
}

int mlx5_qpc_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
		       u32 *in, int inlen, u32 *out)
{
	u32 din[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
	int err;

	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);

	err = mlx5_cmd_exec(dev->mdev, in, inlen, out,
			    MLX5_ST_SZ_BYTES(create_qp_out));
	if (err)
		return err;

	qp->uid = MLX5_GET(create_qp_in, in, uid);
	qp->qpn = MLX5_GET(create_qp_out, out, qpn);

	err = create_resource_common(dev, qp, MLX5_RES_QP);
	if (err)
		goto err_cmd;

	mlx5_debug_qp_add(dev->mdev, qp);

	return 0;

err_cmd:
	MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
	MLX5_SET(destroy_qp_in, din, uid, qp->uid);
	mlx5_cmd_exec_in(dev->mdev, destroy_qp, din);
	return err;
}

static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
			       struct mlx5_core_dct *dct)
{
	u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {};
	struct mlx5_core_qp *qp = &dct->mqp;

	MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
	MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
	MLX5_SET(drain_dct_in, in, uid, qp->uid);
	return mlx5_cmd_exec_in(dev->mdev, drain_dct, in);
}
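
/*
 * DCT teardown is a two-step flow: DRAIN_DCT first, then wait for the
 * DCT_DRAINED event before issuing DESTROY_DCT.  The xarray entry is
 * swapped to XA_ZERO_ENTRY (i.e. kept reserved) across the destroy
 * command so that on failure the entry can be restored and the firmware
 * status returned to the caller.
 */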
int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
			  struct mlx5_core_dct *dct)
{
	struct mlx5_qp_table *table = &dev->qp_table;
	struct mlx5_core_dct *tmp;
	int err;

	err = mlx5_core_drain_dct(dev, dct);
	if (err) {
		if (dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
			goto destroy;

		return err;
	}
	wait_for_completion(&dct->drained);

destroy:
	tmp = xa_cmpxchg_irq(&table->dct_xa, dct->mqp.qpn, dct, XA_ZERO_ENTRY, GFP_KERNEL);
	if (WARN_ON(tmp != dct))
		return xa_err(tmp) ?: -EINVAL;

	err = _mlx5_core_destroy_dct(dev, dct);
	if (err) {
		xa_cmpxchg_irq(&table->dct_xa, dct->mqp.qpn, XA_ZERO_ENTRY, dct, 0);
		return err;
	}
	xa_erase_irq(&table->dct_xa, dct->mqp.qpn);
	return 0;
}
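
/*
 * The firmware destruction status is now propagated to the caller
 * (destroy_qp_common()) rather than ignored, so a failed DESTROY_QP no
 * longer goes unnoticed by the upper layers.
 */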
int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp)
{
	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};

	mlx5_debug_qp_remove(dev->mdev, qp);

	destroy_resource_common(dev, qp);

	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
	MLX5_SET(destroy_qp_in, in, uid, qp->uid);
	return mlx5_cmd_exec_in(dev->mdev, destroy_qp, in);
}

int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev,
			     u32 timeout_usec)
{
	u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {};

	MLX5_SET(set_delay_drop_params_in, in, opcode,
		 MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
	MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
		 timeout_usec / 100);
	return mlx5_cmd_exec_in(dev->mdev, set_delay_drop_params, in);
}

struct mbox_info {
	u32 *in;
	u32 *out;
	int inlen;
	int outlen;
};

static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
{
	mbox->inlen = inlen;
	mbox->outlen = outlen;
	mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
	mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
	if (!mbox->in || !mbox->out) {
		kfree(mbox->in);
		kfree(mbox->out);
		return -ENOMEM;
	}

	return 0;
}

static void mbox_free(struct mbox_info *mbox)
{
	kfree(mbox->in);
	kfree(mbox->out);
}

static int get_ece_from_mbox(void *out, u16 opcode)
{
	int ece = 0;

	switch (opcode) {
	case MLX5_CMD_OP_INIT2INIT_QP:
		ece = MLX5_GET(init2init_qp_out, out, ece);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		ece = MLX5_GET(init2rtr_qp_out, out, ece);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		ece = MLX5_GET(rtr2rts_qp_out, out, ece);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		ece = MLX5_GET(rts2rts_qp_out, out, ece);
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		ece = MLX5_GET(rst2init_qp_out, out, ece);
		break;
	default:
		break;
	}

	return ece;
}

static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
				u32 opt_param_mask, void *qpc,
				struct mbox_info *mbox, u16 uid, u32 ece)
{
	mbox->out = NULL;
	mbox->in = NULL;

#define MBOX_ALLOC(mbox, typ)                                                  \
	mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))

#define MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid)                            \
	do {                                                                   \
		MLX5_SET(typ##_in, in, opcode, _opcode);                       \
		MLX5_SET(typ##_in, in, qpn, _qpn);                             \
		MLX5_SET(typ##_in, in, uid, _uid);                             \
	} while (0)

#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc, _uid)          \
	do {                                                                   \
		MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid);                   \
		MLX5_SET(typ##_in, in, opt_param_mask, _opt_p);                \
		memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc,                  \
		       MLX5_ST_SZ_BYTES(qpc));                                 \
	} while (0)

	switch (opcode) {
	/* 2RST & 2ERR */
	case MLX5_CMD_OP_2RST_QP:
		if (MBOX_ALLOC(mbox, qp_2rst))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn, uid);
		break;
	case MLX5_CMD_OP_2ERR_QP:
		if (MBOX_ALLOC(mbox, qp_2err))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn, uid);
		break;

	/* MODIFY with QPC */
	case MLX5_CMD_OP_RST2INIT_QP:
		if (MBOX_ALLOC(mbox, rst2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(rst2init_qp_in, mbox->in, ece, ece);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (MBOX_ALLOC(mbox, init2rtr_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(init2rtr_qp_in, mbox->in, ece, ece);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		if (MBOX_ALLOC(mbox, rtr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(rtr2rts_qp_in, mbox->in, ece, ece);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (MBOX_ALLOC(mbox, rts2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(rts2rts_qp_in, mbox->in, ece, ece);
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		if (MBOX_ALLOC(mbox, sqerr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_SQD_RTS_QP:
		if (MBOX_ALLOC(mbox, sqd2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(sqd2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
		if (MBOX_ALLOC(mbox, init2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(init2init_qp_in, mbox->in, ece, ece);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
			void *qpc, struct mlx5_core_qp *qp, u32 *ece)
{
	struct mbox_info mbox;
	int err;

	err = modify_qp_mbox_alloc(dev->mdev, opcode, qp->qpn, opt_param_mask,
				   qpc, &mbox, qp->uid, (ece) ? *ece : 0);
	if (err)
		return err;

	err = mlx5_cmd_exec(dev->mdev, mbox.in, mbox.inlen, mbox.out,
			    mbox.outlen);

	if (ece)
		*ece = get_ece_from_mbox(mbox.out, opcode);

	mbox_free(&mbox);
	return err;
}

int mlx5_init_qp_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_qp_table *table = &dev->qp_table;

	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
	xa_init(&table->dct_xa);
	mlx5_qp_debugfs_init(dev->mdev);

	table->nb.notifier_call = rsc_event_notifier;
	mlx5_notifier_register(dev->mdev, &table->nb);

	return 0;
}

void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_qp_table *table = &dev->qp_table;

	mlx5_notifier_unregister(dev->mdev, &table->nb);
	mlx5_qp_debugfs_cleanup(dev->mdev);
}

int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
		       u32 *out, int outlen, bool qpc_ext)
{
	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};

	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
	MLX5_SET(query_qp_in, in, qpn, qp->qpn);
	MLX5_SET(query_qp_in, in, qpc_ext, qpc_ext);

	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, outlen);
}

int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
			u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {};
	struct mlx5_core_qp *qp = &dct->mqp;

	MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
	MLX5_SET(query_dct_in, in, dctn, qp->qpn);

	return mlx5_cmd_exec(dev->mdev, (void *)&in, sizeof(in), (void *)out,
			     outlen);
}

int mlx5_core_xrcd_alloc(struct mlx5_ib_dev *dev, u32 *xrcdn)
{
	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
	int err;

	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
	err = mlx5_cmd_exec_inout(dev->mdev, alloc_xrcd, in, out);
	if (!err)
		*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
	return err;
}

int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};

	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
	return mlx5_cmd_exec_in(dev->mdev, dealloc_xrcd, in);
}

static int destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};

	MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
	MLX5_SET(destroy_rq_in, in, rqn, rqn);
	MLX5_SET(destroy_rq_in, in, uid, uid);
	return mlx5_cmd_exec_in(dev->mdev, destroy_rq, in);
}

int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *rq)
{
	int err;
	u32 rqn;

	err = mlx5_core_create_rq(dev->mdev, in, inlen, &rqn);
	if (err)
		return err;

	rq->uid = MLX5_GET(create_rq_in, in, uid);
	rq->qpn = rqn;
	err = create_resource_common(dev, rq, MLX5_RES_RQ);
	if (err)
		goto err_destroy_rq;

	return 0;

err_destroy_rq:
	destroy_rq_tracked(dev, rq->qpn, rq->uid);

	return err;
}
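
/*
 * The firmware status of DESTROY_RQ is returned so that the caller,
 * mlx5_ib_destroy_wq(), can handle an RQ destruction failure.
 */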
int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
				 struct mlx5_core_qp *rq)
{
	destroy_resource_common(dev, rq);
	return destroy_rq_tracked(dev, rq->qpn, rq->uid);
}

static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};

	MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
	MLX5_SET(destroy_sq_in, in, sqn, sqn);
	MLX5_SET(destroy_sq_in, in, uid, uid);
	mlx5_cmd_exec_in(dev->mdev, destroy_sq, in);
}

int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *sq)
{
	u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {};
	int err;

	MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
	err = mlx5_cmd_exec(dev->mdev, in, inlen, out, sizeof(out));
	if (err)
		return err;

	sq->qpn = MLX5_GET(create_sq_out, out, sqn);
	sq->uid = MLX5_GET(create_sq_in, in, uid);
	err = create_resource_common(dev, sq, MLX5_RES_SQ);
	if (err)
		goto err_destroy_sq;

	return 0;

err_destroy_sq:
	destroy_sq_tracked(dev, sq->qpn, sq->uid);

	return err;
}

void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
				  struct mlx5_core_qp *sq)
{
	destroy_resource_common(dev, sq);
	destroy_sq_tracked(dev, sq->qpn, sq->uid);
}

struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_ib_dev *dev,
						int res_num,
						enum mlx5_res_type res_type)
{
	u32 rsn = res_num | (res_type << MLX5_USER_INDEX_LEN);
	struct mlx5_qp_table *table = &dev->qp_table;

	return mlx5_get_rsc(table, rsn);
}

void mlx5_core_res_put(struct mlx5_core_rsc_common *res)
{
	mlx5_core_put_rsc(res);
}