Merge branch 'mlx5-next' into rdma.git
From git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

mlx5 updates taken for dependencies on later ODP patches. Conflict
resolved by deleting mlx5_ib_get_vector_affinity()

* branch 'mlx5-next': (21 commits)
  net/mlx5: EQ, Make EQE access methods inline
  {net,IB}/mlx5: Move Page fault EQ and ODP logic to RDMA
  net/mlx5: EQ, Generic EQ
  net/mlx5: EQ, Different EQ types
  net/mlx5: EQ, Privatize eq_table and friends
  net/mlx5: EQ, irq_info and rmap belong to eq_table
  net/mlx5: EQ, Create all EQs in one place
  net/mlx5: EQ, Move all EQ logic to eq.c
  net/mlx5: EQ, Remove redundant completion EQ list lock
  net/mlx5: EQ, No need to store eq index as a field
  net/mlx5: EQ, Remove unused fields and structures
  net/mlx5: EQ, Use the right place to store/read IRQ affinity hint
  IB/mlx5: Improve ODP debugging messages
  net/mlx5: Use multi threaded workqueue for page fault handling
  net/mlx5: Return success for PAGE_FAULT_RESUME in internal error state
  IB/mlx5: Lock QP during page fault handling
  net/mlx5: Enumerate page fault types
  net/mlx5: Add interface to hold and release core resources
  net/mlx5: Release resource on error flow
  net/mlx5: Fix offsets of ifc reserved fields
  ...

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
commit 8742902475
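
The centerpiece of this series is the generic EQ API added in include/linux/mlx5/eq.h and consumed by the new mlx5_ib page-fault EQ later in this diff. Below is a minimal sketch of that consumer pattern, condensed from the hunks that follow; struct my_pf_eq, my_eq_int() and my_eq_create() are illustrative placeholder names, not symbols introduced by the series.

/*
 * Sketch of a generic-EQ consumer (placeholder names, not part of the series).
 */
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>

/* Private state handed back to the IRQ handler via mlx5_eq_param.context. */
struct my_pf_eq {
	struct mlx5_core_dev *mdev;
	struct mlx5_eq *core;
};

static irqreturn_t my_eq_int(int irq, void *eq_ptr)
{
	struct my_pf_eq *eq = eq_ptr;
	struct mlx5_eqe *eqe;
	int cc = 0;

	/*
	 * Poll EQEs; mlx5_eq_update_cc() pushes the consumer index every
	 * MLX5_NUM_SPARE_EQE entries so the HCA never sees an overflow.
	 */
	while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) {
		/* ... dispatch on eqe->type / eqe->sub_type here ... */
		cc = mlx5_eq_update_cc(eq->core, ++cc);
	}
	mlx5_eq_update_ci(eq->core, cc, true);	/* final update + re-arm */

	return IRQ_HANDLED;
}

static int my_eq_create(struct mlx5_core_dev *mdev, struct my_pf_eq *eq)
{
	struct mlx5_eq_param param = {
		.index   = MLX5_EQ_PFAULT_IDX,	/* vector reserved for ULP EQs */
		.mask    = 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
		.nent    = MLX5_NUM_ASYNC_EQE,
		.context = eq,			/* becomes eq_ptr in my_eq_int() */
		.handler = my_eq_int,
	};

	eq->mdev = mdev;
	eq->core = mlx5_eq_create_generic(mdev, "my_pf_eq", &param);
	if (IS_ERR(eq->core))
		return PTR_ERR(eq->core);

	return 0;	/* later: mlx5_eq_destroy_generic(mdev, eq->core) */
}
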
@ -655,8 +655,13 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
|
||||
flags, local_page_list, NULL, NULL);
|
||||
up_read(&owning_mm->mmap_sem);
|
||||
|
||||
if (npages < 0)
|
||||
if (npages < 0) {
|
||||
if (npages != -EAGAIN)
|
||||
pr_warn("fail to get %zu user pages with error %d\n", gup_num_pages, npages);
|
||||
else
|
||||
pr_debug("fail to get %zu user pages with error %d\n", gup_num_pages, npages);
|
||||
break;
|
||||
}
|
||||
|
||||
bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
|
||||
mutex_lock(&umem_odp->umem_mutex);
|
||||
@ -674,8 +679,13 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
|
||||
ret = ib_umem_odp_map_dma_single_page(
|
||||
umem_odp, k, local_page_list[j],
|
||||
access_mask, current_seq);
|
||||
if (ret < 0)
|
||||
if (ret < 0) {
|
||||
if (ret != -EAGAIN)
|
||||
pr_warn("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
|
||||
else
|
||||
pr_debug("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
|
||||
break;
|
||||
}
|
||||
|
||||
p = page_to_phys(local_page_list[j]);
|
||||
k++;
|
||||
|
@ -5693,8 +5693,7 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
|
||||
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
|
||||
dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
|
||||
dev->ib_dev.phys_port_cnt = dev->num_ports;
|
||||
dev->ib_dev.num_comp_vectors =
|
||||
dev->mdev->priv.eq_table.num_comp_vectors;
|
||||
dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev);
|
||||
dev->ib_dev.dev.parent = &mdev->pdev->dev;
|
||||
|
||||
mutex_init(&dev->cap_mask_mutex);
|
||||
@ -6032,6 +6031,11 @@ static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
|
||||
return mlx5_ib_odp_init_one(dev);
|
||||
}
|
||||
|
||||
void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev)
|
||||
{
|
||||
mlx5_ib_odp_cleanup_one(dev);
|
||||
}
|
||||
|
||||
int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
|
||||
{
|
||||
if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
|
||||
@ -6217,7 +6221,7 @@ static const struct mlx5_ib_profile pf_profile = {
|
||||
mlx5_ib_stage_dev_res_cleanup),
|
||||
STAGE_CREATE(MLX5_IB_STAGE_ODP,
|
||||
mlx5_ib_stage_odp_init,
|
||||
NULL),
|
||||
mlx5_ib_stage_odp_cleanup),
|
||||
STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
|
||||
mlx5_ib_stage_counters_init,
|
||||
mlx5_ib_stage_counters_cleanup),
|
||||
@ -6387,9 +6391,6 @@ static struct mlx5_interface mlx5_ib_interface = {
|
||||
.add = mlx5_ib_add,
|
||||
.remove = mlx5_ib_remove,
|
||||
.event = mlx5_ib_event,
|
||||
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
||||
.pfault = mlx5_ib_pfault,
|
||||
#endif
|
||||
.protocol = MLX5_INTERFACE_PROTOCOL_IB,
|
||||
};
|
||||
|
||||
|
@ -880,6 +880,15 @@ struct mlx5_ib_lb_state {
|
||||
bool enabled;
|
||||
};
|
||||
|
||||
struct mlx5_ib_pf_eq {
|
||||
struct mlx5_ib_dev *dev;
|
||||
struct mlx5_eq *core;
|
||||
struct work_struct work;
|
||||
spinlock_t lock; /* Pagefaults spinlock */
|
||||
struct workqueue_struct *wq;
|
||||
mempool_t *pool;
|
||||
};
|
||||
|
||||
struct mlx5_ib_dev {
|
||||
struct ib_device ib_dev;
|
||||
const struct uverbs_object_tree_def *driver_trees[7];
|
||||
@ -902,6 +911,8 @@ struct mlx5_ib_dev {
|
||||
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
||||
struct ib_odp_caps odp_caps;
|
||||
u64 odp_max_size;
|
||||
struct mlx5_ib_pf_eq odp_pf_eq;
|
||||
|
||||
/*
|
||||
* Sleepable RCU that prevents destruction of MRs while they are still
|
||||
* being used by a page fault handler.
|
||||
@ -1158,9 +1169,8 @@ struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
|
||||
|
||||
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
||||
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
|
||||
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
|
||||
struct mlx5_pagefault *pfault);
|
||||
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
|
||||
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
|
||||
int __init mlx5_ib_odp_init(void);
|
||||
void mlx5_ib_odp_cleanup(void);
|
||||
void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
|
||||
@ -1175,6 +1185,7 @@ static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
|
||||
}
|
||||
|
||||
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
|
||||
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
|
||||
static inline int mlx5_ib_odp_init(void) { return 0; }
|
||||
static inline void mlx5_ib_odp_cleanup(void) {}
|
||||
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
|
||||
|
@ -37,6 +37,46 @@
|
||||
#include "mlx5_ib.h"
|
||||
#include "cmd.h"
|
||||
|
||||
#include <linux/mlx5/eq.h>
|
||||
|
||||
/* Contains the details of a pagefault. */
|
||||
struct mlx5_pagefault {
|
||||
u32 bytes_committed;
|
||||
u32 token;
|
||||
u8 event_subtype;
|
||||
u8 type;
|
||||
union {
|
||||
/* Initiator or send message responder pagefault details. */
|
||||
struct {
|
||||
/* Received packet size, only valid for responders. */
|
||||
u32 packet_size;
|
||||
/*
|
||||
* Number of resource holding WQE, depends on type.
|
||||
*/
|
||||
u32 wq_num;
|
||||
/*
|
||||
* WQE index. Refers to either the send queue or
|
||||
* receive queue, according to event_subtype.
|
||||
*/
|
||||
u16 wqe_index;
|
||||
} wqe;
|
||||
/* RDMA responder pagefault details */
|
||||
struct {
|
||||
u32 r_key;
|
||||
/*
|
||||
* Received packet size, minimal size page fault
|
||||
* resolution required for forward progress.
|
||||
*/
|
||||
u32 packet_size;
|
||||
u32 rdma_op_len;
|
||||
u64 rdma_va;
|
||||
} rdma;
|
||||
};
|
||||
|
||||
struct mlx5_ib_pf_eq *eq;
|
||||
struct work_struct work;
|
||||
};
|
||||
|
||||
#define MAX_PREFETCH_LEN (4*1024*1024U)
|
||||
|
||||
/* Timeout in ms to wait for an active mmu notifier to complete when handling
|
||||
@ -304,14 +344,20 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
|
||||
{
|
||||
int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
|
||||
pfault->wqe.wq_num : pfault->token;
|
||||
int ret = mlx5_core_page_fault_resume(dev->mdev,
|
||||
pfault->token,
|
||||
wq_num,
|
||||
pfault->type,
|
||||
error);
|
||||
if (ret)
|
||||
mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x\n",
|
||||
wq_num);
|
||||
u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = { };
|
||||
u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = { };
|
||||
int err;
|
||||
|
||||
MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
|
||||
MLX5_SET(page_fault_resume_in, in, page_fault_type, pfault->type);
|
||||
MLX5_SET(page_fault_resume_in, in, token, pfault->token);
|
||||
MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
|
||||
MLX5_SET(page_fault_resume_in, in, error, !!error);
|
||||
|
||||
err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
|
||||
if (err)
|
||||
mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x err %d\n",
|
||||
wq_num, err);
|
||||
}
|
||||
|
||||
static struct mlx5_ib_mr *implicit_mr_alloc(struct ib_pd *pd,
|
||||
@ -607,8 +653,8 @@ out:
|
||||
if (!wait_for_completion_timeout(
|
||||
&odp->notifier_completion,
|
||||
timeout)) {
|
||||
mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d\n",
|
||||
current_seq, odp->notifiers_seq);
|
||||
mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
|
||||
current_seq, odp->notifiers_seq, odp->notifiers_count);
|
||||
}
|
||||
} else {
|
||||
/* The MR is being killed, kill the QP as well. */
|
||||
@ -1016,16 +1062,31 @@ invalid_transport_or_opcode:
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct mlx5_ib_qp *mlx5_ib_odp_find_qp(struct mlx5_ib_dev *dev,
|
||||
u32 wq_num)
|
||||
static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
|
||||
u32 wq_num, int pf_type)
|
||||
{
|
||||
struct mlx5_core_qp *mqp = __mlx5_qp_lookup(dev->mdev, wq_num);
|
||||
enum mlx5_res_type res_type;
|
||||
|
||||
if (!mqp) {
|
||||
mlx5_ib_err(dev, "QPN 0x%6x not found\n", wq_num);
|
||||
switch (pf_type) {
|
||||
case MLX5_WQE_PF_TYPE_RMP:
|
||||
res_type = MLX5_RES_SRQ;
|
||||
break;
|
||||
case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE:
|
||||
case MLX5_WQE_PF_TYPE_RESP:
|
||||
case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC:
|
||||
res_type = MLX5_RES_QP;
|
||||
break;
|
||||
default:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return mlx5_core_res_hold(dev->mdev, wq_num, res_type);
|
||||
}
|
||||
|
||||
static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res)
|
||||
{
|
||||
struct mlx5_core_qp *mqp = (struct mlx5_core_qp *)res;
|
||||
|
||||
return to_mibqp(mqp);
|
||||
}
|
||||
|
||||
@ -1039,18 +1100,30 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
|
||||
int resume_with_error = 1;
|
||||
u16 wqe_index = pfault->wqe.wqe_index;
|
||||
int requestor = pfault->type & MLX5_PFAULT_REQUESTOR;
|
||||
struct mlx5_core_rsc_common *res;
|
||||
struct mlx5_ib_qp *qp;
|
||||
|
||||
res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type);
|
||||
if (!res) {
|
||||
mlx5_ib_dbg(dev, "wqe page fault for missing resource %d\n", pfault->wqe.wq_num);
|
||||
return;
|
||||
}
|
||||
|
||||
switch (res->res) {
|
||||
case MLX5_RES_QP:
|
||||
qp = res_to_qp(res);
|
||||
break;
|
||||
default:
|
||||
mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n", pfault->type);
|
||||
goto resolve_page_fault;
|
||||
}
|
||||
|
||||
buffer = (char *)__get_free_page(GFP_KERNEL);
|
||||
if (!buffer) {
|
||||
mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
|
||||
goto resolve_page_fault;
|
||||
}
|
||||
|
||||
qp = mlx5_ib_odp_find_qp(dev, pfault->wqe.wq_num);
|
||||
if (!qp)
|
||||
goto resolve_page_fault;
|
||||
|
||||
ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer,
|
||||
PAGE_SIZE, &qp->trans_qp.base);
|
||||
if (ret < 0) {
|
||||
@ -1090,6 +1163,7 @@ resolve_page_fault:
|
||||
mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
|
||||
pfault->wqe.wq_num, resume_with_error,
|
||||
pfault->type);
|
||||
mlx5_core_res_put(res);
|
||||
free_page((unsigned long)buffer);
|
||||
}
|
||||
|
||||
@ -1168,10 +1242,8 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
|
||||
}
|
||||
}
|
||||
|
||||
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
|
||||
struct mlx5_pagefault *pfault)
|
||||
static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault)
|
||||
{
|
||||
struct mlx5_ib_dev *dev = context;
|
||||
u8 event_subtype = pfault->event_subtype;
|
||||
|
||||
switch (event_subtype) {
|
||||
@ -1188,6 +1260,203 @@ void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
|
||||
}
|
||||
}
|
||||
|
||||
static void mlx5_ib_eqe_pf_action(struct work_struct *work)
|
||||
{
|
||||
struct mlx5_pagefault *pfault = container_of(work,
|
||||
struct mlx5_pagefault,
|
||||
work);
|
||||
struct mlx5_ib_pf_eq *eq = pfault->eq;
|
||||
|
||||
mlx5_ib_pfault(eq->dev, pfault);
|
||||
mempool_free(pfault, eq->pool);
|
||||
}
|
||||
|
||||
static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
|
||||
{
|
||||
struct mlx5_eqe_page_fault *pf_eqe;
|
||||
struct mlx5_pagefault *pfault;
|
||||
struct mlx5_eqe *eqe;
|
||||
int cc = 0;
|
||||
|
||||
while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) {
|
||||
pfault = mempool_alloc(eq->pool, GFP_ATOMIC);
|
||||
if (!pfault) {
|
||||
schedule_work(&eq->work);
|
||||
break;
|
||||
}
|
||||
|
||||
pf_eqe = &eqe->data.page_fault;
|
||||
pfault->event_subtype = eqe->sub_type;
|
||||
pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);
|
||||
|
||||
mlx5_ib_dbg(eq->dev,
|
||||
"PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
|
||||
eqe->sub_type, pfault->bytes_committed);
|
||||
|
||||
switch (eqe->sub_type) {
|
||||
case MLX5_PFAULT_SUBTYPE_RDMA:
|
||||
/* RDMA based event */
|
||||
pfault->type =
|
||||
be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
|
||||
pfault->token =
|
||||
be32_to_cpu(pf_eqe->rdma.pftype_token) &
|
||||
MLX5_24BIT_MASK;
|
||||
pfault->rdma.r_key =
|
||||
be32_to_cpu(pf_eqe->rdma.r_key);
|
||||
pfault->rdma.packet_size =
|
||||
be16_to_cpu(pf_eqe->rdma.packet_length);
|
||||
pfault->rdma.rdma_op_len =
|
||||
be32_to_cpu(pf_eqe->rdma.rdma_op_len);
|
||||
pfault->rdma.rdma_va =
|
||||
be64_to_cpu(pf_eqe->rdma.rdma_va);
|
||||
mlx5_ib_dbg(eq->dev,
|
||||
"PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
|
||||
pfault->type, pfault->token,
|
||||
pfault->rdma.r_key);
|
||||
mlx5_ib_dbg(eq->dev,
|
||||
"PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
|
||||
pfault->rdma.rdma_op_len,
|
||||
pfault->rdma.rdma_va);
|
||||
break;
|
||||
|
||||
case MLX5_PFAULT_SUBTYPE_WQE:
|
||||
/* WQE based event */
|
||||
pfault->type =
|
||||
(be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
|
||||
pfault->token =
|
||||
be32_to_cpu(pf_eqe->wqe.token);
|
||||
pfault->wqe.wq_num =
|
||||
be32_to_cpu(pf_eqe->wqe.pftype_wq) &
|
||||
MLX5_24BIT_MASK;
|
||||
pfault->wqe.wqe_index =
|
||||
be16_to_cpu(pf_eqe->wqe.wqe_index);
|
||||
pfault->wqe.packet_size =
|
||||
be16_to_cpu(pf_eqe->wqe.packet_length);
|
||||
mlx5_ib_dbg(eq->dev,
|
||||
"PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
|
||||
pfault->type, pfault->token,
|
||||
pfault->wqe.wq_num,
|
||||
pfault->wqe.wqe_index);
|
||||
break;
|
||||
|
||||
default:
|
||||
mlx5_ib_warn(eq->dev,
|
||||
"Unsupported page fault event sub-type: 0x%02hhx\n",
|
||||
eqe->sub_type);
|
||||
/* Unsupported page faults should still be
|
||||
* resolved by the page fault handler
|
||||
*/
|
||||
}
|
||||
|
||||
pfault->eq = eq;
|
||||
INIT_WORK(&pfault->work, mlx5_ib_eqe_pf_action);
|
||||
queue_work(eq->wq, &pfault->work);
|
||||
|
||||
cc = mlx5_eq_update_cc(eq->core, ++cc);
|
||||
}
|
||||
|
||||
mlx5_eq_update_ci(eq->core, cc, 1);
|
||||
}
|
||||
|
||||
static irqreturn_t mlx5_ib_eq_pf_int(int irq, void *eq_ptr)
|
||||
{
|
||||
struct mlx5_ib_pf_eq *eq = eq_ptr;
|
||||
unsigned long flags;
|
||||
|
||||
if (spin_trylock_irqsave(&eq->lock, flags)) {
|
||||
mlx5_ib_eq_pf_process(eq);
|
||||
spin_unlock_irqrestore(&eq->lock, flags);
|
||||
} else {
|
||||
schedule_work(&eq->work);
|
||||
}
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
/* mempool_refill() was proposed but unfortunately wasn't accepted
|
||||
* http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
|
||||
* Cheap workaround.
|
||||
*/
|
||||
static void mempool_refill(mempool_t *pool)
|
||||
{
|
||||
while (pool->curr_nr < pool->min_nr)
|
||||
mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
|
||||
}
|
||||
|
||||
static void mlx5_ib_eq_pf_action(struct work_struct *work)
|
||||
{
|
||||
struct mlx5_ib_pf_eq *eq =
|
||||
container_of(work, struct mlx5_ib_pf_eq, work);
|
||||
|
||||
mempool_refill(eq->pool);
|
||||
|
||||
spin_lock_irq(&eq->lock);
|
||||
mlx5_ib_eq_pf_process(eq);
|
||||
spin_unlock_irq(&eq->lock);
|
||||
}
|
||||
|
||||
enum {
|
||||
MLX5_IB_NUM_PF_EQE = 0x1000,
|
||||
MLX5_IB_NUM_PF_DRAIN = 64,
|
||||
};
|
||||
|
||||
static int
|
||||
mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
|
||||
{
|
||||
struct mlx5_eq_param param = {};
|
||||
int err;
|
||||
|
||||
INIT_WORK(&eq->work, mlx5_ib_eq_pf_action);
|
||||
spin_lock_init(&eq->lock);
|
||||
eq->dev = dev;
|
||||
|
||||
eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN,
|
||||
sizeof(struct mlx5_pagefault));
|
||||
if (!eq->pool)
|
||||
return -ENOMEM;
|
||||
|
||||
eq->wq = alloc_workqueue("mlx5_ib_page_fault",
|
||||
WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM,
|
||||
MLX5_NUM_CMD_EQE);
|
||||
if (!eq->wq) {
|
||||
err = -ENOMEM;
|
||||
goto err_mempool;
|
||||
}
|
||||
|
||||
param = (struct mlx5_eq_param) {
|
||||
.index = MLX5_EQ_PFAULT_IDX,
|
||||
.mask = 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
|
||||
.nent = MLX5_IB_NUM_PF_EQE,
|
||||
.context = eq,
|
||||
.handler = mlx5_ib_eq_pf_int
|
||||
};
|
||||
eq->core = mlx5_eq_create_generic(dev->mdev, "mlx5_ib_page_fault_eq", ¶m);
|
||||
if (IS_ERR(eq->core)) {
|
||||
err = PTR_ERR(eq->core);
|
||||
goto err_wq;
|
||||
}
|
||||
|
||||
return 0;
|
||||
err_wq:
|
||||
destroy_workqueue(eq->wq);
|
||||
err_mempool:
|
||||
mempool_destroy(eq->pool);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int
|
||||
mlx5_ib_destroy_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
|
||||
cancel_work_sync(&eq->work);
|
||||
destroy_workqueue(eq->wq);
|
||||
mempool_destroy(eq->pool);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
|
||||
{
|
||||
if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
|
||||
@ -1216,7 +1485,7 @@ void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
|
||||
|
||||
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
|
||||
{
|
||||
int ret;
|
||||
int ret = 0;
|
||||
|
||||
if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
|
||||
ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
|
||||
@ -1226,7 +1495,20 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
if (!MLX5_CAP_GEN(dev->mdev, pg))
|
||||
return ret;
|
||||
|
||||
ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
|
||||
{
|
||||
if (!MLX5_CAP_GEN(dev->mdev, pg))
|
||||
return;
|
||||
|
||||
mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);
|
||||
}
|
||||
|
||||
int mlx5_ib_odp_init(void)
|
||||
@ -1236,4 +1518,3 @@ int mlx5_ib_odp_init(void)
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -313,6 +313,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
|
||||
case MLX5_CMD_OP_FPGA_DESTROY_QP:
|
||||
case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
|
||||
case MLX5_CMD_OP_DEALLOC_MEMIC:
|
||||
case MLX5_CMD_OP_PAGE_FAULT_RESUME:
|
||||
return MLX5_CMD_STAT_OK;
|
||||
|
||||
case MLX5_CMD_OP_QUERY_HCA_CAP:
|
||||
@ -326,7 +327,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
|
||||
case MLX5_CMD_OP_CREATE_MKEY:
|
||||
case MLX5_CMD_OP_QUERY_MKEY:
|
||||
case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
|
||||
case MLX5_CMD_OP_PAGE_FAULT_RESUME:
|
||||
case MLX5_CMD_OP_CREATE_EQ:
|
||||
case MLX5_CMD_OP_QUERY_EQ:
|
||||
case MLX5_CMD_OP_GEN_EQE:
|
||||
|
@ -38,6 +38,7 @@
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include <linux/mlx5/cq.h>
|
||||
#include "mlx5_core.h"
|
||||
#include "lib/eq.h"
|
||||
|
||||
#define TASKLET_MAX_TIME 2
|
||||
#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
|
||||
@ -92,10 +93,10 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
|
||||
u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
|
||||
u32 out[MLX5_ST_SZ_DW(create_cq_out)];
|
||||
u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
|
||||
struct mlx5_eq *eq;
|
||||
struct mlx5_eq_comp *eq;
|
||||
int err;
|
||||
|
||||
eq = mlx5_eqn2eq(dev, eqn);
|
||||
eq = mlx5_eqn2comp_eq(dev, eqn);
|
||||
if (IS_ERR(eq))
|
||||
return PTR_ERR(eq);
|
||||
|
||||
@ -119,12 +120,12 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
|
||||
INIT_LIST_HEAD(&cq->tasklet_ctx.list);
|
||||
|
||||
/* Add to comp EQ CQ tree to recv comp events */
|
||||
err = mlx5_eq_add_cq(eq, cq);
|
||||
err = mlx5_eq_add_cq(&eq->core, cq);
|
||||
if (err)
|
||||
goto err_cmd;
|
||||
|
||||
/* Add to async EQ CQ tree to recv async events */
|
||||
err = mlx5_eq_add_cq(&dev->priv.eq_table.async_eq, cq);
|
||||
err = mlx5_eq_add_cq(mlx5_get_async_eq(dev), cq);
|
||||
if (err)
|
||||
goto err_cq_add;
|
||||
|
||||
@ -139,7 +140,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
|
||||
return 0;
|
||||
|
||||
err_cq_add:
|
||||
mlx5_eq_del_cq(eq, cq);
|
||||
mlx5_eq_del_cq(&eq->core, cq);
|
||||
err_cmd:
|
||||
memset(din, 0, sizeof(din));
|
||||
memset(dout, 0, sizeof(dout));
|
||||
@ -157,11 +158,11 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
|
||||
u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
|
||||
int err;
|
||||
|
||||
err = mlx5_eq_del_cq(&dev->priv.eq_table.async_eq, cq);
|
||||
err = mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = mlx5_eq_del_cq(cq->eq, cq);
|
||||
err = mlx5_eq_del_cq(&cq->eq->core, cq);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
|
@ -36,6 +36,7 @@
|
||||
#include <linux/mlx5/cq.h>
|
||||
#include <linux/mlx5/driver.h>
|
||||
#include "mlx5_core.h"
|
||||
#include "lib/eq.h"
|
||||
|
||||
enum {
|
||||
QP_PID,
|
||||
@ -349,6 +350,16 @@ out:
|
||||
return param;
|
||||
}
|
||||
|
||||
static int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
|
||||
u32 *out, int outlen)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {};
|
||||
|
||||
MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
|
||||
MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
|
||||
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
|
||||
}
|
||||
|
||||
static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
|
||||
int index)
|
||||
{
|
||||
|
@ -139,17 +139,6 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
|
||||
|
||||
spin_lock_irq(&priv->ctx_lock);
|
||||
list_add_tail(&dev_ctx->list, &priv->ctx_list);
|
||||
|
||||
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
||||
if (dev_ctx->intf->pfault) {
|
||||
if (priv->pfault) {
|
||||
mlx5_core_err(dev, "multiple page fault handlers not supported");
|
||||
} else {
|
||||
priv->pfault_ctx = dev_ctx->context;
|
||||
priv->pfault = dev_ctx->intf->pfault;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
spin_unlock_irq(&priv->ctx_lock);
|
||||
}
|
||||
|
||||
@ -179,15 +168,6 @@ void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
|
||||
if (!dev_ctx)
|
||||
return;
|
||||
|
||||
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
||||
spin_lock_irq(&priv->ctx_lock);
|
||||
if (priv->pfault == dev_ctx->intf->pfault)
|
||||
priv->pfault = NULL;
|
||||
spin_unlock_irq(&priv->ctx_lock);
|
||||
|
||||
synchronize_srcu(&priv->pfault_srcu);
|
||||
#endif
|
||||
|
||||
spin_lock_irq(&priv->ctx_lock);
|
||||
list_del(&dev_ctx->list);
|
||||
spin_unlock_irq(&priv->ctx_lock);
|
||||
@ -447,20 +427,6 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
|
||||
spin_unlock_irqrestore(&priv->ctx_lock, flags);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
||||
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
|
||||
struct mlx5_pagefault *pfault)
|
||||
{
|
||||
struct mlx5_priv *priv = &dev->priv;
|
||||
int srcu_idx;
|
||||
|
||||
srcu_idx = srcu_read_lock(&priv->pfault_srcu);
|
||||
if (priv->pfault)
|
||||
priv->pfault(dev, priv->pfault_ctx, pfault);
|
||||
srcu_read_unlock(&priv->pfault_srcu, srcu_idx);
|
||||
}
|
||||
#endif
|
||||
|
||||
void mlx5_dev_list_lock(void)
|
||||
{
|
||||
mutex_lock(&mlx5_intf_mutex);
|
||||
|
@ -178,8 +178,7 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
|
||||
{
|
||||
return is_kdump_kernel() ?
|
||||
MLX5E_MIN_NUM_CHANNELS :
|
||||
min_t(int, mdev->priv.eq_table.num_comp_vectors,
|
||||
MLX5E_MAX_NUM_CHANNELS);
|
||||
min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
|
||||
}
|
||||
|
||||
/* Use this function to get max num channels after netdev was created */
|
||||
|
@ -49,6 +49,7 @@
|
||||
#include "lib/clock.h"
|
||||
#include "en/port.h"
|
||||
#include "en/xdp.h"
|
||||
#include "lib/eq.h"
|
||||
|
||||
struct mlx5e_rq_param {
|
||||
u32 rqc[MLX5_ST_SZ_DW(rqc)];
|
||||
@ -319,7 +320,7 @@ static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
|
||||
static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
|
||||
{
|
||||
clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
|
||||
synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC));
|
||||
mlx5_eq_synchronize_async_irq(priv->mdev);
|
||||
}
|
||||
|
||||
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
|
||||
@ -1758,11 +1759,6 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq)
|
||||
mlx5e_free_cq(cq);
|
||||
}
|
||||
|
||||
static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
|
||||
{
|
||||
return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
|
||||
}
|
||||
|
||||
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
|
||||
struct mlx5e_params *params,
|
||||
struct mlx5e_channel_param *cparam)
|
||||
@ -1913,9 +1909,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
|
||||
struct mlx5e_channel_param *cparam,
|
||||
struct mlx5e_channel **cp)
|
||||
{
|
||||
int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
|
||||
struct net_dim_cq_moder icocq_moder = {0, 0};
|
||||
struct net_device *netdev = priv->netdev;
|
||||
int cpu = mlx5e_get_cpu(priv, ix);
|
||||
struct mlx5e_channel *c;
|
||||
unsigned int irq;
|
||||
int err;
|
||||
@ -4121,17 +4117,17 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
|
||||
static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
|
||||
struct mlx5e_txqsq *sq)
|
||||
{
|
||||
struct mlx5_eq *eq = sq->cq.mcq.eq;
|
||||
struct mlx5_eq_comp *eq = sq->cq.mcq.eq;
|
||||
u32 eqe_count;
|
||||
|
||||
netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
|
||||
eq->eqn, eq->cons_index, eq->irqn);
|
||||
eq->core.eqn, eq->core.cons_index, eq->core.irqn);
|
||||
|
||||
eqe_count = mlx5_eq_poll_irq_disabled(eq);
|
||||
if (!eqe_count)
|
||||
return false;
|
||||
|
||||
netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn);
|
||||
netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->core.eqn);
|
||||
sq->channel->stats->eq_rearm++;
|
||||
return true;
|
||||
}
|
||||
@ -4960,7 +4956,7 @@ int mlx5e_netdev_init(struct net_device *netdev,
|
||||
netif_carrier_off(netdev);
|
||||
|
||||
#ifdef CONFIG_MLX5_EN_ARFS
|
||||
netdev->rx_cpu_rmap = mdev->rmap;
|
||||
netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(mdev);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
|
File diff suppressed because it is too large
@ -38,6 +38,7 @@
|
||||
#include "mlx5_core.h"
|
||||
#include "eswitch.h"
|
||||
#include "fs_core.h"
|
||||
#include "lib/eq.h"
|
||||
|
||||
#define UPLINK_VPORT 0xFFFF
|
||||
|
||||
@ -1567,7 +1568,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
|
||||
/* Mark this vport as disabled to discard new events */
|
||||
vport->enabled = false;
|
||||
|
||||
synchronize_irq(pci_irq_vector(esw->dev->pdev, MLX5_EQ_VEC_ASYNC));
|
||||
mlx5_eq_synchronize_async_irq(esw->dev);
|
||||
/* Wait for current already scheduled events to complete */
|
||||
flush_workqueue(esw->work_queue);
|
||||
/* Disable events from this vport */
|
||||
|
@ -38,6 +38,7 @@
|
||||
#include <linux/mlx5/driver.h>
|
||||
#include <linux/mlx5/cmd.h>
|
||||
#include "mlx5_core.h"
|
||||
#include "lib/eq.h"
|
||||
|
||||
enum {
|
||||
MLX5_HEALTH_POLL_INTERVAL = 2 * HZ,
|
||||
@ -84,7 +85,7 @@ static void trigger_cmd_completions(struct mlx5_core_dev *dev)
|
||||
u64 vector;
|
||||
|
||||
/* wait for pending handlers to complete */
|
||||
synchronize_irq(pci_irq_vector(dev->pdev, MLX5_EQ_VEC_CMD));
|
||||
mlx5_eq_synchronize_cmd_irq(dev);
|
||||
spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
|
||||
vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
|
||||
if (!vector)
|
||||
|
93
drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
Normal file
@ -0,0 +1,93 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
|
||||
/* Copyright (c) 2018 Mellanox Technologies */
|
||||
|
||||
#ifndef __LIB_MLX5_EQ_H__
|
||||
#define __LIB_MLX5_EQ_H__
|
||||
#include <linux/mlx5/driver.h>
|
||||
|
||||
#define MLX5_MAX_IRQ_NAME (32)
|
||||
#define MLX5_EQE_SIZE (sizeof(struct mlx5_eqe))
|
||||
|
||||
struct mlx5_eq_tasklet {
|
||||
struct list_head list;
|
||||
struct list_head process_list;
|
||||
struct tasklet_struct task;
|
||||
spinlock_t lock; /* lock completion tasklet list */
|
||||
};
|
||||
|
||||
struct mlx5_cq_table {
|
||||
spinlock_t lock; /* protect radix tree */
|
||||
struct radix_tree_root tree;
|
||||
};
|
||||
|
||||
struct mlx5_eq {
|
||||
struct mlx5_core_dev *dev;
|
||||
struct mlx5_cq_table cq_table;
|
||||
__be32 __iomem *doorbell;
|
||||
u32 cons_index;
|
||||
struct mlx5_frag_buf buf;
|
||||
int size;
|
||||
unsigned int vecidx;
|
||||
unsigned int irqn;
|
||||
u8 eqn;
|
||||
int nent;
|
||||
struct mlx5_rsc_debug *dbg;
|
||||
};
|
||||
|
||||
struct mlx5_eq_comp {
|
||||
struct mlx5_eq core; /* Must be first */
|
||||
struct mlx5_eq_tasklet tasklet_ctx;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
static inline struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
|
||||
{
|
||||
return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
|
||||
}
|
||||
|
||||
static inline struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
|
||||
{
|
||||
struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));
|
||||
|
||||
return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
|
||||
}
|
||||
|
||||
static inline void eq_update_ci(struct mlx5_eq *eq, int arm)
|
||||
{
|
||||
__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
|
||||
u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
|
||||
|
||||
__raw_writel((__force u32)cpu_to_be32(val), addr);
|
||||
/* We still want ordering, just not swabbing, so add a barrier */
|
||||
mb();
|
||||
}
|
||||
|
||||
int mlx5_eq_table_init(struct mlx5_core_dev *dev);
|
||||
void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev);
|
||||
int mlx5_eq_table_create(struct mlx5_core_dev *dev);
|
||||
void mlx5_eq_table_destroy(struct mlx5_core_dev *dev);
|
||||
|
||||
int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
|
||||
int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
|
||||
struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn);
|
||||
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev);
|
||||
void mlx5_cq_tasklet_cb(unsigned long data);
|
||||
struct cpumask *mlx5_eq_comp_cpumask(struct mlx5_core_dev *dev, int ix);
|
||||
|
||||
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq);
|
||||
void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev);
|
||||
void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev);
|
||||
|
||||
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
|
||||
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
|
||||
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
|
||||
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
|
||||
|
||||
/* This function should only be called after mlx5_cmd_force_teardown_hca */
|
||||
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
|
||||
|
||||
#ifdef CONFIG_RFS_ACCEL
|
||||
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
|
||||
#endif
|
||||
|
||||
#endif
|
@ -53,6 +53,7 @@
|
||||
#endif
|
||||
#include <net/devlink.h>
|
||||
#include "mlx5_core.h"
|
||||
#include "lib/eq.h"
|
||||
#include "fs_core.h"
|
||||
#include "lib/mpfs.h"
|
||||
#include "eswitch.h"
|
||||
@ -319,51 +320,6 @@ static void release_bar(struct pci_dev *pdev)
|
||||
pci_release_regions(pdev);
|
||||
}
|
||||
|
||||
static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
|
||||
{
|
||||
struct mlx5_priv *priv = &dev->priv;
|
||||
struct mlx5_eq_table *table = &priv->eq_table;
|
||||
int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
|
||||
MLX5_CAP_GEN(dev, max_num_eqs) :
|
||||
1 << MLX5_CAP_GEN(dev, log_max_eq);
|
||||
int nvec;
|
||||
int err;
|
||||
|
||||
nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
|
||||
MLX5_EQ_VEC_COMP_BASE;
|
||||
nvec = min_t(int, nvec, num_eqs);
|
||||
if (nvec <= MLX5_EQ_VEC_COMP_BASE)
|
||||
return -ENOMEM;
|
||||
|
||||
priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
|
||||
if (!priv->irq_info)
|
||||
return -ENOMEM;
|
||||
|
||||
nvec = pci_alloc_irq_vectors(dev->pdev,
|
||||
MLX5_EQ_VEC_COMP_BASE + 1, nvec,
|
||||
PCI_IRQ_MSIX);
|
||||
if (nvec < 0) {
|
||||
err = nvec;
|
||||
goto err_free_irq_info;
|
||||
}
|
||||
|
||||
table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
|
||||
|
||||
return 0;
|
||||
|
||||
err_free_irq_info:
|
||||
kfree(priv->irq_info);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void mlx5_free_irq_vectors(struct mlx5_core_dev *dev)
|
||||
{
|
||||
struct mlx5_priv *priv = &dev->priv;
|
||||
|
||||
pci_free_irq_vectors(dev->pdev);
|
||||
kfree(priv->irq_info);
|
||||
}
|
||||
|
||||
struct mlx5_reg_host_endianness {
|
||||
u8 he;
|
||||
u8 rsvd[15];
|
||||
@ -637,177 +593,6 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
|
||||
return (u64)timer_l | (u64)timer_h1 << 32;
|
||||
}
|
||||
|
||||
static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
|
||||
{
|
||||
struct mlx5_priv *priv = &mdev->priv;
|
||||
int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
|
||||
|
||||
if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
|
||||
mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
|
||||
priv->irq_info[i].mask);
|
||||
|
||||
if (IS_ENABLED(CONFIG_SMP) &&
|
||||
irq_set_affinity_hint(irq, priv->irq_info[i].mask))
|
||||
mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
|
||||
{
|
||||
struct mlx5_priv *priv = &mdev->priv;
|
||||
int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
|
||||
|
||||
irq_set_affinity_hint(irq, NULL);
|
||||
free_cpumask_var(priv->irq_info[i].mask);
|
||||
}
|
||||
|
||||
static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
|
||||
{
|
||||
int err;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
|
||||
err = mlx5_irq_set_affinity_hint(mdev, i);
|
||||
if (err)
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
for (i--; i >= 0; i--)
|
||||
mlx5_irq_clear_affinity_hint(mdev, i);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
|
||||
mlx5_irq_clear_affinity_hint(mdev, i);
|
||||
}
|
||||
|
||||
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
|
||||
unsigned int *irqn)
|
||||
{
|
||||
struct mlx5_eq_table *table = &dev->priv.eq_table;
|
||||
struct mlx5_eq *eq, *n;
|
||||
int err = -ENOENT;
|
||||
|
||||
spin_lock(&table->lock);
|
||||
list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
|
||||
if (eq->index == vector) {
|
||||
*eqn = eq->eqn;
|
||||
*irqn = eq->irqn;
|
||||
err = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock(&table->lock);
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(mlx5_vector2eqn);
|
||||
|
||||
struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn)
|
||||
{
|
||||
struct mlx5_eq_table *table = &dev->priv.eq_table;
|
||||
struct mlx5_eq *eq;
|
||||
|
||||
spin_lock(&table->lock);
|
||||
list_for_each_entry(eq, &table->comp_eqs_list, list)
|
||||
if (eq->eqn == eqn) {
|
||||
spin_unlock(&table->lock);
|
||||
return eq;
|
||||
}
|
||||
|
||||
spin_unlock(&table->lock);
|
||||
|
||||
return ERR_PTR(-ENOENT);
|
||||
}
|
||||
|
||||
static void free_comp_eqs(struct mlx5_core_dev *dev)
|
||||
{
|
||||
struct mlx5_eq_table *table = &dev->priv.eq_table;
|
||||
struct mlx5_eq *eq, *n;
|
||||
|
||||
#ifdef CONFIG_RFS_ACCEL
|
||||
if (dev->rmap) {
|
||||
free_irq_cpu_rmap(dev->rmap);
|
||||
dev->rmap = NULL;
|
||||
}
|
||||
#endif
|
||||
spin_lock(&table->lock);
|
||||
list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
|
||||
list_del(&eq->list);
|
||||
spin_unlock(&table->lock);
|
||||
if (mlx5_destroy_unmap_eq(dev, eq))
|
||||
mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
|
||||
eq->eqn);
|
||||
kfree(eq);
|
||||
spin_lock(&table->lock);
|
||||
}
|
||||
spin_unlock(&table->lock);
|
||||
}
|
||||
|
||||
static int alloc_comp_eqs(struct mlx5_core_dev *dev)
|
||||
{
|
||||
struct mlx5_eq_table *table = &dev->priv.eq_table;
|
||||
char name[MLX5_MAX_IRQ_NAME];
|
||||
struct mlx5_eq *eq;
|
||||
int ncomp_vec;
|
||||
int nent;
|
||||
int err;
|
||||
int i;
|
||||
|
||||
INIT_LIST_HEAD(&table->comp_eqs_list);
|
||||
ncomp_vec = table->num_comp_vectors;
|
||||
nent = MLX5_COMP_EQ_SIZE;
|
||||
#ifdef CONFIG_RFS_ACCEL
|
||||
dev->rmap = alloc_irq_cpu_rmap(ncomp_vec);
|
||||
if (!dev->rmap)
|
||||
return -ENOMEM;
|
||||
#endif
|
||||
for (i = 0; i < ncomp_vec; i++) {
|
||||
eq = kzalloc(sizeof(*eq), GFP_KERNEL);
|
||||
if (!eq) {
|
||||
err = -ENOMEM;
|
||||
goto clean;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_RFS_ACCEL
|
||||
irq_cpu_rmap_add(dev->rmap, pci_irq_vector(dev->pdev,
|
||||
MLX5_EQ_VEC_COMP_BASE + i));
|
||||
#endif
|
||||
snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
|
||||
err = mlx5_create_map_eq(dev, eq,
|
||||
i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
|
||||
name, MLX5_EQ_TYPE_COMP);
|
||||
if (err) {
|
||||
kfree(eq);
|
||||
goto clean;
|
||||
}
|
||||
mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
|
||||
eq->index = i;
|
||||
spin_lock(&table->lock);
|
||||
list_add_tail(&eq->list, &table->comp_eqs_list);
|
||||
spin_unlock(&table->lock);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
clean:
|
||||
free_comp_eqs(dev);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
|
||||
{
|
||||
u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
|
||||
@ -944,7 +729,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = mlx5_eq_init(dev);
|
||||
err = mlx5_eq_table_init(dev);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "failed to initialize eq\n");
|
||||
goto out;
|
||||
@ -1018,7 +803,7 @@ err_tables_cleanup:
|
||||
mlx5_cq_debugfs_cleanup(dev);
|
||||
|
||||
err_eq_cleanup:
|
||||
mlx5_eq_cleanup(dev);
|
||||
mlx5_eq_table_cleanup(dev);
|
||||
|
||||
out:
|
||||
return err;
|
||||
@ -1039,7 +824,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
|
||||
mlx5_cleanup_srq_table(dev);
|
||||
mlx5_cleanup_qp_table(dev);
|
||||
mlx5_cq_debugfs_cleanup(dev);
|
||||
mlx5_eq_cleanup(dev);
|
||||
mlx5_eq_table_cleanup(dev);
|
||||
}
|
||||
|
||||
static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
|
||||
@ -1161,23 +946,17 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
|
||||
}
|
||||
}
|
||||
|
||||
err = mlx5_alloc_irq_vectors(dev);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "alloc irq vectors failed\n");
|
||||
goto err_cleanup_once;
|
||||
}
|
||||
|
||||
dev->priv.uar = mlx5_get_uars_page(dev);
|
||||
if (IS_ERR(dev->priv.uar)) {
|
||||
dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
|
||||
err = PTR_ERR(dev->priv.uar);
|
||||
goto err_disable_msix;
|
||||
goto err_get_uars;
|
||||
}
|
||||
|
||||
err = mlx5_start_eqs(dev);
|
||||
err = mlx5_eq_table_create(dev);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
|
||||
goto err_put_uars;
|
||||
dev_err(&pdev->dev, "Failed to create EQs\n");
|
||||
goto err_eq_table;
|
||||
}
|
||||
|
||||
err = mlx5_fw_tracer_init(dev->tracer);
|
||||
@ -1186,18 +965,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
|
||||
goto err_fw_tracer;
|
||||
}
|
||||
|
||||
err = alloc_comp_eqs(dev);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "Failed to alloc completion EQs\n");
|
||||
goto err_comp_eqs;
|
||||
}
|
||||
|
||||
err = mlx5_irq_set_affinity_hints(dev);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
|
||||
goto err_affinity_hints;
|
||||
}
|
||||
|
||||
err = mlx5_fpga_device_start(dev);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "fpga device start failed %d\n", err);
|
||||
@ -1266,24 +1033,15 @@ err_ipsec_start:
|
||||
mlx5_fpga_device_stop(dev);
|
||||
|
||||
err_fpga_start:
|
||||
mlx5_irq_clear_affinity_hints(dev);
|
||||
|
||||
err_affinity_hints:
|
||||
free_comp_eqs(dev);
|
||||
|
||||
err_comp_eqs:
|
||||
mlx5_fw_tracer_cleanup(dev->tracer);
|
||||
|
||||
err_fw_tracer:
|
||||
mlx5_stop_eqs(dev);
|
||||
mlx5_eq_table_destroy(dev);
|
||||
|
||||
err_put_uars:
|
||||
err_eq_table:
|
||||
mlx5_put_uars_page(dev, priv->uar);
|
||||
|
||||
err_disable_msix:
|
||||
mlx5_free_irq_vectors(dev);
|
||||
|
||||
err_cleanup_once:
|
||||
err_get_uars:
|
||||
if (boot)
|
||||
mlx5_cleanup_once(dev);
|
||||
|
||||
@ -1340,12 +1098,9 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
|
||||
mlx5_accel_ipsec_cleanup(dev);
|
||||
mlx5_accel_tls_cleanup(dev);
|
||||
mlx5_fpga_device_stop(dev);
|
||||
mlx5_irq_clear_affinity_hints(dev);
|
||||
free_comp_eqs(dev);
|
||||
mlx5_fw_tracer_cleanup(dev->tracer);
|
||||
mlx5_stop_eqs(dev);
|
||||
mlx5_eq_table_destroy(dev);
|
||||
mlx5_put_uars_page(dev, priv->uar);
|
||||
mlx5_free_irq_vectors(dev);
|
||||
if (cleanup)
|
||||
mlx5_cleanup_once(dev);
|
||||
mlx5_stop_health_poll(dev, cleanup);
|
||||
@ -1414,14 +1169,6 @@ static int init_one(struct pci_dev *pdev,
|
||||
INIT_LIST_HEAD(&priv->waiting_events_list);
|
||||
priv->is_accum_events = false;
|
||||
|
||||
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
||||
err = init_srcu_struct(&priv->pfault_srcu);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "init_srcu_struct failed with error code %d\n",
|
||||
err);
|
||||
goto clean_dev;
|
||||
}
|
||||
#endif
|
||||
mutex_init(&priv->bfregs.reg_head.lock);
|
||||
mutex_init(&priv->bfregs.wc_head.lock);
|
||||
INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
|
||||
@ -1430,7 +1177,7 @@ static int init_one(struct pci_dev *pdev,
|
||||
err = mlx5_pci_init(dev, priv);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
|
||||
goto clean_srcu;
|
||||
goto clean_dev;
|
||||
}
|
||||
|
||||
err = mlx5_health_init(dev);
|
||||
@ -1463,11 +1210,7 @@ clean_health:
|
||||
mlx5_health_cleanup(dev);
|
||||
close_pci:
|
||||
mlx5_pci_close(dev, priv);
|
||||
clean_srcu:
|
||||
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
||||
cleanup_srcu_struct(&priv->pfault_srcu);
|
||||
clean_dev:
|
||||
#endif
|
||||
devlink_free(devlink);
|
||||
|
||||
return err;
|
||||
@ -1491,9 +1234,6 @@ static void remove_one(struct pci_dev *pdev)
|
||||
mlx5_pagealloc_cleanup(dev);
|
||||
mlx5_health_cleanup(dev);
|
||||
mlx5_pci_close(dev, priv);
|
||||
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
||||
cleanup_srcu_struct(&priv->pfault_srcu);
|
||||
#endif
|
||||
devlink_free(devlink);
|
||||
}
|
||||
|
||||
@ -1637,7 +1377,6 @@ succeed:
|
||||
* kexec. There is no need to cleanup the mlx5_core software
|
||||
* contexts.
|
||||
*/
|
||||
mlx5_irq_clear_affinity_hints(dev);
|
||||
mlx5_core_eq_free_irqs(dev);
|
||||
|
||||
return 0;
|
||||
|
@ -100,8 +100,6 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);
|
||||
|
||||
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
|
||||
unsigned long param);
|
||||
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
|
||||
struct mlx5_pagefault *pfault);
|
||||
void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
|
||||
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
|
||||
void mlx5_disable_device(struct mlx5_core_dev *dev);
|
||||
@ -124,28 +122,7 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
|
||||
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
|
||||
u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev);
|
||||
|
||||
int mlx5_eq_init(struct mlx5_core_dev *dev);
|
||||
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
|
||||
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
|
||||
int nent, u64 mask, const char *name,
|
||||
enum mlx5_eq_type type);
|
||||
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
|
||||
int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
|
||||
int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
|
||||
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
|
||||
u32 *out, int outlen);
|
||||
int mlx5_start_eqs(struct mlx5_core_dev *dev);
|
||||
void mlx5_stop_eqs(struct mlx5_core_dev *dev);
|
||||
/* This function should only be called after mlx5_cmd_force_teardown_hca */
|
||||
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
|
||||
struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
|
||||
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq);
|
||||
void mlx5_cq_tasklet_cb(unsigned long data);
|
||||
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
|
||||
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
|
||||
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
|
||||
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
|
||||
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
|
||||
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
|
||||
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
|
||||
|
||||
|
@ -132,7 +132,7 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
|
||||
if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
|
||||
mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
|
||||
event_type, rsn);
|
||||
return;
|
||||
goto out;
|
||||
}
|
||||
|
||||
switch (common->res) {
|
||||
@ -150,7 +150,7 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
|
||||
default:
|
||||
mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
|
||||
}
|
||||
|
||||
out:
|
||||
mlx5_core_put_rsc(common);
|
||||
}
|
||||
|
||||
@ -670,3 +670,19 @@ int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
|
||||
return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
|
||||
|
||||
struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev,
|
||||
int res_num,
|
||||
enum mlx5_res_type res_type)
|
||||
{
|
||||
u32 rsn = res_num | (res_type << MLX5_USER_INDEX_LEN);
|
||||
|
||||
return mlx5_get_rsc(dev, rsn);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_core_res_hold);
|
||||
|
||||
void mlx5_core_res_put(struct mlx5_core_rsc_common *res)
|
||||
{
|
||||
mlx5_core_put_rsc(res);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_core_res_put);
|
||||
|
@ -60,7 +60,7 @@ struct mlx5_core_cq {
|
||||
} tasklet_ctx;
|
||||
int reset_notify_added;
|
||||
struct list_head reset_notify;
|
||||
struct mlx5_eq *eq;
|
||||
struct mlx5_eq_comp *eq;
|
||||
u16 uid;
|
||||
};
|
||||
|
||||
|
@ -212,6 +212,13 @@ enum {
|
||||
MLX5_PFAULT_SUBTYPE_RDMA = 1,
|
||||
};
|
||||
|
||||
enum wqe_page_fault_type {
|
||||
MLX5_WQE_PF_TYPE_RMP = 0,
|
||||
MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE = 1,
|
||||
MLX5_WQE_PF_TYPE_RESP = 2,
|
||||
MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC = 3,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_PERM_LOCAL_READ = 1 << 2,
|
||||
MLX5_PERM_LOCAL_WRITE = 1 << 3,
|
||||
|
@ -84,18 +84,6 @@ enum {
|
||||
MLX5_MAX_PORTS = 2,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_EQ_VEC_PAGES = 0,
|
||||
MLX5_EQ_VEC_CMD = 1,
|
||||
MLX5_EQ_VEC_ASYNC = 2,
|
||||
MLX5_EQ_VEC_PFAULT = 3,
|
||||
MLX5_EQ_VEC_COMP_BASE,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_MAX_IRQ_NAME = 32
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_ATOMIC_MODE_OFFSET = 16,
|
||||
MLX5_ATOMIC_MODE_IB_COMP = 1,
|
||||
@ -222,14 +210,6 @@ enum mlx5_port_status {
|
||||
MLX5_PORT_DOWN = 2,
|
||||
};
|
||||
|
||||
enum mlx5_eq_type {
|
||||
MLX5_EQ_TYPE_COMP,
|
||||
MLX5_EQ_TYPE_ASYNC,
|
||||
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
||||
MLX5_EQ_TYPE_PF,
|
||||
#endif
|
||||
};
|
||||
|
||||
struct mlx5_bfreg_info {
|
||||
u32 *sys_pages;
|
||||
int num_low_latency_bfregs;
|
||||
@ -366,51 +346,6 @@ struct mlx5_frag_buf_ctrl {
|
||||
u8 log_frag_strides;
|
||||
};
|
||||
|
||||
struct mlx5_eq_tasklet {
|
||||
struct list_head list;
|
||||
struct list_head process_list;
|
||||
struct tasklet_struct task;
|
||||
/* lock on completion tasklet list */
|
||||
spinlock_t lock;
|
||||
};
|
||||
|
||||
struct mlx5_eq_pagefault {
|
||||
struct work_struct work;
|
||||
/* Pagefaults lock */
|
||||
spinlock_t lock;
|
||||
struct workqueue_struct *wq;
|
||||
mempool_t *pool;
|
||||
};
|
||||
|
||||
struct mlx5_cq_table {
|
||||
/* protect radix tree */
|
||||
spinlock_t lock;
|
||||
struct radix_tree_root tree;
|
||||
};
|
||||
|
||||
struct mlx5_eq {
|
||||
struct mlx5_core_dev *dev;
|
||||
struct mlx5_cq_table cq_table;
|
||||
__be32 __iomem *doorbell;
|
||||
u32 cons_index;
|
||||
struct mlx5_frag_buf buf;
|
||||
int size;
|
||||
unsigned int irqn;
|
||||
u8 eqn;
|
||||
int nent;
|
||||
u64 mask;
|
||||
struct list_head list;
|
||||
int index;
|
||||
struct mlx5_rsc_debug *dbg;
|
||||
enum mlx5_eq_type type;
|
||||
union {
|
||||
struct mlx5_eq_tasklet tasklet_ctx;
|
||||
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
||||
struct mlx5_eq_pagefault pf_ctx;
|
||||
#endif
|
||||
};
|
||||
};
|
||||
|
||||
struct mlx5_core_psv {
|
||||
u32 psv_idx;
|
||||
struct psv_layout {
|
||||
@ -477,22 +412,6 @@ struct mlx5_core_srq {
|
||||
u16 uid;
|
||||
};
|
||||
|
||||
struct mlx5_eq_table {
|
||||
void __iomem *update_ci;
|
||||
void __iomem *update_arm_ci;
|
||||
struct list_head comp_eqs_list;
|
||||
struct mlx5_eq pages_eq;
|
||||
struct mlx5_eq async_eq;
|
||||
struct mlx5_eq cmd_eq;
|
||||
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
||||
struct mlx5_eq pfault_eq;
|
||||
#endif
|
||||
int num_comp_vectors;
|
||||
/* protect EQs list
|
||||
*/
|
||||
spinlock_t lock;
|
||||
};
|
||||
|
||||
struct mlx5_uars_page {
|
||||
void __iomem *map;
|
||||
bool wc;
|
||||
@ -575,11 +494,6 @@ struct mlx5_core_sriov {
|
||||
int enabled_vfs;
|
||||
};
|
||||
|
||||
struct mlx5_irq_info {
|
||||
cpumask_var_t mask;
|
||||
char name[MLX5_MAX_IRQ_NAME];
|
||||
};
|
||||
|
||||
struct mlx5_fc_stats {
|
||||
spinlock_t counters_idr_lock; /* protects counters_idr */
|
||||
struct idr counters_idr;
|
||||
@ -596,7 +510,7 @@ struct mlx5_fc_stats {
|
||||
struct mlx5_mpfs;
|
||||
struct mlx5_eswitch;
|
||||
struct mlx5_lag;
|
||||
struct mlx5_pagefault;
|
||||
struct mlx5_eq_table;
|
||||
|
||||
struct mlx5_rate_limit {
|
||||
u32 rate;
|
||||
@ -646,8 +560,7 @@ struct mlx5_port_module_event_stats {
|
||||
|
||||
struct mlx5_priv {
|
||||
char name[MLX5_MAX_NAME_LEN];
|
||||
struct mlx5_eq_table eq_table;
|
||||
struct mlx5_irq_info *irq_info;
|
||||
struct mlx5_eq_table *eq_table;
|
||||
|
||||
/* pages stuff */
|
||||
struct workqueue_struct *pg_wq;
|
||||
@ -705,13 +618,6 @@ struct mlx5_priv {
|
||||
|
||||
struct mlx5_port_module_event_stats pme_stats;
|
||||
|
||||
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
||||
void (*pfault)(struct mlx5_core_dev *dev,
|
||||
void *context,
|
||||
struct mlx5_pagefault *pfault);
|
||||
void *pfault_ctx;
|
||||
struct srcu_struct pfault_srcu;
|
||||
#endif
|
||||
struct mlx5_bfreg_data bfregs;
|
||||
struct mlx5_uars_page *uar;
|
||||
};
|
||||
@ -736,44 +642,6 @@ enum mlx5_pagefault_type_flags {
|
||||
MLX5_PFAULT_RDMA = 1 << 2,
|
||||
};
|
||||
|
||||
/* Contains the details of a pagefault. */
|
||||
struct mlx5_pagefault {
|
||||
u32 bytes_committed;
|
||||
u32 token;
|
||||
u8 event_subtype;
|
||||
u8 type;
|
||||
union {
|
||||
/* Initiator or send message responder pagefault details. */
|
||||
struct {
|
||||
/* Received packet size, only valid for responders. */
|
||||
u32 packet_size;
|
||||
/*
|
||||
* Number of resource holding WQE, depends on type.
|
||||
*/
|
||||
u32 wq_num;
|
||||
/*
|
||||
* WQE index. Refers to either the send queue or
|
||||
* receive queue, according to event_subtype.
|
||||
*/
|
||||
u16 wqe_index;
|
||||
} wqe;
|
||||
/* RDMA responder pagefault details */
|
||||
struct {
|
||||
u32 r_key;
|
||||
/*
|
||||
* Received packet size, minimal size page fault
|
||||
* resolution required for forward progress.
|
||||
*/
|
||||
u32 packet_size;
|
||||
u32 rdma_op_len;
|
||||
u64 rdma_va;
|
||||
} rdma;
|
||||
};
|
||||
|
||||
struct mlx5_eq *eq;
|
||||
struct work_struct work;
|
||||
};
|
||||
|
||||
struct mlx5_td {
|
||||
struct list_head tirs_list;
|
||||
u32 tdn;
|
||||
@ -858,9 +726,6 @@ struct mlx5_core_dev {
|
||||
} roce;
|
||||
#ifdef CONFIG_MLX5_FPGA
|
||||
struct mlx5_fpga_device *fpga;
|
||||
#endif
|
||||
#ifdef CONFIG_RFS_ACCEL
|
||||
struct cpu_rmap *rmap;
|
||||
#endif
|
||||
struct mlx5_clock clock;
|
||||
struct mlx5_ib_clock_info *clock_info;
|
||||
@ -1155,6 +1020,9 @@ int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
|
||||
bool map_wc, bool fast_path);
|
||||
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
|
||||
|
||||
unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
|
||||
struct cpumask *
|
||||
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
|
||||
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
|
||||
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
|
||||
u8 roce_version, u8 roce_l3_type, const u8 *gid,
|
||||
@ -1204,9 +1072,6 @@ struct mlx5_interface {
|
||||
void (*detach)(struct mlx5_core_dev *dev, void *context);
|
||||
void (*event)(struct mlx5_core_dev *dev, void *context,
|
||||
enum mlx5_dev_event event, unsigned long param);
|
||||
void (*pfault)(struct mlx5_core_dev *dev,
|
||||
void *context,
|
||||
struct mlx5_pagefault *pfault);
|
||||
void * (*get_dev)(void *context);
|
||||
int protocol;
|
||||
struct list_head list;
|
||||
|
60
include/linux/mlx5/eq.h
Normal file
@ -0,0 +1,60 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
|
||||
/* Copyright (c) 2018 Mellanox Technologies. */
|
||||
|
||||
#ifndef MLX5_CORE_EQ_H
|
||||
#define MLX5_CORE_EQ_H
|
||||
|
||||
#include <linux/mlx5/driver.h>
|
||||
|
||||
enum {
|
||||
MLX5_EQ_PAGEREQ_IDX = 0,
|
||||
MLX5_EQ_CMD_IDX = 1,
|
||||
MLX5_EQ_ASYNC_IDX = 2,
|
||||
/* reserved to be used by mlx5_core ulps (mlx5e/mlx5_ib) */
|
||||
MLX5_EQ_PFAULT_IDX = 3,
|
||||
MLX5_EQ_MAX_ASYNC_EQS,
|
||||
/* completion eqs vector indices start here */
|
||||
MLX5_EQ_VEC_COMP_BASE = MLX5_EQ_MAX_ASYNC_EQS,
|
||||
};
|
||||
|
||||
#define MLX5_NUM_CMD_EQE (32)
|
||||
#define MLX5_NUM_ASYNC_EQE (0x1000)
|
||||
#define MLX5_NUM_SPARE_EQE (0x80)
|
||||
|
||||
struct mlx5_eq;
|
||||
|
||||
struct mlx5_eq_param {
|
||||
u8 index;
|
||||
int nent;
|
||||
u64 mask;
|
||||
void *context;
|
||||
irq_handler_t handler;
|
||||
};
|
||||
|
||||
struct mlx5_eq *
|
||||
mlx5_eq_create_generic(struct mlx5_core_dev *dev, const char *name,
|
||||
struct mlx5_eq_param *param);
|
||||
int
|
||||
mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
|
||||
|
||||
struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc);
|
||||
void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm);
|
||||
|
||||
/* The HCA will think the queue has overflowed if we
|
||||
* don't tell it we've been processing events. We
|
||||
* create EQs with MLX5_NUM_SPARE_EQE extra entries,
|
||||
* so we must update our consumer index at
|
||||
* least that often.
|
||||
*
|
||||
* mlx5_eq_update_cc must be called on every EQE @EQ irq handler
|
||||
*/
|
||||
static inline u32 mlx5_eq_update_cc(struct mlx5_eq *eq, u32 cc)
|
||||
{
|
||||
if (unlikely(cc >= MLX5_NUM_SPARE_EQE)) {
|
||||
mlx5_eq_update_ci(eq, cc, 0);
|
||||
cc = 0;
|
||||
}
|
||||
return cc;
|
||||
}
|
||||
|
||||
#endif /* MLX5_CORE_EQ_H */
|
@ -349,7 +349,7 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
|
||||
u8 reformat_l3_tunnel_to_l2[0x1];
|
||||
u8 reformat_l2_to_l3_tunnel[0x1];
|
||||
u8 reformat_and_modify_action[0x1];
|
||||
u8 reserved_at_14[0xb];
|
||||
u8 reserved_at_15[0xb];
|
||||
u8 reserved_at_20[0x2];
|
||||
u8 log_max_ft_size[0x6];
|
||||
u8 log_max_modify_header_context[0x8];
|
||||
@ -586,7 +586,7 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
|
||||
u8 fdb_multi_path_to_table[0x1];
|
||||
u8 reserved_at_1d[0x1];
|
||||
u8 multi_fdb_encap[0x1];
|
||||
u8 reserved_at_1e[0x1e1];
|
||||
u8 reserved_at_1f[0x1e1];
|
||||
|
||||
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
|
||||
|
||||
@ -829,7 +829,7 @@ struct mlx5_ifc_vector_calc_cap_bits {
|
||||
struct mlx5_ifc_calc_op calc2;
|
||||
struct mlx5_ifc_calc_op calc3;
|
||||
|
||||
u8 reserved_at_e0[0x720];
|
||||
u8 reserved_at_c0[0x720];
|
||||
};
|
||||
|
||||
enum {
|
||||
@ -2473,14 +2473,15 @@ struct mlx5_ifc_xrc_srqc_bits {
|
||||
|
||||
u8 wq_signature[0x1];
|
||||
u8 cont_srq[0x1];
|
||||
u8 dbr_umem_valid[0x1];
|
||||
u8 reserved_at_22[0x1];
|
||||
u8 rlky[0x1];
|
||||
u8 basic_cyclic_rcv_wqe[0x1];
|
||||
u8 log_rq_stride[0x3];
|
||||
u8 xrcd[0x18];
|
||||
|
||||
u8 page_offset[0x6];
|
||||
u8 reserved_at_46[0x2];
|
||||
u8 reserved_at_46[0x1];
|
||||
u8 dbr_umem_valid[0x1];
|
||||
u8 cqn[0x18];
|
||||
|
||||
u8 reserved_at_60[0x20];
|
||||
@ -5566,7 +5567,7 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
|
||||
struct mlx5_ifc_modify_nic_vport_field_select_bits {
|
||||
u8 reserved_at_0[0x12];
|
||||
u8 affiliation[0x1];
|
||||
u8 reserved_at_e[0x1];
|
||||
u8 reserved_at_13[0x1];
|
||||
u8 disable_uc_local_lb[0x1];
|
||||
u8 disable_mc_local_lb[0x1];
|
||||
u8 node_guid[0x1];
|
||||
@ -6689,9 +6690,12 @@ struct mlx5_ifc_create_xrc_srq_in_bits {
|
||||
|
||||
struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
|
||||
|
||||
u8 reserved_at_280[0x40];
|
||||
u8 reserved_at_280[0x60];
|
||||
|
||||
u8 xrc_srq_umem_valid[0x1];
|
||||
u8 reserved_at_2c1[0x5bf];
|
||||
u8 reserved_at_2e1[0x1f];
|
||||
|
||||
u8 reserved_at_300[0x580];
|
||||
|
||||
u8 pas[0][0x40];
|
||||
};
|
||||
@ -9024,7 +9028,7 @@ struct mlx5_ifc_dcbx_param_bits {
|
||||
u8 dcbx_cee_cap[0x1];
|
||||
u8 dcbx_ieee_cap[0x1];
|
||||
u8 dcbx_standby_cap[0x1];
|
||||
u8 reserved_at_0[0x5];
|
||||
u8 reserved_at_3[0x5];
|
||||
u8 port_number[0x8];
|
||||
u8 reserved_at_10[0xa];
|
||||
u8 max_application_table_size[6];
|
||||
|
@ -596,6 +596,11 @@ int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id);
|
||||
int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
|
||||
int reset, void *out, int out_size);
|
||||
|
||||
struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev,
|
||||
int res_num,
|
||||
enum mlx5_res_type res_type);
|
||||
void mlx5_core_res_put(struct mlx5_core_rsc_common *res);
|
||||
|
||||
static inline const char *mlx5_qp_type_str(int type)
|
||||
{
|
||||
switch (type) {
|
||||
|