5bcf5a59c4
In order to prevent user space from modifying an index that belongs to the kernel for shared queues, let the kernel use a local copy of the index and copy any new values of that index to the shared rxe_queue_buf struct. This adds more switch statements, which decreases the performance of the queue API. Move the type into the parameter list for these functions so that the compiler can optimize out the switch statements when the explicit type is known. Modify all the calls in the driver on performance paths to pass in the explicit queue type.

Link: https://lore.kernel.org/r/20210527194748.662636-4-rpearsonhpe@gmail.com
Link: https://lore.kernel.org/linux-rdma/20210526165239.GP1002214@nvidia.com/
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
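A minimal sketch of the pattern described above, under assumed names (example_queue, example_queue_buf, and example_producer_index are illustrations only, not the driver's actual rxe_queue.h helpers): the kernel keeps a local copy of any index it owns and only reads the shared buffer for indices owned by user space, and because the queue type is an explicit parameter, a caller that passes a compile-time constant lets the compiler resolve the switch and eliminate it.

#include <linux/types.h>
#include <asm/barrier.h>

enum example_queue_type {
	EXAMPLE_QUEUE_TYPE_KERNEL,	/* both ends driven by the kernel */
	EXAMPLE_QUEUE_TYPE_TO_USER,	/* kernel produces, user space consumes */
	EXAMPLE_QUEUE_TYPE_FROM_USER,	/* user space produces, kernel consumes */
};

/* hypothetical buffer mapped into user space; its indices cannot be trusted */
struct example_queue_buf {
	u32 producer_index;
	u32 consumer_index;
};

struct example_queue {
	struct example_queue_buf *buf;	/* shared with user space */
	u32 producer_index;		/* kernel's local copy of its own index */
};

static inline u32 example_producer_index(struct example_queue *q,
					  enum example_queue_type type)
{
	switch (type) {
	case EXAMPLE_QUEUE_TYPE_FROM_USER:
		/* user space owns the producer index; read the shared value */
		return smp_load_acquire(&q->buf->producer_index);
	default:
		/* the kernel owns the producer index; use the local copy */
		return q->producer_index;
	}
}

In rxe_cq_post() below, the driver follows the same idea with its real helpers: it passes QUEUE_TYPE_TO_USER or QUEUE_TYPE_KERNEL explicitly to queue_full(), producer_addr() and advance_producer(), so the switch inside each helper can be optimized out wherever the type is a constant.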
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */
#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector)
{
	int count;

	if (cqe <= 0) {
		pr_warn("cqe(%d) <= 0\n", cqe);
		goto err1;
	}

	if (cqe > rxe->attr.max_cqe) {
		pr_warn("cqe(%d) > max_cqe(%d)\n",
			cqe, rxe->attr.max_cqe);
		goto err1;
	}

	if (cq) {
		if (cq->is_user)
			count = queue_count(cq->queue, QUEUE_TYPE_TO_USER);
		else
			count = queue_count(cq->queue, QUEUE_TYPE_KERNEL);

		if (cqe < count) {
			pr_warn("cqe(%d) < current # elements in queue (%d)",
				cqe, count);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

static void rxe_send_complete(struct tasklet_struct *t)
{
	struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq->is_dying) {
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_udata *udata,
		     struct rxe_create_cq_resp __user *uresp)
{
	int err;
	enum queue_type type;

	type = uresp ? QUEUE_TYPE_TO_USER : QUEUE_TYPE_KERNEL;
	cq->queue = rxe_queue_init(rxe, &cqe,
				   sizeof(struct rxe_cqe), type);
	if (!cq->queue) {
		pr_warn("unable to create cq\n");
		return -ENOMEM;
	}

	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
			   cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
	if (err) {
		vfree(cq->queue->buf);
		kfree(cq->queue);
		return err;
	}

	if (uresp)
		cq->is_user = 1;

	cq->is_dying = false;

	tasklet_setup(&cq->comp_task, rxe_send_complete);

	spin_lock_init(&cq->cq_lock);
	cq->ibcq.cqe = cqe;
	return 0;
}

int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
			struct rxe_resize_cq_resp __user *uresp,
			struct ib_udata *udata)
{
	int err;

	err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
			       sizeof(struct rxe_cqe), udata,
			       uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
	if (!err)
		cq->ibcq.cqe = cqe;

	return err;
}

int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
	struct ib_event ev;
	unsigned long flags;
	int full;
	void *addr;

	spin_lock_irqsave(&cq->cq_lock, flags);

	if (cq->is_user)
		full = queue_full(cq->queue, QUEUE_TYPE_TO_USER);
	else
		full = queue_full(cq->queue, QUEUE_TYPE_KERNEL);

	if (unlikely(full)) {
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		if (cq->ibcq.event_handler) {
			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}

		return -EBUSY;
	}

	if (cq->is_user)
		addr = producer_addr(cq->queue, QUEUE_TYPE_TO_USER);
	else
		addr = producer_addr(cq->queue, QUEUE_TYPE_KERNEL);

	memcpy(addr, cqe, sizeof(*cqe));

	if (cq->is_user)
		advance_producer(cq->queue, QUEUE_TYPE_TO_USER);
	else
		advance_producer(cq->queue, QUEUE_TYPE_KERNEL);

	spin_unlock_irqrestore(&cq->cq_lock, flags);

	if ((cq->notify == IB_CQ_NEXT_COMP) ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		cq->notify = 0;
		tasklet_schedule(&cq->comp_task);
	}

	return 0;
}

void rxe_cq_disable(struct rxe_cq *cq)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	cq->is_dying = true;
	spin_unlock_irqrestore(&cq->cq_lock, flags);
}

void rxe_cq_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);

	if (cq->queue)
		rxe_queue_cleanup(cq->queue);
}