RDMA/rxe: Use structs to describe the uABI instead of opencoding
Open coding pointer math is not acceptable for describing the uABI in RDMA. Provide structs for all the cases. The udata is cast to the struct as close to the verbs entry point as possible, for maximum clarity. Function signatures and so forth are revised to allow for this.

Tested-by: Yuval Shaia <yuval.shaia@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
commit 0c43ab371b
parent b92ec0fe32
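The pattern the change converges on is easiest to see at the verb entry points: udata->outbuf is length-checked against the uABI response struct and cast exactly once, and only the typed pointer is passed into the driver internals. Below is a condensed sketch of that shape, trimmed from the rxe_create_cq hunk in the diff; error unwinding, attribute checks, and the CQ allocation are elided, so treat it as illustrative rather than a drop-in excerpt.

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
                                   const struct ib_cq_init_attr *attr,
                                   struct ib_ucontext *context,
                                   struct ib_udata *udata)
{
        struct rxe_dev *rxe = to_rdev(dev);
        struct rxe_cq *cq;
        struct rxe_create_cq_resp __user *uresp = NULL;
        int err;

        /* Resolve the typed response pointer once, at the verbs entry point. */
        if (udata) {
                if (udata->outlen < sizeof(*uresp))
                        return ERR_PTR(-EINVAL);
                uresp = udata->outbuf;
        }

        /* ... attribute checks and cq allocation elided in this sketch ... */

        /* The internals now take the typed pointer, not raw udata offsets. */
        err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
                               context, uresp);
        if (err)
                return ERR_PTR(err);

        return &cq->ibcq;
}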
drivers/infiniband/sw/rxe/rxe_cq.c
@@ -83,7 +83,7 @@ static void rxe_send_complete(unsigned long data)

 int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
                      int comp_vector, struct ib_ucontext *context,
-                     struct ib_udata *udata)
+                     struct rxe_create_cq_resp __user *uresp)
 {
         int err;

@@ -94,15 +94,15 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
                 return -ENOMEM;
         }

-        err = do_mmap_info(rxe, udata, false, context, cq->queue->buf,
-                           cq->queue->buf_size, &cq->queue->ip);
+        err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context,
+                           cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
         if (err) {
                 kvfree(cq->queue->buf);
                 kfree(cq->queue);
                 return err;
         }

-        if (udata)
+        if (uresp)
                 cq->is_user = 1;

         cq->is_dying = false;
@@ -114,14 +114,15 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
         return 0;
 }

-int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe, struct ib_udata *udata)
+int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
+                        struct rxe_resize_cq_resp __user *uresp)
 {
         int err;

         err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
                                sizeof(struct rxe_cqe),
                                cq->queue->ip ? cq->queue->ip->context : NULL,
-                               udata, NULL, &cq->cq_lock);
+                               uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
         if (!err)
                 cq->ibcq.cqe = cqe;

drivers/infiniband/sw/rxe/rxe_loc.h
@@ -56,9 +56,10 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,

 int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
                      int comp_vector, struct ib_ucontext *context,
-                     struct ib_udata *udata);
+                     struct rxe_create_cq_resp __user *uresp);

-int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe, struct ib_udata *udata);
+int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe,
+                        struct rxe_resize_cq_resp __user *uresp);

 int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);

@@ -158,7 +159,8 @@ int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid);
 int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);

 int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
-                     struct ib_qp_init_attr *init, struct ib_udata *udata,
+                     struct ib_qp_init_attr *init,
+                     struct rxe_create_qp_resp __user *uresp,
                      struct ib_pd *ibpd);

 int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);
@@ -226,11 +228,12 @@ int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,

 int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
                       struct ib_srq_init_attr *init,
-                      struct ib_ucontext *context, struct ib_udata *udata);
+                      struct ib_ucontext *context,
+                      struct rxe_create_srq_resp __user *uresp);

 int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
                       struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
-                      struct ib_udata *udata);
+                      struct rxe_modify_srq_cmd *ucmd);

 void rxe_release(struct kref *kref);

drivers/infiniband/sw/rxe/rxe_qp.c
@@ -216,7 +216,8 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,

 static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
                            struct ib_qp_init_attr *init,
-                           struct ib_ucontext *context, struct ib_udata *udata)
+                           struct ib_ucontext *context,
+                           struct rxe_create_qp_resp __user *uresp)
 {
         int err;
         int wqe_size;
@@ -241,9 +242,9 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
         if (!qp->sq.queue)
                 return -ENOMEM;

-        err = do_mmap_info(rxe, udata, true,
-                           context, qp->sq.queue->buf,
-                           qp->sq.queue->buf_size, &qp->sq.queue->ip);
+        err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, context,
+                           qp->sq.queue->buf, qp->sq.queue->buf_size,
+                           &qp->sq.queue->ip);

         if (err) {
                 kvfree(qp->sq.queue->buf);
@@ -274,7 +275,8 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,

 static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
                             struct ib_qp_init_attr *init,
-                            struct ib_ucontext *context, struct ib_udata *udata)
+                            struct ib_ucontext *context,
+                            struct rxe_create_qp_resp __user *uresp)
 {
         int err;
         int wqe_size;
@@ -294,9 +296,8 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
         if (!qp->rq.queue)
                 return -ENOMEM;

-        err = do_mmap_info(rxe, udata, false, context,
-                           qp->rq.queue->buf,
-                           qp->rq.queue->buf_size,
+        err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, context,
+                           qp->rq.queue->buf, qp->rq.queue->buf_size,
                            &qp->rq.queue->ip);
         if (err) {
                 kvfree(qp->rq.queue->buf);
@@ -322,14 +323,15 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,

 /* called by the create qp verb */
 int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
-                     struct ib_qp_init_attr *init, struct ib_udata *udata,
+                     struct ib_qp_init_attr *init,
+                     struct rxe_create_qp_resp __user *uresp,
                      struct ib_pd *ibpd)
 {
         int err;
         struct rxe_cq *rcq = to_rcq(init->recv_cq);
         struct rxe_cq *scq = to_rcq(init->send_cq);
         struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
-        struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;
+        struct ib_ucontext *context = ibpd->uobject ? ibpd->uobject->context : NULL;

         rxe_add_ref(pd);
         rxe_add_ref(rcq);
@@ -344,11 +346,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,

         rxe_qp_init_misc(rxe, qp, init);

-        err = rxe_qp_init_req(rxe, qp, init, context, udata);
+        err = rxe_qp_init_req(rxe, qp, init, context, uresp);
         if (err)
                 goto err1;

-        err = rxe_qp_init_resp(rxe, qp, init, context, udata);
+        err = rxe_qp_init_resp(rxe, qp, init, context, uresp);
         if (err)
                 goto err2;

drivers/infiniband/sw/rxe/rxe_queue.c
@@ -37,35 +37,21 @@
 #include "rxe_queue.h"

 int do_mmap_info(struct rxe_dev *rxe,
-                 struct ib_udata *udata,
-                 bool is_req,
+                 struct mminfo __user *outbuf,
                  struct ib_ucontext *context,
                  struct rxe_queue_buf *buf,
                  size_t buf_size,
                  struct rxe_mmap_info **ip_p)
 {
         int err;
-        u32 len, offset;
         struct rxe_mmap_info *ip = NULL;

-        if (udata) {
-                if (is_req) {
-                        len = udata->outlen - sizeof(struct mminfo);
-                        offset = sizeof(struct mminfo);
-                } else {
-                        len = udata->outlen;
-                        offset = 0;
-                }
-
-                if (len < sizeof(ip->info))
-                        goto err1;
-
+        if (outbuf) {
                 ip = rxe_create_mmap_info(rxe, buf_size, context, buf);
                 if (!ip)
                         goto err1;

-                err = copy_to_user(udata->outbuf + offset, &ip->info,
-                                   sizeof(ip->info));
+                err = copy_to_user(outbuf, &ip->info, sizeof(ip->info));
                 if (err)
                         goto err2;

@@ -171,7 +157,7 @@ int rxe_queue_resize(struct rxe_queue *q,
                      unsigned int *num_elem_p,
                      unsigned int elem_size,
                      struct ib_ucontext *context,
-                     struct ib_udata *udata,
+                     struct mminfo __user *outbuf,
                      spinlock_t *producer_lock,
                      spinlock_t *consumer_lock)
 {
@@ -184,7 +170,7 @@ int rxe_queue_resize(struct rxe_queue *q,
         if (!new_q)
                 return -ENOMEM;

-        err = do_mmap_info(new_q->rxe, udata, false, context, new_q->buf,
+        err = do_mmap_info(new_q->rxe, outbuf, context, new_q->buf,
                            new_q->buf_size, &new_q->ip);
         if (err) {
                 vfree(new_q->buf);
drivers/infiniband/sw/rxe/rxe_queue.h
@@ -77,8 +77,7 @@ struct rxe_queue {
 };

 int do_mmap_info(struct rxe_dev *rxe,
-                 struct ib_udata *udata,
-                 bool is_req,
+                 struct mminfo __user *outbuf,
                  struct ib_ucontext *context,
                  struct rxe_queue_buf *buf,
                  size_t buf_size,
@@ -94,7 +93,7 @@ int rxe_queue_resize(struct rxe_queue *q,
                      unsigned int *num_elem_p,
                      unsigned int elem_size,
                      struct ib_ucontext *context,
-                     struct ib_udata *udata,
+                     struct mminfo __user *outbuf,
                      /* Protect producers while resizing queue */
                      spinlock_t *producer_lock,
                      /* Protect consumers while resizing queue */
drivers/infiniband/sw/rxe/rxe_srq.c
@@ -99,7 +99,8 @@ err1:

 int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
                       struct ib_srq_init_attr *init,
-                      struct ib_ucontext *context, struct ib_udata *udata)
+                      struct ib_ucontext *context,
+                      struct rxe_create_srq_resp __user *uresp)
 {
         int err;
         int srq_wqe_size;
@@ -126,55 +127,41 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,

         srq->rq.queue = q;

-        err = do_mmap_info(rxe, udata, false, context, q->buf,
+        err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context, q->buf,
                            q->buf_size, &q->ip);
         if (err)
                 return err;

-        if (udata && udata->outlen >= sizeof(struct mminfo) + sizeof(u32)) {
-                if (copy_to_user(udata->outbuf + sizeof(struct mminfo),
-                                 &srq->srq_num, sizeof(u32)))
+        if (uresp) {
+                if (copy_to_user(&uresp->srq_num, &srq->srq_num,
+                                 sizeof(uresp->srq_num)))
                         return -EFAULT;
         }

         return 0;
 }

 int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
                       struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
-                      struct ib_udata *udata)
+                      struct rxe_modify_srq_cmd *ucmd)
 {
         int err;
         struct rxe_queue *q = srq->rq.queue;
-        struct mminfo mi = { .offset = 1, .size = 0};
+        struct mminfo __user *mi = NULL;

         if (mask & IB_SRQ_MAX_WR) {
-                /* Check that we can write the mminfo struct to user space */
-                if (udata && udata->inlen >= sizeof(__u64)) {
-                        __u64 mi_addr;
-
-                        /* Get address of user space mminfo struct */
-                        err = ib_copy_from_udata(&mi_addr, udata,
-                                                 sizeof(mi_addr));
-                        if (err)
-                                goto err1;
-
-                        udata->outbuf = (void __user *)(unsigned long)mi_addr;
-                        udata->outlen = sizeof(mi);
-
-                        if (!access_ok(VERIFY_WRITE,
-                                       (void __user *)udata->outbuf,
-                                       udata->outlen)) {
-                                err = -EFAULT;
-                                goto err1;
-                        }
-                }
+                /*
+                 * This is completely screwed up, the response is supposed to
+                 * be in the outbuf not like this.
+                 */
+                mi = u64_to_user_ptr(ucmd->mmap_info_addr);

                 err = rxe_queue_resize(q, &attr->max_wr,
                                        rcv_wqe_size(srq->rq.max_sge),
                                        srq->rq.queue->ip ?
                                        srq->rq.queue->ip->context :
                                        NULL,
-                                       udata, &srq->rq.producer_lock,
+                                       mi, &srq->rq.producer_lock,
                                        &srq->rq.consumer_lock);
                 if (err)
                         goto err2;
@@ -188,6 +175,5 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 err2:
         rxe_queue_cleanup(q);
         srq->rq.queue = NULL;
-err1:
         return err;
 }
drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -407,6 +407,13 @@ static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
         struct rxe_pd *pd = to_rpd(ibpd);
         struct rxe_srq *srq;
         struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;
+        struct rxe_create_srq_resp __user *uresp = NULL;
+
+        if (udata) {
+                if (udata->outlen < sizeof(*uresp))
+                        return ERR_PTR(-EINVAL);
+                uresp = udata->outbuf;
+        }

         err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
         if (err)
@@ -422,7 +429,7 @@ static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
         rxe_add_ref(pd);
         srq->pd = pd;

-        err = rxe_srq_from_init(rxe, srq, init, context, udata);
+        err = rxe_srq_from_init(rxe, srq, init, context, uresp);
         if (err)
                 goto err2;

@@ -443,12 +450,22 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
         int err;
         struct rxe_srq *srq = to_rsrq(ibsrq);
         struct rxe_dev *rxe = to_rdev(ibsrq->device);
+        struct rxe_modify_srq_cmd ucmd = {};
+
+        if (udata) {
+                if (udata->inlen < sizeof(ucmd))
+                        return -EINVAL;
+
+                err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
+                if (err)
+                        return err;
+        }

         err = rxe_srq_chk_attr(rxe, srq, attr, mask);
         if (err)
                 goto err1;

-        err = rxe_srq_from_attr(rxe, srq, attr, mask, udata);
+        err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd);
         if (err)
                 goto err1;

@@ -517,6 +534,13 @@ static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
         struct rxe_dev *rxe = to_rdev(ibpd->device);
         struct rxe_pd *pd = to_rpd(ibpd);
         struct rxe_qp *qp;
+        struct rxe_create_qp_resp __user *uresp = NULL;
+
+        if (udata) {
+                if (udata->outlen < sizeof(*uresp))
+                        return ERR_PTR(-EINVAL);
+                uresp = udata->outbuf;
+        }

         err = rxe_qp_chk_init(rxe, init);
         if (err)
@@ -538,7 +562,7 @@ static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,

         rxe_add_index(qp);

-        err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
+        err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd);
         if (err)
                 goto err3;

@@ -888,6 +912,13 @@ static struct ib_cq *rxe_create_cq(struct ib_device *dev,
         int err;
         struct rxe_dev *rxe = to_rdev(dev);
         struct rxe_cq *cq;
+        struct rxe_create_cq_resp __user *uresp = NULL;
+
+        if (udata) {
+                if (udata->outlen < sizeof(*uresp))
+                        return ERR_PTR(-EINVAL);
+                uresp = udata->outbuf;
+        }

         if (attr->flags)
                 return ERR_PTR(-EINVAL);
@@ -903,7 +934,7 @@ static struct ib_cq *rxe_create_cq(struct ib_device *dev,
         }

         err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
-                               context, udata);
+                               context, uresp);
         if (err)
                 goto err2;

@@ -930,12 +961,19 @@ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
         int err;
         struct rxe_cq *cq = to_rcq(ibcq);
         struct rxe_dev *rxe = to_rdev(ibcq->device);
+        struct rxe_resize_cq_resp __user *uresp = NULL;
+
+        if (udata) {
+                if (udata->outlen < sizeof(*uresp))
+                        return -EINVAL;
+                uresp = udata->outbuf;
+        }

         err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
         if (err)
                 goto err1;

-        err = rxe_cq_resize_queue(cq, cqe, udata);
+        err = rxe_cq_resize_queue(cq, cqe, uresp);
         if (err)
                 goto err1;

include/uapi/rdma/rdma_user_rxe.h
@@ -144,4 +144,26 @@ struct rxe_recv_wqe {
         struct rxe_dma_info dma;
 };

+struct rxe_create_cq_resp {
+        struct mminfo mi;
+};
+
+struct rxe_resize_cq_resp {
+        struct mminfo mi;
+};
+
+struct rxe_create_qp_resp {
+        struct mminfo rq_mi;
+        struct mminfo sq_mi;
+};
+
+struct rxe_create_srq_resp {
+        struct mminfo mi;
+        __u32 srq_num;
+};
+
+struct rxe_modify_srq_cmd {
+        __u64 mmap_info_addr;
+};
+
 #endif /* RDMA_USER_RXE_H */
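For orientation, the new structs in rdma_user_rxe.h map one-to-one onto what the user-space provider sees in its response buffer. The following is a hedged sketch of that consumer side and is not part of this commit: the mmap() call, the cmd_fd parameter, and the helper name map_srq_queue are assumptions about how a librxe-style provider would use struct mminfo, while the field names (mi.offset, mi.size, srq_num) come from the header above.

#include <stdint.h>
#include <sys/mman.h>
#include <rdma/rdma_user_rxe.h>   /* struct rxe_create_srq_resp, struct mminfo */

/*
 * Hypothetical provider-side helper (illustrative only): once the kernel has
 * filled the create-SRQ response through udata->outbuf, the queue mapping
 * cookie and the SRQ number are read by field name instead of by hand-rolled
 * offset arithmetic.  cmd_fd is assumed to be the uverbs command fd.
 */
static void *map_srq_queue(int cmd_fd, const struct rxe_create_srq_resp *resp,
                           uint32_t *srq_num)
{
        *srq_num = resp->srq_num;
        return mmap(NULL, resp->mi.size, PROT_READ | PROT_WRITE,
                    MAP_SHARED, cmd_fd, resp->mi.offset);
}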