RDMA-rxe: Isolate mr code from atomic_write_reply()
Isolate mr specific code from atomic_write_reply() in rxe_resp.c into
a subroutine rxe_mr_do_atomic_write() in rxe_mr.c. Check the length
for the atomic write operation. Make iova_to_vaddr() static.

Link: https://lore.kernel.org/r/20230119235936.19728-5-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent f04d5b3d91
commit d8bdb0ebca
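In outline, the patch splits the atomic-write responder into a protocol
half and a memory-region half. A simplified sketch distilled from the
hunks below (not a verbatim copy of either file; bookkeeping elided):

    /* rxe_resp.c - protocol side: unpack the packet, update state */
    static enum resp_states atomic_write_reply(struct rxe_qp *qp,
                                               struct rxe_pkt_info *pkt)
    {
            /* ... replay handling ... */
            err = rxe_mr_do_atomic_write(qp->resp.mr,
                                         qp->resp.va + qp->resp.offset,
                                         *(u64 *)payload_addr(pkt));
            if (err)
                    return err;
            /* ... psn/msn/opcode/status bookkeeping ... */
    }

    /* rxe_mr.c - mr side: validate mr state, resolve the iova, and
     * perform the release store; returns 0 or a RESPST_* error */
    int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);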
drivers/infiniband/sw/rxe/rxe_loc.h
@@ -71,9 +71,9 @@ int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
 	      void *addr, int length, enum rxe_mr_copy_dir dir);
 int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 		  int sg_nents, unsigned int *sg_offset);
-void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
 int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
 			u64 compare, u64 swap_add, u64 *orig_val);
+int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
 struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
 			 enum rxe_mr_lookup_type type);
 int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
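The header change tells the story in miniature: iova_to_vaddr() drops
out of rxe_loc.h (it becomes static in the next hunk), and
rxe_mr_do_atomic_write() becomes the one mr-level entry point the
responder needs for atomic writes.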
drivers/infiniband/sw/rxe/rxe_mr.c
@@ -297,7 +297,7 @@ static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
 	}
 }
 
-void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
+static void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
 {
 	size_t offset;
 	int m, n;
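This hunk is behaviour-neutral: the rxe_resp.c hunks below remove the
last caller outside rxe_mr.c, so the symbol can be made file-local.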
@@ -565,6 +565,42 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
 	return 0;
 }
 
+/* only implemented for 64 bit architectures */
+#if defined CONFIG_64BIT
+int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
+{
+	u64 *va;
+
+	/* See IBA oA19-28 */
+	if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
+		rxe_dbg_mr(mr, "mr not in valid state");
+		return RESPST_ERR_RKEY_VIOLATION;
+	}
+
+	va = iova_to_vaddr(mr, iova, sizeof(value));
+	if (unlikely(!va)) {
+		rxe_dbg_mr(mr, "iova out of range");
+		return RESPST_ERR_RKEY_VIOLATION;
+	}
+
+	/* See IBA A19.4.2 */
+	if (unlikely((uintptr_t)va & 0x7 || iova & 0x7)) {
+		rxe_dbg_mr(mr, "misaligned address");
+		return RESPST_ERR_MISALIGNED_ATOMIC;
+	}
+
+	/* Do atomic write after all prior operations have completed */
+	smp_store_release(va, value);
+
+	return 0;
+}
+#else
+int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
+{
+	return RESPST_ERR_UNSUPPORTED_OPCODE;
+}
+#endif
+
 int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
 {
 	struct rxe_sge *sge = &dma->sge[dma->cur_sge];
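Two details of the new helper are worth spelling out. The "check the
length" from the commit message is the sizeof(value) passed to
iova_to_vaddr(), where the old inline code passed the raw payload size.
And the write itself is a single release store: an aligned 8-byte store
on a 64-bit machine is indivisible, and release ordering makes all
prior writes visible before the new value. A minimal userspace analogue
of that store using C11 atomics (illustrative only; the function name
is mine, and the kernel's smp_store_release() plays the role of
atomic_store_explicit() here):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Publish a 64-bit value with release semantics, so every write
     * sequenced before it is visible to a reader that loads the same
     * location with acquire. The target must be 8-byte aligned, which
     * is what the IBA alignment check above enforces.
     */
    static void publish_u64(_Atomic uint64_t *va, uint64_t value)
    {
            atomic_store_explicit(va, value, memory_order_release);
    }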
drivers/infiniband/sw/rxe/rxe_resp.c
@@ -723,52 +723,14 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
 	return RESPST_ACKNOWLEDGE;
 }
 
-#ifdef CONFIG_64BIT
-static enum resp_states do_atomic_write(struct rxe_qp *qp,
-					struct rxe_pkt_info *pkt)
-{
-	struct rxe_mr *mr = qp->resp.mr;
-	int payload = payload_size(pkt);
-	u64 src, *dst;
-
-	if (mr->state != RXE_MR_STATE_VALID)
-		return RESPST_ERR_RKEY_VIOLATION;
-
-	memcpy(&src, payload_addr(pkt), payload);
-
-	dst = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, payload);
-	/* check vaddr is 8 bytes aligned. */
-	if (!dst || (uintptr_t)dst & 7)
-		return RESPST_ERR_MISALIGNED_ATOMIC;
-
-	/* Do atomic write after all prior operations have completed */
-	smp_store_release(dst, src);
-
-	/* decrease resp.resid to zero */
-	qp->resp.resid -= sizeof(payload);
-
-	qp->resp.msn++;
-
-	/* next expected psn, read handles this separately */
-	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
-	qp->resp.ack_psn = qp->resp.psn;
-
-	qp->resp.opcode = pkt->opcode;
-	qp->resp.status = IB_WC_SUCCESS;
-	return RESPST_ACKNOWLEDGE;
-}
-#else
-static enum resp_states do_atomic_write(struct rxe_qp *qp,
-					struct rxe_pkt_info *pkt)
-{
-	return RESPST_ERR_UNSUPPORTED_OPCODE;
-}
-#endif /* CONFIG_64BIT */
-
 static enum resp_states atomic_write_reply(struct rxe_qp *qp,
 					   struct rxe_pkt_info *pkt)
 {
 	struct resp_res *res = qp->resp.res;
+	struct rxe_mr *mr;
+	u64 value;
+	u64 iova;
+	int err;
 
 	if (!res) {
 		res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK);
@@ -777,7 +739,26 @@ static enum resp_states atomic_write_reply(struct rxe_qp *qp,
 
 	if (res->replay)
 		return RESPST_ACKNOWLEDGE;
-	return do_atomic_write(qp, pkt);
+
+	mr = qp->resp.mr;
+	value = *(u64 *)payload_addr(pkt);
+	iova = qp->resp.va + qp->resp.offset;
+
+	err = rxe_mr_do_atomic_write(mr, iova, value);
+	if (err)
+		return err;
+
+	qp->resp.resid = 0;
+	qp->resp.msn++;
+
+	/* next expected psn, read handles this separately */
+	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+	qp->resp.ack_psn = qp->resp.psn;
+
+	qp->resp.opcode = pkt->opcode;
+	qp->resp.status = IB_WC_SUCCESS;
+
+	return RESPST_ACKNOWLEDGE;
 }
 
 static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
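Note the error convention atomic_write_reply() now relies on:
rxe_mr_do_atomic_write() returns 0 on success and otherwise a RESPST_*
value, so "if (err) return err;" hands the mr layer's verdict straight
back to the responder state machine. The rewrite also replaces the old
"qp->resp.resid -= sizeof(payload)" (which subtracted sizeof(int),
despite its comment promising to zero resid) with a plain
"qp->resp.resid = 0".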