RDMA/irdma: Add missing read barriers
On code inspection, there are many instances in the driver where CEQE and AEQE fields written to by HW are read without guaranteeing that the polarity bit has been read and checked first.

Add a read barrier to avoid reordering of loads on the CEQE/AEQE fields prior to checking the polarity bit.

Fixes: 3f49d6842569 ("RDMA/irdma: Implement HW Admin Queue OPs")
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Link: https://lore.kernel.org/r/20230711175253.1289-2-shiraz.saleem@intel.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent d64b1ee12a
commit 4984eb5145
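For context, the hazard the commit message describes is a plain load-load reordering problem: nothing prevents the CPU (or compiler) from reading the CQE/AEQE payload words before the valid/polarity word that is checked afterwards, so stale payload data can be paired with a freshly valid bit. The sketch below shows the same consume-side pattern in standalone C11, using an acquire load as a userspace analogue of the driver's polarity check followed by dma_rmb(); the struct layout and the names hypothetical_cqe and poll_one are illustrative assumptions, not the driver's own types.

#include <inttypes.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a HW-written completion entry (not the driver's layout). */
struct hypothetical_cqe {
	_Atomic uint64_t qword3;	/* carries the valid/polarity bit */
	uint64_t qword0;		/* payload the producer writes before flipping the bit */
};

#define CQE_VALID (UINT64_C(1) << 63)

/*
 * Poll one entry: check the valid bit first, then read the payload.
 * The acquire load plays the role of the driver's "check polarity,
 * then dma_rmb()" pair: payload loads cannot be hoisted above it.
 */
static bool poll_one(struct hypothetical_cqe *cqe, bool polarity, uint64_t *out)
{
	uint64_t q3 = atomic_load_explicit(&cqe->qword3, memory_order_acquire);

	if (!!(q3 & CQE_VALID) != polarity)
		return false;		/* entry not valid yet: the -ENOENT path */

	*out = cqe->qword0;		/* safe: ordered after the valid-bit check */
	return true;
}

int main(void)
{
	struct hypothetical_cqe cqe = { .qword3 = CQE_VALID, .qword0 = 42 };
	uint64_t payload;

	if (poll_one(&cqe, true, &payload))
		printf("payload = %" PRIu64 "\n", payload);
	return 0;
}

In the kernel driver itself the lighter dma_rmb() (rather than a full rmb()) is enough here, since it only needs to order reads of coherent DMA memory written by the device, which is exactly what the hunks below add after each polarity check.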
@@ -3363,6 +3363,9 @@ int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
 	if (polarity != ccq->cq_uk.polarity)
 		return -ENOENT;
 
+	/* Ensure CEQE contents are read after valid bit is checked */
+	dma_rmb();
+
 	get_64bit_val(cqe, 8, &qp_ctx);
 	cqp = (struct irdma_sc_cqp *)(unsigned long)qp_ctx;
 	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp);
@@ -4009,13 +4012,17 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
 	u8 polarity;
 
 	aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq);
-	get_64bit_val(aeqe, 0, &compl_ctx);
 	get_64bit_val(aeqe, 8, &temp);
 	polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp);
 
 	if (aeq->polarity != polarity)
 		return -ENOENT;
 
+	/* Ensure AEQE contents are read after valid bit is checked */
+	dma_rmb();
+
+	get_64bit_val(aeqe, 0, &compl_ctx);
+
 	print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8,
 			     aeqe, 16, false);
@@ -230,6 +230,9 @@ static int irdma_puda_poll_info(struct irdma_sc_cq *cq,
 	if (valid_bit != cq_uk->polarity)
 		return -ENOENT;
 
+	/* Ensure CQE contents are read after valid bit is checked */
+	dma_rmb();
+
 	if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
 		ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
 
@@ -243,6 +246,9 @@ static int irdma_puda_poll_info(struct irdma_sc_cq *cq,
 		if (polarity != cq_uk->polarity)
 			return -ENOENT;
 
+		/* Ensure ext CQE contents are read after ext valid bit is checked */
+		dma_rmb();
+
 		IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
 		if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
 			cq_uk->polarity = !cq_uk->polarity;
@@ -1527,6 +1527,9 @@ void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
 		if (polarity != temp)
 			break;
 
+		/* Ensure CQE contents are read after valid bit is checked */
+		dma_rmb();
+
 		get_64bit_val(cqe, 8, &comp_ctx);
 		if ((void *)(unsigned long)comp_ctx == q)
 			set_64bit_val(cqe, 8, 0);