author	Shiraz Saleem <shiraz.saleem@intel.com>	2023-07-11 12:52:51 -0500
committer	Leon Romanovsky <leon@kernel.org>	2023-07-17 08:01:22 +0300
commit	4984eb51453ff7eddee9e5ce816145be39c0ec5c (patch)
tree	f9d69baeb34327823104ff9c0dab987876faf0e0 /drivers/infiniband
parent	d64b1ee12a168030fbb3e0aebf7bce49e9a07589 (diff)
RDMA/irdma: Add missing read barriers
On code inspection, there are many instances in the driver where CEQE
and AEQE fields written to by HW are read without guaranteeing that the
polarity bit has been read and checked first.

Add a read barrier to avoid reordering of loads on the CEQE/AEQE fields
prior to checking the polarity bit.

Fixes: 3f49d6842569 ("RDMA/irdma: Implement HW Admin Queue OPs")
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Link: https://lore.kernel.org/r/20230711175253.1289-2-shiraz.saleem@intel.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
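For context, the consumer-side pattern the patch enforces looks roughly like the
sketch below. It is a minimal illustration only: my_cqe_read(), MY_CQE_VALID and
the qword layout are hypothetical; only dma_rmb(), FIELD_GET() and the -ENOENT
convention mirror the driver code in the hunks that follow.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <asm/barrier.h>	/* dma_rmb() */

/* Hypothetical valid/polarity bit in the CQE's last qword. */
#define MY_CQE_VALID	BIT_ULL(63)

static int my_cqe_read(__le64 *cqe, u8 expected_polarity, u64 *ctx)
{
	u64 qword3 = le64_to_cpu(cqe[3]);

	if ((u8)FIELD_GET(MY_CQE_VALID, qword3) != expected_polarity)
		return -ENOENT;	/* HW has not published this entry yet */

	/*
	 * Order all later CQE loads after the valid-bit load above;
	 * without the barrier the CPU may already have fetched stale
	 * qwords left over from the previous lap around the ring.
	 */
	dma_rmb();

	*ctx = le64_to_cpu(cqe[1]);	/* now safe to read HW-written fields */
	return 0;
}

The dma_rmb() only orders the descriptor loads against the valid-bit load; the
hunks below place it immediately after each polarity check in ctrl.c, puda.c
and uk.c for the same reason.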
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/irdma/ctrl.c	9
-rw-r--r--	drivers/infiniband/hw/irdma/puda.c	6
-rw-r--r--	drivers/infiniband/hw/irdma/uk.c	3
3 files changed, 17 insertions, 1 deletion
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index d88c9184007e..c91439f428c7 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -3363,6 +3363,9 @@ int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
if (polarity != ccq->cq_uk.polarity)
return -ENOENT;
+ /* Ensure CEQE contents are read after valid bit is checked */
+ dma_rmb();
+
get_64bit_val(cqe, 8, &qp_ctx);
cqp = (struct irdma_sc_cqp *)(unsigned long)qp_ctx;
info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp);
@@ -4009,13 +4012,17 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
u8 polarity;
aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq);
- get_64bit_val(aeqe, 0, &compl_ctx);
get_64bit_val(aeqe, 8, &temp);
polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp);
if (aeq->polarity != polarity)
return -ENOENT;
+ /* Ensure AEQE contents are read after valid bit is checked */
+ dma_rmb();
+
+ get_64bit_val(aeqe, 0, &compl_ctx);
+
print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8,
aeqe, 16, false);
diff --git a/drivers/infiniband/hw/irdma/puda.c b/drivers/infiniband/hw/irdma/puda.c
index 4ec9639f1bdb..562531712ea4 100644
--- a/drivers/infiniband/hw/irdma/puda.c
+++ b/drivers/infiniband/hw/irdma/puda.c
@@ -230,6 +230,9 @@ static int irdma_puda_poll_info(struct irdma_sc_cq *cq,
if (valid_bit != cq_uk->polarity)
return -ENOENT;
+ /* Ensure CQE contents are read after valid bit is checked */
+ dma_rmb();
+
if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
@@ -243,6 +246,9 @@ static int irdma_puda_poll_info(struct irdma_sc_cq *cq,
if (polarity != cq_uk->polarity)
return -ENOENT;
+ /* Ensure ext CQE contents are read after ext valid bit is checked */
+ dma_rmb();
+
IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
cq_uk->polarity = !cq_uk->polarity;
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index dd428d915c17..ea2c07751245 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -1527,6 +1527,9 @@ void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
if (polarity != temp)
break;
+ /* Ensure CQE contents are read after valid bit is checked */
+ dma_rmb();
+
get_64bit_val(cqe, 8, &comp_ctx);
if ((void *)(unsigned long)comp_ctx == q)
set_64bit_val(cqe, 8, 0);