author		Jason Gunthorpe <jgg@nvidia.com>	2020-09-04 19:41:50 -0300
committer	Jason Gunthorpe <jgg@nvidia.com>	2020-09-11 10:24:53 -0300
commit		68363052ff5addd3817a104cc453f4e2045704c9
tree		31502da98c659cfa0e902f6d6417d8ce4fb1e3f7 /drivers/infiniband/hw/qedr
parent		22123a0e4974fe03d1d8446b132a6c455f95a516
RDMA/qedr: Use rdma_umem_for_each_dma_block() instead of open-coding
This loop splits the DMA SGL into pg_shift-sized pages; use the core
code to do this directly.
Link: https://lore.kernel.org/r/9-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com
Acked-by: Michal Kalderon <michal.kalderon@marvell.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
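
For context, here is a minimal sketch of the core iterator this commit switches to. It is not code from the qedr driver; the helper name and parameters are hypothetical. rdma_umem_for_each_dma_block() walks the umem in BIT(pg_shift)-sized DMA blocks and rdma_block_iter_dma_address() returns each block's bus address, which is exactly what the removed open-coded loop computed by hand.

#include <linux/bits.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/* Hypothetical helper: collect the bus address of each pg_shift-sized
 * DMA block of a umem into a caller-supplied table.
 */
static void fill_block_addrs(struct ib_umem *umem, u32 pg_shift,
			     u64 *tbl, u32 max_entries)
{
	struct ib_block_iter biter;
	u32 i = 0;

	rdma_umem_for_each_dma_block(umem, &biter, BIT(pg_shift)) {
		if (i == max_entries)
			return;
		/* Address of the current block, already split by the core. */
		tbl[i++] = rdma_block_iter_dma_address(&biter);
	}
}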
Diffstat (limited to 'drivers/infiniband/hw/qedr')
-rw-r--r--	drivers/infiniband/hw/qedr/verbs.c	41
1 file changed, 16 insertions(+), 25 deletions(-)
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 02368c3df802..6605841e2739 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -601,11 +601,9 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
 			       struct qedr_pbl_info *pbl_info, u32 pg_shift)
 {
 	int pbe_cnt, total_num_pbes = 0;
-	u32 fw_pg_cnt, fw_pg_per_umem_pg;
 	struct qedr_pbl *pbl_tbl;
-	struct sg_dma_page_iter sg_iter;
+	struct ib_block_iter biter;
 	struct regpair *pbe;
-	u64 pg_addr;
 
 	if (!pbl_info->num_pbes)
 		return;
@@ -626,32 +624,25 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
 
 	pbe_cnt = 0;
 
-	fw_pg_per_umem_pg = BIT(PAGE_SHIFT - pg_shift);
+	rdma_umem_for_each_dma_block (umem, &biter, BIT(pg_shift)) {
+		u64 pg_addr = rdma_block_iter_dma_address(&biter);
 
-	for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
-		pg_addr = sg_page_iter_dma_address(&sg_iter);
-		for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
-			pbe->lo = cpu_to_le32(pg_addr);
-			pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
+		pbe->lo = cpu_to_le32(pg_addr);
+		pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
 
-			pg_addr += BIT(pg_shift);
-			pbe_cnt++;
-			total_num_pbes++;
-			pbe++;
+		pbe_cnt++;
+		total_num_pbes++;
+		pbe++;
 
-			if (total_num_pbes == pbl_info->num_pbes)
-				return;
+		if (total_num_pbes == pbl_info->num_pbes)
+			return;
 
-			/* If the given pbl is full storing the pbes,
-			 * move to next pbl.
-			 */
-			if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
-				pbl_tbl++;
-				pbe = (struct regpair *)pbl_tbl->va;
-				pbe_cnt = 0;
-			}
-
-			fw_pg_cnt++;
+		/* If the given pbl is full storing the pbes, move to next pbl.
+		 */
+		if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
+			pbl_tbl++;
+			pbe = (struct regpair *)pbl_tbl->va;
+			pbe_cnt = 0;
 		}
 	}
 }
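
To see why the nested loop could be dropped: the old code emitted BIT(PAGE_SHIFT - pg_shift) firmware pages for every PAGE_SIZE-sized DMA page, stepping the address by BIT(pg_shift) each iteration. The block iterator yields exactly that sequence of addresses itself. The following standalone sketch (ordinary userspace C, with hypothetical address and page-size values) just prints the addresses the removed inner loop would have produced for a single 4K umem page.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12		/* assumed 4K system pages */
#define BIT(n)		(1ULL << (n))

int main(void)
{
	uint64_t pg_addr = 0x100000;	/* hypothetical DMA address of one umem page */
	uint32_t pg_shift = 10;		/* hypothetical 1K firmware page size */
	uint32_t fw_pg_per_umem_pg = BIT(PAGE_SHIFT - pg_shift);

	/* The removed inner loop: one firmware page per BIT(pg_shift) bytes. */
	for (uint32_t fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg; fw_pg_cnt++)
		printf("fw page %u -> 0x%llx\n", fw_pg_cnt,
		       (unsigned long long)(pg_addr + fw_pg_cnt * BIT(pg_shift)));
	return 0;
}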