-rw-r--r--	drivers/infiniband/hw/irdma/verbs.c	82
1 file changed, 50 insertions, 32 deletions
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index f4674ecf9c8c..45eb2d339802 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -2745,6 +2745,54 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
 	return ret;
 }
 
+static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access)
+{
+	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+	bool use_pbles;
+	u32 stag;
+	int err;
+
+	use_pbles = iwmr->page_cnt != 1;
+
+	err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
+	if (err)
+		return err;
+
+	if (use_pbles) {
+		err = irdma_check_mr_contiguous(&iwpbl->pble_alloc,
+						iwmr->page_size);
+		if (err) {
+			irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
+			iwpbl->pbl_allocated = false;
+		}
+	}
+
+	stag = irdma_create_stag(iwdev);
+	if (!stag) {
+		err = -ENOMEM;
+		goto free_pble;
+	}
+
+	iwmr->stag = stag;
+	iwmr->ibmr.rkey = stag;
+	iwmr->ibmr.lkey = stag;
+	err = irdma_hwreg_mr(iwdev, iwmr, access);
+	if (err)
+		goto err_hwreg;
+
+	return 0;
+
+err_hwreg:
+	irdma_free_stag(iwdev, stag);
+
+free_pble:
+	if (iwpbl->pble_alloc.level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
+		irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
+
+	return err;
+}
+
 /**
  * irdma_reg_user_mr - Register a user memory region
  * @pd: ptr of pd
@@ -2761,12 +2809,11 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
 	struct irdma_device *iwdev = to_iwdev(pd->device);
 	struct irdma_ucontext *ucontext;
-	struct irdma_pble_alloc *palloc;
 	struct irdma_pbl *iwpbl;
 	struct irdma_mr *iwmr;
 	struct ib_umem *region;
 	struct irdma_mem_reg_req req;
-	u32 total, stag = 0;
+	u32 total;
 	u8 shadow_pgcnt = 1;
 	bool use_pbles = false;
 	unsigned long flags;
@@ -2817,7 +2864,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 	}
 	iwmr->len = region->length;
 	iwpbl->user_base = virt;
-	palloc = &iwpbl->pble_alloc;
 	iwmr->type = req.reg_type;
 	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
 
@@ -2863,36 +2909,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
 		break;
 	case IRDMA_MEMREG_TYPE_MEM:
-		use_pbles = (iwmr->page_cnt != 1);
-
-		err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
+		err = irdma_reg_user_mr_type_mem(iwmr, access);
 		if (err)
 			goto error;
 
-		if (use_pbles) {
-			err = irdma_check_mr_contiguous(palloc,
-							iwmr->page_size);
-			if (err) {
-				irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
-				iwpbl->pbl_allocated = false;
-			}
-		}
-
-		stag = irdma_create_stag(iwdev);
-		if (!stag) {
-			err = -ENOMEM;
-			goto error;
-		}
-
-		iwmr->stag = stag;
-		iwmr->ibmr.rkey = stag;
-		iwmr->ibmr.lkey = stag;
-		err = irdma_hwreg_mr(iwdev, iwmr, access);
-		if (err) {
-			irdma_free_stag(iwdev, stag);
-			goto error;
-		}
-
 		break;
 	default:
 		goto error;
@@ -2903,8 +2923,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 	return &iwmr->ibmr;
 
 error:
-	if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
-		irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
 	ib_umem_release(region);
 	kfree(iwmr);