author    | Roland Dreier <rolandd@cisco.com> | 2007-03-04 16:15:11 -0800
committer | Roland Dreier <rolandd@cisco.com> | 2007-05-08 18:00:37 -0700
commit    | f7c6a7b5d59980b076abbf2ceeb8735591290285
tree      | 29c35b47052bba87f031a4744d8ad12ff5187149 /drivers/infiniband/hw/ipath
parent    | 36f021b579d195cdc5fa6f3e2bab198b4bf70643
IB/uverbs: Export ib_umem_get()/ib_umem_release() to modules
Export ib_umem_get()/ib_umem_release() and put low-level drivers in
control of when to call ib_umem_get() to pin and DMA map userspace memory,
rather than always calling it in ib_uverbs_reg_mr() before calling the
low-level driver's reg_user_mr method.
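Under the new convention, a low-level driver's reg_user_mr method pins the region itself. The following is a minimal sketch, not code from this patch: `example_reg_user_mr` and `struct example_mr` are illustrative names, while the ib_umem_get()/ib_umem_release() calls match the API as exported here.

```c
#include <linux/err.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/* Hypothetical per-driver MR structure; the umem pointer is kept so
 * that dereg_mr can unpin the region later. */
struct example_mr {
	struct ib_mr	ibmr;
	struct ib_umem *umem;
};

static struct ib_mr *example_reg_user_mr(struct ib_pd *pd, u64 start,
					 u64 length, u64 virt_addr,
					 int access_flags,
					 struct ib_udata *udata)
{
	struct example_mr *mr;
	struct ib_umem *umem;

	/* Pin and DMA map the pages; ib_uverbs no longer does this on
	 * the driver's behalf before calling reg_user_mr. */
	umem = ib_umem_get(pd->uobject->context, start, length,
			   access_flags);
	if (IS_ERR(umem))
		return ERR_PTR(PTR_ERR(umem));

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr) {
		/* The driver now owns the unpin on error paths, too. */
		ib_umem_release(umem);
		return ERR_PTR(-ENOMEM);
	}

	mr->umem = umem;
	/* ... program device translation tables from umem->chunk_list,
	 * set mr->ibmr.lkey / mr->ibmr.rkey from the device ... */

	return &mr->ibmr;
}
```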
Also move these functions to be in the ib_core module instead of
ib_uverbs, so that driver modules using them do not depend on
ib_uverbs.
This has a number of advantages:
- It is better design from the standpoint of making generic code a
library that can be used or overridden by device-specific code as
the details of specific devices dictate.
- Drivers that do not need to pin userspace memory regions do not
need to take the performance hit of calling ib_umem_get(). For
example, although I have not tried to implement it in this patch,
the ipath driver should be able to avoid pinning memory and just
use copy_{to,from}_user() to access userspace memory regions.
- Buffers that need special mapping treatment can be identified by
the low-level driver. For example, it may be possible to solve
some Altix-specific memory ordering issues with mthca CQs in
userspace by mapping CQ buffers with extra flags.
- Drivers that need to pin and DMA map userspace memory for things
other than memory regions can use ib_umem_get() directly, instead
of hacks using extra parameters to their reg_phys_mr method. For
example, the mlx4 driver that is pending being merged needs to pin
and DMA map QP and CQ buffers, but it does not need to create a
memory key for these buffers. So the cleanest solution is for mlx4
to call ib_umem_get() in the create_qp and create_cq methods (see the
sketch after this list).
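As a hypothetical sketch of that last point (mlx4 was not yet merged when this was written, so the command structure and names below are illustrative, not mlx4 code), a create_cq method could pin its userspace buffer directly:

```c
#include <linux/err.h>
#include <linux/types.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/* Hypothetical ABI: userspace passes the CQ buffer address in udata. */
struct example_create_cq_cmd {
	__u64 buf_addr;
};

/*
 * Pin and DMA map a userspace CQ buffer without ever creating a
 * memory key for it -- no reg_phys_mr hack, no extra parameters.
 */
static struct ib_umem *example_pin_cq_buf(struct ib_ucontext *context,
					  struct ib_udata *udata,
					  size_t buf_size)
{
	struct example_create_cq_cmd cmd;

	if (ib_copy_from_udata(&cmd, udata, sizeof cmd))
		return ERR_PTR(-EFAULT);

	return ib_umem_get(context, cmd.buf_addr, buf_size,
			   IB_ACCESS_LOCAL_WRITE);
}
```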
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/ipath')

-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_mr.c    | 38
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_verbs.h |  5

2 files changed, 30 insertions(+), 13 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index 31e70732e369..bdeef8d4f279 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -31,6 +31,7 @@
  * SOFTWARE.
  */
 
+#include <rdma/ib_umem.h>
 #include <rdma/ib_pack.h>
 #include <rdma/ib_smi.h>
 
@@ -147,6 +148,7 @@ struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
 	mr->mr.offset = 0;
 	mr->mr.access_flags = acc;
 	mr->mr.max_segs = num_phys_buf;
+	mr->umem = NULL;
 
 	m = 0;
 	n = 0;
@@ -170,46 +172,56 @@ bail:
 /**
  * ipath_reg_user_mr - register a userspace memory region
  * @pd: protection domain for this memory region
- * @region: the user memory region
+ * @start: starting userspace address
+ * @length: length of region to register
+ * @virt_addr: virtual address to use (from HCA's point of view)
  * @mr_access_flags: access flags for this memory region
  * @udata: unused by the InfiniPath driver
  *
  * Returns the memory region on success, otherwise returns an errno.
  */
-struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
-				int mr_access_flags, struct ib_udata *udata)
+struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+				u64 virt_addr, int mr_access_flags,
+				struct ib_udata *udata)
 {
 	struct ipath_mr *mr;
+	struct ib_umem *umem;
 	struct ib_umem_chunk *chunk;
 	int n, m, i;
 	struct ib_mr *ret;
 
-	if (region->length == 0) {
+	if (length == 0) {
 		ret = ERR_PTR(-EINVAL);
 		goto bail;
 	}
 
+	umem = ib_umem_get(pd->uobject->context, start, length,
+			   mr_access_flags);
+	if (IS_ERR(umem))
+		return (void *) umem;
+
 	n = 0;
-	list_for_each_entry(chunk, &region->chunk_list, list)
+	list_for_each_entry(chunk, &umem->chunk_list, list)
 		n += chunk->nents;
 
 	mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
 	if (!mr) {
 		ret = ERR_PTR(-ENOMEM);
+		ib_umem_release(umem);
 		goto bail;
 	}
 
 	mr->mr.pd = pd;
-	mr->mr.user_base = region->user_base;
-	mr->mr.iova = region->virt_base;
-	mr->mr.length = region->length;
-	mr->mr.offset = region->offset;
+	mr->mr.user_base = start;
+	mr->mr.iova = virt_addr;
+	mr->mr.length = length;
+	mr->mr.offset = umem->offset;
 	mr->mr.access_flags = mr_access_flags;
 	mr->mr.max_segs = n;
+	mr->umem = umem;
 
 	m = 0;
 	n = 0;
-	list_for_each_entry(chunk, &region->chunk_list, list) {
+	list_for_each_entry(chunk, &umem->chunk_list, list) {
 		for (i = 0; i < chunk->nents; i++) {
 			void *vaddr;
 
@@ -219,7 +231,7 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
 				goto bail;
 			}
 			mr->mr.map[m]->segs[n].vaddr = vaddr;
-			mr->mr.map[m]->segs[n].length = region->page_size;
+			mr->mr.map[m]->segs[n].length = umem->page_size;
 			n++;
 			if (n == IPATH_SEGSZ) {
 				m++;
@@ -253,6 +265,10 @@ int ipath_dereg_mr(struct ib_mr *ibmr)
 		i--;
 		kfree(mr->mr.map[i]);
 	}
+
+	if (mr->umem)
+		ib_umem_release(mr->umem);
+
 	kfree(mr);
 	return 0;
 }
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 7064fc222727..088b837ebea8 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -251,6 +251,7 @@ struct ipath_sge {
 /* Memory region */
 struct ipath_mr {
 	struct ib_mr ibmr;
+	struct ib_umem *umem;
 	struct ipath_mregion mr;	/* must be last */
 };
 
@@ -751,8 +752,8 @@ struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
 				struct ib_phys_buf *buffer_list,
 				int num_phys_buf, int acc, u64 *iova_start);
 
-struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
-				int mr_access_flags,
+struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+				u64 virt_addr, int mr_access_flags,
 				struct ib_udata *udata);
 int ipath_dereg_mr(struct ib_mr *ibmr);
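The ipath conversion above stores kernel virtual addresses in its private segment lists; a device that programs DMA addresses instead would walk the same chunk list via the mapped scatterlist entries. A rough sketch under the 2007-era ib_umem layout follows; the function name and the `pages` output buffer are illustrative, not part of the patch.

```c
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

/*
 * Illustrative helper (not from the patch): gather the DMA address of
 * every page of a pinned region by walking the chunk list that
 * ib_umem_get() filled in.  A DMA-mapped scatterlist entry may cover
 * more than one page, so step through it in page_size units.
 */
static int example_collect_dma_pages(struct ib_umem *umem, u64 *pages,
				     int max_pages)
{
	struct ib_umem_chunk *chunk;
	u64 addr, off;
	int i, n = 0;

	list_for_each_entry(chunk, &umem->chunk_list, list)
		for (i = 0; i < chunk->nmap; i++) {
			addr = sg_dma_address(&chunk->page_list[i]);
			for (off = 0; off < sg_dma_len(&chunk->page_list[i]);
			     off += umem->page_size) {
				if (n >= max_pages)
					return -EINVAL;
				pages[n++] = addr + off;
			}
		}

	return n;
}
```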