Diffstat (limited to 'drivers/misc/habanalabs/common/command_buffer.c')
-rw-r--r-- | drivers/misc/habanalabs/common/command_buffer.c | 229 |
1 file changed, 210 insertions(+), 19 deletions(-)
diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index a8004911c977..901e213daf40 100644
--- a/drivers/misc/habanalabs/common/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -13,6 +13,131 @@
 #include <linux/uaccess.h>
 #include <linux/genalloc.h>
 
+static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
+{
+	struct hl_device *hdev = ctx->hdev;
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
+	struct hl_vm_va_block *va_block, *tmp;
+	dma_addr_t bus_addr;
+	u64 virt_addr;
+	u32 page_size = prop->pmmu.page_size;
+	s32 offset;
+	int rc;
+
+	if (!hdev->supports_cb_mapping) {
+		dev_err_ratelimited(hdev->dev,
+				"Cannot map CB because no VA range is allocated for CB mapping\n");
+		return -EINVAL;
+	}
+
+	if (!hdev->mmu_enable) {
+		dev_err_ratelimited(hdev->dev,
+				"Cannot map CB because MMU is disabled\n");
+		return -EINVAL;
+	}
+
+	INIT_LIST_HEAD(&cb->va_block_list);
+
+	for (bus_addr = cb->bus_address;
+			bus_addr < cb->bus_address + cb->size;
+			bus_addr += page_size) {
+
+		virt_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, page_size);
+		if (!virt_addr) {
+			dev_err(hdev->dev,
+				"Failed to allocate device virtual address for CB\n");
+			rc = -ENOMEM;
+			goto err_va_pool_free;
+		}
+
+		va_block = kzalloc(sizeof(*va_block), GFP_KERNEL);
+		if (!va_block) {
+			rc = -ENOMEM;
+			gen_pool_free(ctx->cb_va_pool, virt_addr, page_size);
+			goto err_va_pool_free;
+		}
+
+		va_block->start = virt_addr;
+		va_block->end = virt_addr + page_size;
+		va_block->size = page_size;
+		list_add_tail(&va_block->node, &cb->va_block_list);
+	}
+
+	mutex_lock(&ctx->mmu_lock);
+
+	bus_addr = cb->bus_address;
+	offset = 0;
+	list_for_each_entry(va_block, &cb->va_block_list, node) {
+		rc = hl_mmu_map(ctx, va_block->start, bus_addr, va_block->size,
+				list_is_last(&va_block->node,
+						&cb->va_block_list));
+		if (rc) {
+			dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
+				va_block->start);
+			goto err_va_umap;
+		}
+
+		bus_addr += va_block->size;
+		offset += va_block->size;
+	}
+
+	hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR);
+
+	mutex_unlock(&ctx->mmu_lock);
+
+	cb->is_mmu_mapped = true;
+
+	return 0;
+
+err_va_umap:
+	list_for_each_entry(va_block, &cb->va_block_list, node) {
+		if (offset <= 0)
+			break;
+		hl_mmu_unmap(ctx, va_block->start, va_block->size,
+				offset <= va_block->size);
+		offset -= va_block->size;
+	}
+
+	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
+
+	mutex_unlock(&ctx->mmu_lock);
+
+err_va_pool_free:
+	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
+		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
+		list_del(&va_block->node);
+		kfree(va_block);
+	}
+
+	return rc;
+}
+
+static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
+{
+	struct hl_device *hdev = ctx->hdev;
+	struct hl_vm_va_block *va_block, *tmp;
+
+	mutex_lock(&ctx->mmu_lock);
+
+	list_for_each_entry(va_block, &cb->va_block_list, node)
+		if (hl_mmu_unmap(ctx, va_block->start, va_block->size,
+				list_is_last(&va_block->node,
+						&cb->va_block_list)))
+			dev_warn_ratelimited(hdev->dev,
+					"Failed to unmap CB's va 0x%llx\n",
+					va_block->start);
+
+	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
+
+	mutex_unlock(&ctx->mmu_lock);
+
+	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
+		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
+		list_del(&va_block->node);
+		kfree(va_block);
+	}
+}
+
 static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
 {
 	if (cb->is_internal)
@@ -47,6 +172,11 @@ static void cb_release(struct kref *ref)
 
 	hl_debugfs_remove_cb(cb);
 
+	if (cb->is_mmu_mapped)
+		cb_unmap_mem(cb->ctx, cb);
+
+	hl_ctx_put(cb->ctx);
+
 	cb_do_release(hdev, cb);
 }
 
@@ -107,11 +237,12 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
 }
 
 int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
-			u32 cb_size, u64 *handle, int ctx_id, bool internal_cb)
+			struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
+			bool map_cb, u64 *handle)
 {
 	struct hl_cb *cb;
 	bool alloc_new_cb = true;
-	int rc;
+	int rc, ctx_id = ctx->asid;
 
 	/*
 	 * Can't use generic function to check this because of special case
@@ -163,7 +294,21 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
 	}
 
 	cb->hdev = hdev;
-	cb->ctx_id = ctx_id;
+	cb->ctx = ctx;
+	hl_ctx_get(hdev, cb->ctx);
+
+	if (map_cb) {
+		if (ctx_id == HL_KERNEL_ASID_ID) {
+			dev_err(hdev->dev,
+				"CB mapping is not supported for kernel context\n");
+			rc = -EINVAL;
+			goto release_cb;
+		}
+
+		rc = cb_map_mem(ctx, cb);
+		if (rc)
+			goto release_cb;
+	}
 
 	spin_lock(&mgr->cb_lock);
 	rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
@@ -171,10 +316,10 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
 
 	if (rc < 0) {
 		dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
-		goto release_cb;
+		goto unmap_mem;
 	}
 
-	cb->id = rc;
+	cb->id = (u64) rc;
 
 	kref_init(&cb->refcount);
 	spin_lock_init(&cb->lock);
@@ -183,14 +328,18 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
 	 * idr is 32-bit so we can safely OR it with a mask that is above
 	 * 32 bit
 	 */
-	*handle = cb->id | HL_MMAP_CB_MASK;
+	*handle = cb->id | HL_MMAP_TYPE_CB;
 	*handle <<= PAGE_SHIFT;
 
 	hl_debugfs_add_cb(cb);
 
 	return 0;
 
+unmap_mem:
+	if (cb->is_mmu_mapped)
+		cb_unmap_mem(cb->ctx, cb);
 release_cb:
+	hl_ctx_put(cb->ctx);
 	cb_do_release(hdev, cb);
 out_err:
 	*handle = 0;
@@ -250,9 +399,10 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
 				args->in.cb_size, HL_MAX_CB_SIZE);
 			rc = -EINVAL;
 		} else {
-			rc = hl_cb_create(hdev, &hpriv->cb_mgr,
-					args->in.cb_size, &handle,
-					hpriv->ctx->asid, false);
+			rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx,
+					args->in.cb_size, false,
+					!!(args->in.flags & HL_CB_FLAGS_MAP),
+					&handle);
 		}
 
 		memset(args, 0, sizeof(*args));
@@ -300,11 +450,14 @@ int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
 {
 	struct hl_device *hdev = hpriv->hdev;
 	struct hl_cb *cb;
-	phys_addr_t address;
 	u32 handle, user_cb_size;
 	int rc;
 
+	/* We use the page offset to hold the idr and thus we need to clear
+	 * it before doing the mmap itself
+	 */
 	handle = vma->vm_pgoff;
+	vma->vm_pgoff = 0;
 
 	/* reference was taken here */
 	cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
@@ -356,12 +509,8 @@ int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
 
 	vma->vm_private_data = cb;
 
-	/* Calculate address for CB */
-	address = virt_to_phys((void *) (uintptr_t) cb->kernel_address);
-
-	rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address,
-			address, cb->size);
-
+	rc = hdev->asic_funcs->cb_mmap(hdev, vma, (void *) cb->kernel_address,
+					cb->bus_address, cb->size);
 	if (rc) {
 		spin_lock(&cb->lock);
 		cb->mmap = false;
@@ -425,7 +574,7 @@ void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
 		if (kref_put(&cb->refcount, cb_release) != 1)
 			dev_err(hdev->dev,
 				"CB %d for CTX ID %d is still alive\n",
-				id, cb->ctx_id);
+				id, cb->ctx->asid);
 	}
 
 	idr_destroy(&mgr->cb_handles);
@@ -438,8 +587,8 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
 	struct hl_cb *cb;
 	int rc;
 
-	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, cb_size, &cb_handle,
-			HL_KERNEL_ASID_ID, internal_cb);
+	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, cb_size,
+				internal_cb, false, &cb_handle);
 	if (rc) {
 		dev_err(hdev->dev,
 			"Failed to allocate CB for the kernel driver %d\n", rc);
@@ -495,3 +644,45 @@ int hl_cb_pool_fini(struct hl_device *hdev)
 
 	return 0;
 }
+
+int hl_cb_va_pool_init(struct hl_ctx *ctx)
+{
+	struct hl_device *hdev = ctx->hdev;
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
+	int rc;
+
+	if (!hdev->supports_cb_mapping)
+		return 0;
+
+	ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
+	if (!ctx->cb_va_pool) {
+		dev_err(hdev->dev,
+			"Failed to create VA gen pool for CB mapping\n");
+		return -ENOMEM;
+	}
+
+	rc = gen_pool_add(ctx->cb_va_pool, prop->cb_va_start_addr,
+			prop->cb_va_end_addr - prop->cb_va_start_addr, -1);
+	if (rc) {
+		dev_err(hdev->dev,
+			"Failed to add memory to VA gen pool for CB mapping\n");
+		goto err_pool_destroy;
+	}
+
+	return 0;
+
+err_pool_destroy:
+	gen_pool_destroy(ctx->cb_va_pool);
+
+	return rc;
+}
+
+void hl_cb_va_pool_fini(struct hl_ctx *ctx)
+{
+	struct hl_device *hdev = ctx->hdev;
+
+	if (!hdev->supports_cb_mapping)
+		return;
+
+	gen_pool_destroy(ctx->cb_va_pool);
+}
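For orientation, here is a minimal userspace sketch of how the new flag is consumed, assuming the habanalabs uapi in include/uapi/misc/habanalabs.h as extended by this series (union hl_cb_args, HL_CB_OP_CREATE, HL_CB_FLAGS_MAP, HL_IOCTL_CB). It illustrates the intended flow; it is not code from this patch:

	/* Illustrative sketch only -- not part of this patch. Assumes the
	 * habanalabs uapi, to which this series adds HL_CB_FLAGS_MAP.
	 */
	#include <stddef.h>
	#include <stdint.h>
	#include <sys/types.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <misc/habanalabs.h>

	/* Create a command buffer that the driver also maps to the device
	 * MMU, then mmap it into the caller's address space for filling.
	 */
	static void *create_mapped_cb(int fd, uint32_t size, uint64_t *handle)
	{
		union hl_cb_args args = {0};

		args.in.op = HL_CB_OP_CREATE;
		args.in.cb_size = size;
		args.in.flags = HL_CB_FLAGS_MAP;	/* triggers cb_map_mem() */

		if (ioctl(fd, HL_IOCTL_CB, &args))
			return MAP_FAILED;

		*handle = args.out.cb_handle;

		/* The handle doubles as the mmap offset: hl_cb_create() ORs
		 * HL_MMAP_TYPE_CB above the 32-bit idr value and shifts by
		 * PAGE_SHIFT; hl_cb_mmap() recovers the idr from vm_pgoff.
		 */
		return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
				(off_t) *handle);
	}

Two design points are worth a reader's gloss. The CB's DMA buffer is physically contiguous, yet cb_map_mem() still maps it page by page from a per-context genalloc VA pool, which keeps the partial-unmap error path simple: the offset counter walks back exactly the pages that were mapped. And since hl_cb_va_pool_init()/hl_cb_va_pool_fini() take an hl_ctx, they are presumably hooked into context init and teardown in common/context.c; that hook-up falls outside this diff, which is filtered to command_buffer.c.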