Diffstat (limited to 'drivers/gpu/drm/drm_gem_shmem_helper.c')
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c | 172
1 file changed, 113 insertions(+), 59 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index df31e5782eed..4b7cfbac4daa 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -35,22 +35,12 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
 	.mmap = drm_gem_shmem_mmap,
 };
 
-/**
- * drm_gem_shmem_create - Allocate an object with the given size
- * @dev: DRM device
- * @size: Size of the object to allocate
- *
- * This function creates a shmem GEM object.
- *
- * Returns:
- * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
- * error code on failure.
- */
-struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
+static struct drm_gem_shmem_object *
+__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
 {
 	struct drm_gem_shmem_object *shmem;
 	struct drm_gem_object *obj;
-	int ret;
+	int ret = 0;
 
 	size = PAGE_ALIGN(size);
 
@@ -64,7 +54,10 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
 	if (!obj->funcs)
 		obj->funcs = &drm_gem_shmem_funcs;
 
-	ret = drm_gem_object_init(dev, obj, size);
+	if (private)
+		drm_gem_private_object_init(dev, obj, size);
+	else
+		ret = drm_gem_object_init(dev, obj, size);
 	if (ret)
 		goto err_free;
 
@@ -77,15 +70,17 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
 	mutex_init(&shmem->vmap_lock);
 	INIT_LIST_HEAD(&shmem->madv_list);
 
-	/*
-	 * Our buffers are kept pinned, so allocating them
-	 * from the MOVABLE zone is a really bad idea, and
-	 * conflicts with CMA. See comments above new_inode()
-	 * why this is required _and_ expected if you're
-	 * going to pin these pages.
-	 */
-	mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
-			     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+	if (!private) {
+		/*
+		 * Our buffers are kept pinned, so allocating them
+		 * from the MOVABLE zone is a really bad idea, and
+		 * conflicts with CMA. See comments above new_inode()
+		 * why this is required _and_ expected if you're
+		 * going to pin these pages.
+		 */
+		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
+				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+	}
 
 	return shmem;
 
@@ -96,6 +91,21 @@ err_free:
 	return ERR_PTR(ret);
 }
 
+/**
+ * drm_gem_shmem_create - Allocate an object with the given size
+ * @dev: DRM device
+ * @size: Size of the object to allocate
+ *
+ * This function creates a shmem GEM object.
+ *
+ * Returns:
+ * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
+{
+	return __drm_gem_shmem_create(dev, size, false);
+}
 EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
 
 /**
@@ -103,7 +113,8 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
  * @obj: GEM object to free
  *
  * This function cleans up the GEM object state and frees the memory used to
- * store the object itself.
+ * store the object itself. It should be used to implement
+ * &drm_gem_object_funcs.free.
  */
 void drm_gem_shmem_free_object(struct drm_gem_object *obj)
 {
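
A minimal usage sketch of the allocation path above (hypothetical driver code, not part of the patch; dev and size are assumed to be the driver's struct drm_device * and the requested size):

	struct drm_gem_shmem_object *shmem;

	/* Allocates a page-aligned, shmem-backed GEM object. */
	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/* ... use shmem->base: create handles, vmap, mmap ... */

	/* Dropping the last reference ends up in drm_gem_shmem_free_object()
	 * via &drm_gem_object_funcs.free. */
	drm_gem_object_put(&shmem->base);
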
@@ -112,9 +123,7 @@ void drm_gem_shmem_free_object(struct drm_gem_object *obj)
 	WARN_ON(shmem->vmap_use_count);
 
 	if (obj->import_attach) {
-		shmem->pages_use_count--;
 		drm_prime_gem_destroy(obj, shmem->sgt);
-		kvfree(shmem->pages);
 	} else {
 		if (shmem->sgt) {
 			dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
@@ -169,6 +178,8 @@ int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
 {
 	int ret;
 
+	WARN_ON(shmem->base.import_attach);
+
 	ret = mutex_lock_interruptible(&shmem->pages_lock);
 	if (ret)
 		return ret;
@@ -214,7 +225,8 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages);
  * @obj: GEM object
  *
  * This function makes sure the backing pages are pinned in memory while the
- * buffer is exported.
+ * buffer is exported. It should only be used to implement
+ * &drm_gem_object_funcs.pin.
  *
  * Returns:
  * 0 on success or a negative error code on failure.
@@ -223,6 +235,8 @@ int drm_gem_shmem_pin(struct drm_gem_object *obj)
 {
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 
+	WARN_ON(shmem->base.import_attach);
+
 	return drm_gem_shmem_get_pages(shmem);
 }
 EXPORT_SYMBOL(drm_gem_shmem_pin);
@@ -232,12 +246,14 @@ EXPORT_SYMBOL(drm_gem_shmem_pin);
  * @obj: GEM object
  *
  * This function removes the requirement that the backing pages are pinned in
- * memory.
+ * memory. It should only be used to implement &drm_gem_object_funcs.unpin.
  */
 void drm_gem_shmem_unpin(struct drm_gem_object *obj)
 {
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 
+	WARN_ON(shmem->base.import_attach);
+
 	drm_gem_shmem_put_pages(shmem);
 }
 EXPORT_SYMBOL(drm_gem_shmem_unpin);
@@ -250,15 +266,15 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
 	if (shmem->vmap_use_count++ > 0)
 		return shmem->vaddr;
 
-	ret = drm_gem_shmem_get_pages(shmem);
-	if (ret)
-		goto err_zero_use;
-
 	if (obj->import_attach) {
 		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
 	} else {
 		pgprot_t prot = PAGE_KERNEL;
 
+		ret = drm_gem_shmem_get_pages(shmem);
+		if (ret)
+			goto err_zero_use;
+
 		if (!shmem->map_cached)
 			prot = pgprot_writecombine(prot);
 		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
@@ -274,7 +290,8 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
 	return shmem->vaddr;
 
 err_put_pages:
-	drm_gem_shmem_put_pages(shmem);
+	if (!obj->import_attach)
+		drm_gem_shmem_put_pages(shmem);
 err_zero_use:
 	shmem->vmap_use_count = 0;
 
@@ -285,8 +302,14 @@ err_zero_use:
  * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
  * @shmem: shmem GEM object
  *
- * This function makes sure that a virtual address exists for the buffer backing
- * the shmem GEM object.
+ * This function makes sure that a contiguous kernel virtual address mapping
+ * exists for the buffer backing the shmem GEM object.
+ *
+ * This function can be used to implement &drm_gem_object_funcs.vmap. But it can
+ * also be called by drivers directly, in which case it will hide the
+ * differences between dma-buf imported and natively allocated objects.
+ *
+ * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
@@ -330,7 +353,13 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 *
- * This function removes the virtual address when use count drops to zero.
+ * This function cleans up a kernel virtual address mapping acquired by
+ * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
+ * zero.
+ *
+ * This function can be used to implement &drm_gem_object_funcs.vunmap. But it
+ * can also be called by drivers directly, in which case it will hide the
+ * differences between dma-buf imported and natively allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
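
A sketch of calling the vmap/vunmap pair directly from a driver, as the comments above permit (hypothetical snippet; obj is assumed to be a valid struct drm_gem_object *, and in this tree drm_gem_shmem_vmap() returns a pointer or an ERR_PTR() value):

	void *vaddr;

	/* Works for both dma-buf imported and natively allocated objects. */
	vaddr = drm_gem_shmem_vmap(obj);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, 0, obj->size);	/* e.g. clear the buffer */

	/* The mapping is only torn down once the use count drops to zero. */
	drm_gem_shmem_vunmap(obj, vaddr);
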
@@ -360,7 +389,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
 	 */
 	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
 	/* drop reference from allocate - handle holds it now. */
-	drm_gem_object_put_unlocked(&shmem->base);
+	drm_gem_object_put(&shmem->base);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -434,6 +463,33 @@ bool drm_gem_shmem_purge(struct drm_gem_object *obj)
 EXPORT_SYMBOL(drm_gem_shmem_purge);
 
 /**
+ * drm_gem_shmem_create_object_cached - Create a shmem buffer object with
+ *                                      cached mappings
+ * @dev: DRM device
+ * @size: Size of the object to allocate
+ *
+ * By default, shmem buffer objects use writecombine mappings. This
+ * function implements struct drm_driver.gem_create_object for shmem
+ * buffer objects with cached mappings.
+ *
+ * Returns:
+ * A struct drm_gem_shmem_object * on success or NULL on failure.
+ */
+struct drm_gem_object *
+drm_gem_shmem_create_object_cached(struct drm_device *dev, size_t size)
+{
+	struct drm_gem_shmem_object *shmem;
+
+	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+	if (!shmem)
+		return NULL;
+	shmem->map_cached = true;
+
+	return &shmem->base;
+}
+EXPORT_SYMBOL(drm_gem_shmem_create_object_cached);
+
+/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
@@ -495,6 +551,8 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 	int ret;
 
+	WARN_ON(shmem->base.import_attach);
+
 	ret = drm_gem_shmem_get_pages(shmem);
 	WARN_ON_ONCE(ret != 0);
 
@@ -536,6 +594,9 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 	/* Remove the fake offset */
 	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
 
+	if (obj->import_attach)
+		return dma_buf_mmap(obj->dma_buf, vma, 0);
+
 	shmem = to_drm_gem_shmem_obj(obj);
 
 	ret = drm_gem_shmem_get_pages(shmem);
@@ -559,6 +620,8 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
+ *
+ * This implements the &drm_gem_object_funcs.info callback.
 */
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
			      const struct drm_gem_object *obj)
@@ -577,7 +640,12 @@ EXPORT_SYMBOL(drm_gem_shmem_print_info);
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
- * calling the standard DMA mapping API.
+ * calling the standard DMA mapping API. Drivers should not call this function
+ * directly; instead, it should only be used as an implementation for
+ * &drm_gem_object_funcs.get_sg_table.
+ *
+ * Drivers that need to acquire a scatter/gather table for an object should call
+ * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
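
A sketch of opting in to cached mappings through struct drm_driver.gem_create_object, per the new helper above (hypothetical driver name; unrelated members omitted):

	static struct drm_driver mydrv_driver = {
		/* ... fops, name, feature flags ... */
		.gem_create_object = drm_gem_shmem_create_object_cached,
	};
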
@@ -586,6 +654,8 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
 {
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 
+	WARN_ON(shmem->base.import_attach);
+
 	return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
@@ -599,6 +669,10 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
+ * This is the main function for drivers to get at backing storage, and it hides
+ * any differences between dma-buf imported and natively allocated objects.
+ * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
+ *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or errno on failure.
 */
@@ -656,36 +730,16 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
 				    struct sg_table *sgt)
 {
 	size_t size = PAGE_ALIGN(attach->dmabuf->size);
-	size_t npages = size >> PAGE_SHIFT;
 	struct drm_gem_shmem_object *shmem;
-	int ret;
 
-	shmem = drm_gem_shmem_create(dev, size);
+	shmem = __drm_gem_shmem_create(dev, size, true);
 	if (IS_ERR(shmem))
 		return ERR_CAST(shmem);
 
-	shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
-	if (!shmem->pages) {
-		ret = -ENOMEM;
-		goto err_free_gem;
-	}
-
-	ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
-	if (ret < 0)
-		goto err_free_array;
-
 	shmem->sgt = sgt;
-	shmem->pages_use_count = 1; /* Permanently pinned from our point of view */
 
 	DRM_DEBUG_PRIME("size = %zu\n", size);
 
 	return &shmem->base;
-
-err_free_array:
-	kvfree(shmem->pages);
-err_free_gem:
-	drm_gem_object_put_unlocked(&shmem->base);
-
-	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
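
A sketch of the preferred way for drivers to reach the backing storage, per the note above (hypothetical snippet; obj is assumed valid, and the returned table stays owned by the helper):

	struct sg_table *sgt;

	/* Pins the pages and dma-maps them on first use; cached afterwards. */
	sgt = drm_gem_shmem_get_pages_sgt(obj);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* sgt->sgl can now be programmed into the device's DMA engine. */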