author    Thomas Hellström <thomas.hellstrom@linux.intel.com>  2021-06-02 10:38:08 +0200
committer Matthew Auld <matthew.auld@intel.com>                2021-06-02 13:21:24 +0100
commit    d148738923fdb5077089e48ec15555e6008100d0 (patch)
tree      953dfd00a68894481fbb6784545632620ea2751d /drivers/gpu/drm/i915/gem/i915_gem_lmem.c
parent    0e4fe0c9f2f981f26e01b73f3c465ca314c4f9c0 (diff)
drm/i915/ttm: Initialize the ttm device and memory managers
Temporarily remove the buddy allocator and related selftests
and hook up the TTM range manager for i915 regions.

Also modify the mock region selftests somewhat to account
for a fragmenting manager.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210602083818.241793-2-thomas.hellstrom@linux.intel.com
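For context, a minimal usage sketch of how an lmem object reaches the new
backend. This is not part of the patch: i915_gem_object_create_lmem() and
the pin/unpin helpers are pre-existing i915 APIs, lmem_alloc_example() is a
hypothetical caller, and locking and error handling are abbreviated.

/* Hypothetical caller, for illustration only. */
static int lmem_alloc_example(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	int err;

	/* I915_BO_ALLOC_CONTIGUOUS becomes I915_ALLOC_CONTIGUOUS in lmem_get_pages(). */
	obj = i915_gem_object_create_lmem(i915, SZ_2M, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* First pin populates the pages through the new .get_pages hook. */
	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		goto out_put;

	/* ... use the object ... */

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);
	return err;
}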
Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_lmem.c')
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_lmem.c | 60
1 file changed, 58 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index f44bdd08f7cb..3b4aa28a076d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -4,16 +4,72 @@
*/
#include "intel_memory_region.h"
+#include "intel_region_ttm.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
+static void lmem_put_pages(struct drm_i915_gem_object *obj,
+			   struct sg_table *pages)
+{
+	intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
+	obj->mm.dirty = false;
+	sg_free_table(pages);
+	kfree(pages);
+}
+
+static int lmem_get_pages(struct drm_i915_gem_object *obj)
+{
+	unsigned int flags;
+	struct sg_table *pages;
+
+	flags = I915_ALLOC_MIN_PAGE_SIZE;
+	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
+		flags |= I915_ALLOC_CONTIGUOUS;
+
+	obj->mm.st_mm_node = intel_region_ttm_node_alloc(obj->mm.region,
+							 obj->base.size,
+							 flags);
+	if (IS_ERR(obj->mm.st_mm_node))
+		return PTR_ERR(obj->mm.st_mm_node);
+
+	/* Range manager is always contiguous */
+	if (obj->mm.region->is_range_manager)
+		obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
+	pages = intel_region_ttm_node_to_st(obj->mm.region, obj->mm.st_mm_node);
+	if (IS_ERR(pages)) {
+		intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
+		return PTR_ERR(pages);
+	}
+
+	__i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));
+
+	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) {
+		void __iomem *vaddr =
+			i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
+
+		if (!vaddr) {
+			struct sg_table *pages =
+				__i915_gem_object_unset_pages(obj);
+
+			if (!IS_ERR_OR_NULL(pages))
+				lmem_put_pages(obj, pages);
+			return -ENOMEM;
+		}
+
+		memset_io(vaddr, 0, obj->base.size);
+		io_mapping_unmap(vaddr);
+	}
+
+	return 0;
+}
+
const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
	.name = "i915_gem_object_lmem",
	.flags = I915_GEM_OBJECT_HAS_IOMEM,
-	.get_pages = i915_gem_object_get_pages_buddy,
-	.put_pages = i915_gem_object_put_pages_buddy,
+	.get_pages = lmem_get_pages,
+	.put_pages = lmem_put_pages,
	.release = i915_gem_object_release_memory_region,
};
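The ops table is consumed by the common GEM paths rather than called
directly. A simplified sketch of the contract the two new callbacks fill;
this is not the actual i915 dispatch code (which lives in i915_gem_pages.c),
and the example_* wrappers are hypothetical:

/* Simplified dispatch sketch, for illustration only. */
static int example_acquire_pages(struct drm_i915_gem_object *obj)
{
	/* First pin: the region backend allocates a TTM node and builds an sg_table. */
	return obj->ops->get_pages(obj);
}

static void example_release_pages(struct drm_i915_gem_object *obj,
				  struct sg_table *pages)
{
	/* Final release: the node returns to the manager and the sg_table is freed. */
	obj->ops->put_pages(obj, pages);
}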