author     Chris Wilson <chris@chris-wilson.co.uk>    2012-08-11 15:41:03 +0100
committer  Daniel Vetter <daniel.vetter@ffwll.ch>     2012-08-21 14:34:36 +0200
commit     d8cb5086695dcdd076e911fc298a5a6701497371 (patch)
tree       89be5fefdb195b4c3ff8878694fa2b6fe790470e
parent     c110a6d728d0243a755973d34e363952b74af984 (diff)
drm/i915: Try harder to allocate an mmap_offset
Given the persistence of an offset for the lifetime of an object, it is easy to contemplate how the mmap space becomes badly fragmented to the point that further allocations fail with ENOSPC. Our only recourse at this point is to try to purge the objects to release some space and reattempt the allocation.

References: https://bugs.freedesktop.org/show_bug.cgi?id=39552
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
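The shape of the fallback is easiest to see in isolation. Below is a standalone C sketch of the same staged-retry idea; try_alloc(), purge_some(), shrink_all() and alloc_with_reclaim() are hypothetical stand-ins for drm_gem_create_mmap_offset(), i915_gem_purge() and i915_gem_shrink_all() in the patch, and the stubs merely simulate an allocator that succeeds only after the most aggressive reclaim pass:

#include <errno.h>
#include <stdio.h>

/* Simulated allocator state: 0 = nothing reclaimed, 1 = purgeable objects
 * dropped, 2 = everything reclaimable dropped. */
static int reclaimed;

static int try_alloc(void)
{
        /* Stand-in for drm_gem_create_mmap_offset(): pretend the offset
         * space is exhausted until the heaviest reclaim pass has run. */
        return reclaimed < 2 ? -ENOSPC : 0;
}

static void purge_some(long nr_pages)
{
        /* Stand-in for i915_gem_purge(): drop only purgeable objects. */
        (void)nr_pages;
        reclaimed = 1;
}

static void shrink_all(void)
{
        /* Stand-in for i915_gem_shrink_all(): reclaim everything we can. */
        reclaimed = 2;
}

static int alloc_with_reclaim(long nr_pages)
{
        int ret = try_alloc();
        if (ret != -ENOSPC)
                return ret;

        purge_some(nr_pages);           /* cheap pass first */
        ret = try_alloc();
        if (ret != -ENOSPC)
                return ret;

        shrink_all();                   /* last resort, then one final try */
        return try_alloc();
}

int main(void)
{
        printf("alloc_with_reclaim: %d\n", alloc_with_reclaim(16));
        return 0;
}

The design point mirrored in the patch is that reclaim is only escalated on -ENOSPC; any other error from the allocator is returned to the caller untouched.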
-rw-r--r--    drivers/gpu/drm/i915/i915_gem.c    50
1 file changed, 41 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a549be5f535b..e1ec587c42c8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1276,6 +1276,42 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
         return i915_gem_get_gtt_size(dev, size, tiling_mode);
 }
 
+static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
+{
+        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+        int ret;
+
+        if (obj->base.map_list.map)
+                return 0;
+
+        ret = drm_gem_create_mmap_offset(&obj->base);
+        if (ret != -ENOSPC)
+                return ret;
+
+        /* Badly fragmented mmap space? The only way we can recover
+         * space is by destroying unwanted objects. We can't randomly release
+         * mmap_offsets as userspace expects them to be persistent for the
+         * lifetime of the objects. The closest we can do is to release the
+         * offsets on purgeable objects by truncating it and marking it purged,
+         * which prevents userspace from ever using that object again.
+         */
+        i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
+        ret = drm_gem_create_mmap_offset(&obj->base);
+        if (ret != -ENOSPC)
+                return ret;
+
+        i915_gem_shrink_all(dev_priv);
+        return drm_gem_create_mmap_offset(&obj->base);
+}
+
+static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
+{
+        if (!obj->base.map_list.map)
+                return;
+
+        drm_gem_free_mmap_offset(&obj->base);
+}
+
 int
 i915_gem_mmap_gtt(struct drm_file *file,
                   struct drm_device *dev,
@@ -1307,11 +1343,9 @@ i915_gem_mmap_gtt(struct drm_file *file,
                 goto out;
         }
 
-        if (!obj->base.map_list.map) {
-                ret = drm_gem_create_mmap_offset(&obj->base);
-                if (ret)
-                        goto out;
-        }
+        ret = i915_gem_object_create_mmap_offset(obj);
+        if (ret)
+                goto out;
 
         *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
 
@@ -1360,8 +1394,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
         inode = obj->base.filp->f_path.dentry->d_inode;
         shmem_truncate_range(inode, 0, (loff_t)-1);
 
-        if (obj->base.map_list.map)
-                drm_gem_free_mmap_offset(&obj->base);
+        i915_gem_object_free_mmap_offset(obj);
 
         obj->madv = __I915_MADV_PURGED;
 }
@@ -3615,8 +3648,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
         }
 
         i915_gem_object_put_pages_gtt(obj);
-        if (obj->base.map_list.map)
-                drm_gem_free_mmap_offset(&obj->base);
+        i915_gem_object_free_mmap_offset(obj);
 
         drm_gem_object_release(&obj->base);
         i915_gem_info_remove_obj(dev_priv, obj->base.size);
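For context only (not part of this commit): the offset written back by i915_gem_mmap_gtt() is what userspace hands to mmap() on the DRM file descriptor. A minimal sketch of that consumer, assuming libdrm's <xf86drm.h> and <i915_drm.h>; map_gtt() is a hypothetical helper, not a libdrm function:

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Map a GEM object through the GTT aperture. Error handling is reduced
 * to the bare minimum; returns MAP_FAILED on any failure. */
static void *map_gtt(int fd, uint32_t handle, size_t size)
{
        struct drm_i915_gem_mmap_gtt arg;

        memset(&arg, 0, sizeof(arg));
        arg.handle = handle;

        /* Ask the kernel for the fake mmap offset; this is the path that,
         * after this patch, retries with purging when the offset space is
         * badly fragmented instead of failing outright with ENOSPC. */
        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
                return MAP_FAILED;

        return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, arg.offset);
}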