author    Chris Wilson <chris@chris-wilson.co.uk>    2016-04-20 12:09:52 +0100
committer Chris Wilson <chris@chris-wilson.co.uk>    2016-04-20 13:49:44 +0100
commit    1bec9b0bda3d570cb7ff3a69e44917facf7e8820 (patch)
tree      f8e9bd21f68e32afd73a336d79f68edd1bacbc02 /drivers/gpu/drm/i915
parent    1768d4550c055bfd2d4b44763fcb9230f41e593b (diff)
drm/i915/shrinker: Only shmemfs objects are backed by swap
Since we can only swap out shmemfs objects, those are the only ones that can influence the ability of the shrinker to free pages. Currently, all non-shmemfs objects have a raised pages_pin_count to protect them from the shrinker, so this just makes the logic for can_release_pages() clearer (and safer in the future, so that we do not overestimate our ability to free up pages from future non-swappable objects).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1461150592-27818-3-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
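For context, a minimal sketch of the shape can_release_pages() takes with this patch applied. Only the obj->base.filp guard comes from the hunk below; the remaining checks (page pinning, swap availability, madvise state) are paraphrased assumptions about the surrounding shrinker code of this era, not part of this diff:

static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	/* Only shmemfs objects are backed by swap: an object with no
	 * struct file behind it has nowhere to be swapped out to, so
	 * releasing its pages cannot return memory to the system.
	 * (This is the guard added by the patch.)
	 */
	if (!obj->base.filp)
		return false;

	/* Only report true if by unbinding the object and putting its
	 * pages we can actually make forward progress towards freeing
	 * physical pages; pinned pages cannot be released. (Simplified
	 * assumption of the pre-existing check, which also accounts for
	 * pins held by bound VMAs.)
	 */
	if (obj->pages_pin_count)
		return false;

	/* Pages can be returned to the system only if they are purgeable
	 * or there is swap space to move them to. (Pre-existing logic,
	 * paraphrased.)
	 */
	return swap_available() || obj->madv == I915_MADV_DONTNEED;
}

With the guard folded into can_release_pages(), the OOM notifier in the second hunk no longer needs its own obj->base.filp filtering before counting evictable pages.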
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 10 ++++------
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 95cffe9b305d..425e721aac58 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -70,6 +70,10 @@ static bool swap_available(void)
 static bool can_release_pages(struct drm_i915_gem_object *obj)
 {
+	/* Only shmemfs objects are backed by swap */
+	if (!obj->base.filp)
+		return false;
+
 	/* Only report true if by unbinding the object and putting its pages
 	 * we can actually make forward progress towards freeing physical
 	 * pages.
@@ -349,18 +353,12 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	 */
 	unbound = bound = unevictable = 0;
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
-		if (!obj->base.filp) /* not backed by a freeable object */
-			continue;
-
 		if (!can_release_pages(obj))
 			unevictable += obj->base.size >> PAGE_SHIFT;
 		else
 			unbound += obj->base.size >> PAGE_SHIFT;
 	}
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (!obj->base.filp)
-			continue;
-
 		if (!can_release_pages(obj))
 			unevictable += obj->base.size >> PAGE_SHIFT;
 		else