author     Kuo-Hsin Yang <vovoy@chromium.org>       2019-01-08 15:45:17 +0800
committer  Chris Wilson <chris@chris-wilson.co.uk>  2019-01-09 21:24:50 +0000
commit     fb4b49278f6b2b83bc638d4082301f98581c3598
tree       d881b7b24a387bc29b4eb36bab1e4a24c9331616  /drivers/gpu/drm/drm_gem.c
parent     2513147dce2353eb6d1a947ab543e3758724362d
drm/gem: Mark pinned pages as unevictable
The gem drivers use shmemfs to allocate backing storage for gem objects.
On the Samsung Chromebook Plus, the drm/rockchip driver may call
rockchip_gem_get_pages -> drm_gem_get_pages -> shmem_read_mapping_page
to pin a large number of pages. These pinned pages cannot be reclaimed,
which breaks the page reclaim mechanism and triggers the oom-killer.
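
For reference, a condensed, pre-patch view of the pinning loop inside
drm_gem_get_pages() (taken from the diff context below, not a complete
function): each shmem_read_mapping_page() call takes a reference on the
page, so the page stays pinned until drm_gem_put_pages() drops it, yet it
still sits on the regular lru lists where reclaim keeps rescanning it.

        /* Condensed sketch of the pre-patch pinning loop; see the diff below. */
        for (i = 0; i < npages; i++) {
                p = shmem_read_mapping_page(mapping, i);  /* takes a page reference */
                if (IS_ERR(p))
                        goto fail;
                pages[i] = p;  /* pinned until drm_gem_put_pages() releases it */
        }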
E.g. when the size of a zone is 3.9 GiB, the inactive_ratio is 5. If
active_anon / inactive_anon < 5 and all pages in the inactive_anon lru
are pinned, page reclaim keeps scanning the inactive_anon lru without
reclaiming any memory. Page reclaim thus breaks down even though the
rockchip driver pins only about 1/6 of the anon lru pages.
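
The numbers above follow from the kernel's inactive-list sizing heuristic
of that era, roughly inactive_ratio = int_sqrt(10 * gb) in mm/vmscan.c
(paraphrased, not quoted). A stand-alone userspace sketch of the arithmetic,
under that assumption (int_sqrt() here mimics the kernel helper):

        #include <math.h>
        #include <stdio.h>

        /* Userspace stand-in for the kernel's int_sqrt() helper. */
        static unsigned long int_sqrt(unsigned long x)
        {
                return (unsigned long)sqrt((double)x);
        }

        int main(void)
        {
                unsigned long zone_gib = 3;  /* a 3.9 GiB zone truncates to 3 whole GiB */
                unsigned long inactive_ratio = zone_gib ? int_sqrt(10 * zone_gib) : 1;

                /* int_sqrt(30) = 5, so reclaim is satisfied with as little as
                 * 1/(5+1) = 1/6 of the anon lru on the inactive list; if that
                 * sixth is entirely pinned, scanning it makes no progress. */
                printf("inactive_ratio = %lu\n", inactive_ratio);
                printf("minimum inactive share = 1/%lu of the anon lru\n",
                       inactive_ratio + 1);
                return 0;
        }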
Mark these pinned pages as unevictable to avoid the premature oom-killer
invocation. See also the similar patch to the i915 driver [1].
[1]: https://patchwork.freedesktop.org/patch/msgid/20181106132324.17390-1-chris@chris-wilson.co.uk
Signed-off-by: Kuo-Hsin Yang <vovoy@chromium.org>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190108074517.209860-1-vovoy@chromium.org
Diffstat (limited to 'drivers/gpu/drm/drm_gem.c')
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 36 +++++++++++++++++++++++++++++++++---
1 file changed, 33 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 8b55ece97967..2896ff60552f 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -37,6 +37,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
 #include <linux/mem_encrypt.h>
+#include <linux/pagevec.h>
 #include <drm/drmP.h>
 #include <drm/drm_vma_manager.h>
 #include <drm/drm_gem.h>
@@ -526,6 +527,17 @@ int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
 }
 EXPORT_SYMBOL(drm_gem_create_mmap_offset);
 
+/*
+ * Move pages to appropriate lru and release the pagevec, decrementing the
+ * ref count of those pages.
+ */
+static void drm_gem_check_release_pagevec(struct pagevec *pvec)
+{
+        check_move_unevictable_pages(pvec);
+        __pagevec_release(pvec);
+        cond_resched();
+}
+
 /**
  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
  * from shmem
@@ -551,6 +563,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 {
         struct address_space *mapping;
         struct page *p, **pages;
+        struct pagevec pvec;
         int i, npages;
 
         /* This is the shared memory object that backs the GEM resource */
@@ -568,6 +581,8 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
         if (pages == NULL)
                 return ERR_PTR(-ENOMEM);
 
+        mapping_set_unevictable(mapping);
+
         for (i = 0; i < npages; i++) {
                 p = shmem_read_mapping_page(mapping, i);
                 if (IS_ERR(p))
@@ -586,8 +601,14 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
         return pages;
 
 fail:
-        while (i--)
-                put_page(pages[i]);
+        mapping_clear_unevictable(mapping);
+        pagevec_init(&pvec);
+        while (i--) {
+                if (!pagevec_add(&pvec, pages[i]))
+                        drm_gem_check_release_pagevec(&pvec);
+        }
+        if (pagevec_count(&pvec))
+                drm_gem_check_release_pagevec(&pvec);
 
         kvfree(pages);
         return ERR_CAST(p);
@@ -605,6 +626,11 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                 bool dirty, bool accessed)
 {
         int i, npages;
+        struct address_space *mapping;
+        struct pagevec pvec;
+
+        mapping = file_inode(obj->filp)->i_mapping;
+        mapping_clear_unevictable(mapping);
 
         /* We already BUG_ON() for non-page-aligned sizes in
          * drm_gem_object_init(), so we should never hit this unless
@@ -614,6 +640,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 
         npages = obj->size >> PAGE_SHIFT;
 
+        pagevec_init(&pvec);
         for (i = 0; i < npages; i++) {
                 if (dirty)
                         set_page_dirty(pages[i]);
@@ -622,8 +649,11 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                         mark_page_accessed(pages[i]);
 
                 /* Undo the reference we took when populating the table */
-                put_page(pages[i]);
+                if (!pagevec_add(&pvec, pages[i]))
+                        drm_gem_check_release_pagevec(&pvec);
         }
+        if (pagevec_count(&pvec))
+                drm_gem_check_release_pagevec(&pvec);
 
         kvfree(pages);
 }
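
Since the marking happens entirely inside the helpers, drivers that pin
backing storage through drm_gem_get_pages()/drm_gem_put_pages() (such as
drm/rockchip) need no changes. A hypothetical driver-side view; the struct
and wrapper names below are invented for illustration, only the drm_gem_*
helper calls are real:

        /* Hypothetical wrappers; only the drm_gem_* helper calls are real. */
        struct my_gem_object {
                struct drm_gem_object base;
                struct page **pages;
        };

        static int my_gem_pin_pages(struct my_gem_object *bo)
        {
                struct page **pages;

                /* pages come back marked unevictable, so reclaim skips them */
                pages = drm_gem_get_pages(&bo->base);
                if (IS_ERR(pages))
                        return PTR_ERR(pages);

                bo->pages = pages;
                return 0;
        }

        static void my_gem_unpin_pages(struct my_gem_object *bo)
        {
                /* clears the unevictable marking and drops the references in batches */
                drm_gem_put_pages(&bo->base, bo->pages, true, false);
                bo->pages = NULL;
        }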