author     Chris Wilson <chris@chris-wilson.co.uk>  2019-05-28 10:29:49 +0100
committer  Chris Wilson <chris@chris-wilson.co.uk>  2019-05-28 12:45:29 +0100
commit     10be98a77c558f8cfb823cd2777171fbb35040f6 (patch)
tree       282e52f1db25a6d3b9c63714a8461171f6649412 /drivers/gpu/drm/i915/gem/i915_gemfs.c
parent     f0e4a06397526d3352a3c80b0575ac22ab24da94 (diff)
drm/i915: Move more GEM objects under gem/
Continuing the theme of separating out the GEM clutter.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-8-chris@chris-wilson.co.uk
Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gemfs.c')
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gemfs.c | 57
1 file changed, 57 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
new file mode 100644
index 000000000000..099f3397aada
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c
@@ -0,0 +1,57 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2017 Intel Corporation
+ */
+
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+
+#include "i915_drv.h"
+#include "i915_gemfs.h"
+
+int i915_gemfs_init(struct drm_i915_private *i915)
+{
+ struct file_system_type *type;
+ struct vfsmount *gemfs;
+
+ type = get_fs_type("tmpfs");
+ if (!type)
+ return -ENODEV;
+
+ gemfs = kern_mount(type);
+ if (IS_ERR(gemfs))
+ return PTR_ERR(gemfs);
+
+ /*
+ * Enable huge-pages for objects that are at least HPAGE_PMD_SIZE, most
+ * likely 2M. Note that within_size may overallocate huge-pages, if say
+ * we allocate an object of size 2M + 4K, we may get 2M + 2M, but under
+ * memory pressure shmem should split any huge-pages which can be
+ * shrunk.
+ */
+
+ if (has_transparent_hugepage()) {
+ struct super_block *sb = gemfs->mnt_sb;
+ /* FIXME: Disabled until we get W/A for read BW issue. */
+ char options[] = "huge=never";
+ int flags = 0;
+ int err;
+
+ err = sb->s_op->remount_fs(sb, &flags, options);
+ if (err) {
+ kern_unmount(gemfs);
+ return err;
+ }
+ }
+
+ i915->mm.gemfs = gemfs;
+
+ return 0;
+}
+
+void i915_gemfs_fini(struct drm_i915_private *i915)
+{
+ kern_unmount(i915->mm.gemfs);
+}