author    Ingo Molnar <mingo@kernel.org>    2017-02-04 00:12:19 +0100
committer Ingo Molnar <mingo@kernel.org>    2017-03-03 01:43:48 +0100
commit    9e7d2e44dd88ba7e29c165b6fca428e384afa5a8 (patch)
tree      b6aae24b0290c8dd02714f059017d2198eb68a14 /include
parent    76960dbf9b235b957d9d730be2c6c2a70f709026 (diff)
mm/headers, sched/headers: Move task related MM types from <linux/mm_types.h> to <linux/mm_types_task.h>
Separate all the MM types that are embedded directly in 'struct task_struct'
into the <linux/mm_types_task.h> header.

The goal is to include this header in <linux/sched.h>, not the full
<linux/mm_types.h> header, to reduce the size, complexity and coupling of
<linux/sched.h>. (This patch does not change <linux/sched.h> yet.)

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
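For orientation, the moved definitions are the ones that 'struct task_struct' embeds by value: the per-task VMA cache, the per-task page fragment state and the optional per-thread RSS cache. A minimal sketch of a task-side consumer that, after this patch, only needs the lightweight header; the wrapper struct and its name are illustrative, not the real task_struct layout:

/*
 * Illustrative sketch only: a hypothetical task-side structure embedding
 * the moved MM types. With the split it can be built against
 * <linux/mm_types_task.h> alone, without the full <linux/mm_types.h>.
 */
#include <linux/mm_types_task.h>

struct example_task_mm_state {			/* hypothetical wrapper */
	struct vmacache		vmacache;	/* per-task VMA lookup cache */
	struct page_frag	task_frag;	/* per-task page fragment state */
#ifdef SPLIT_RSS_COUNTING
	struct task_rss_stat	rss_stat;	/* per-thread cached RSS counters */
#endif
};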
Diffstat (limited to 'include')
-rw-r--r--  include/linux/mm_types.h       49
-rw-r--r--  include/linux/mm_types_task.h  55
2 files changed, 55 insertions(+), 49 deletions(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 28bc710d3467..f60f45fe226f 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -24,11 +24,6 @@
struct address_space;
struct mem_cgroup;
-#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
-#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
- IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
-#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
-
/*
* Each physical page in the system has a struct page associated with
* it to keep track of whatever it is we are using the page for at the
@@ -231,17 +226,6 @@ struct page {
#endif
;
-struct page_frag {
- struct page *page;
-#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
- __u32 offset;
- __u32 size;
-#else
- __u16 offset;
- __u16 size;
-#endif
-};
-
#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
@@ -360,18 +344,6 @@ struct vm_area_struct {
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
};
-/*
- * The per task VMA cache array:
- */
-#define VMACACHE_BITS 2
-#define VMACACHE_SIZE (1U << VMACACHE_BITS)
-#define VMACACHE_MASK (VMACACHE_SIZE - 1)
-
-struct vmacache {
- u32 seqnum;
- struct vm_area_struct *vmas[VMACACHE_SIZE];
-};
-
struct core_thread {
struct task_struct *task;
struct core_thread *next;
@@ -383,27 +355,6 @@ struct core_state {
struct completion startup;
};
-enum {
- MM_FILEPAGES, /* Resident file mapping pages */
- MM_ANONPAGES, /* Resident anonymous pages */
- MM_SWAPENTS, /* Anonymous swap entries */
- MM_SHMEMPAGES, /* Resident shared memory pages */
- NR_MM_COUNTERS
-};
-
-#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
-#define SPLIT_RSS_COUNTING
-/* per-thread cached information, */
-struct task_rss_stat {
- int events; /* for synchronization threshold */
- int count[NR_MM_COUNTERS];
-};
-#endif /* USE_SPLIT_PTE_PTLOCKS */
-
-struct mm_rss_stat {
- atomic_long_t count[NR_MM_COUNTERS];
-};
-
struct kioctx_table;
struct mm_struct {
struct vm_area_struct *mmap; /* list of VMAs */
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index 2419d302f03c..9526d8b9fe0e 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -1,10 +1,65 @@
#ifndef _LINUX_MM_TYPES_TASK_H
#define _LINUX_MM_TYPES_TASK_H
+/*
+ * Here are the definitions of the MM data types that are embedded in 'struct task_struct'.
+ *
+ * (These are defined separately to decouple sched.h from mm_types.h as much as possible.)
+ */
+
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <asm/page.h>
+#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
+#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
+ IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
+#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
+
+/*
+ * The per task VMA cache array:
+ */
+#define VMACACHE_BITS 2
+#define VMACACHE_SIZE (1U << VMACACHE_BITS)
+#define VMACACHE_MASK (VMACACHE_SIZE - 1)
+
+struct vmacache {
+ u32 seqnum;
+ struct vm_area_struct *vmas[VMACACHE_SIZE];
+};
+
+enum {
+ MM_FILEPAGES, /* Resident file mapping pages */
+ MM_ANONPAGES, /* Resident anonymous pages */
+ MM_SWAPENTS, /* Anonymous swap entries */
+ MM_SHMEMPAGES, /* Resident shared memory pages */
+ NR_MM_COUNTERS
+};
+
+#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
+#define SPLIT_RSS_COUNTING
+/* per-thread cached information, */
+struct task_rss_stat {
+ int events; /* for synchronization threshold */
+ int count[NR_MM_COUNTERS];
+};
+#endif /* USE_SPLIT_PTE_PTLOCKS */
+
+struct mm_rss_stat {
+ atomic_long_t count[NR_MM_COUNTERS];
+};
+
+struct page_frag {
+ struct page *page;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+ __u32 offset;
+ __u32 size;
+#else
+ __u16 offset;
+ __u16 size;
+#endif
+};
+
#endif /* _LINUX_MM_TYPES_TASK_H */
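The stated goal, explicitly not implemented by this patch, is that <linux/sched.h> can later pull in this small header instead of the full <linux/mm_types.h>. A hedged sketch of what that follow-up include switch could look like, assuming nothing else from <linux/mm_types.h> is still referenced by sched.h at that point:

/*
 * Illustrative follow-up only -- not part of this patch. Once the
 * task-embedded MM types live in <linux/mm_types_task.h>, a header such
 * as <linux/sched.h> could drop the heavyweight include:
 *
 *	-#include <linux/mm_types.h>
 *	+#include <linux/mm_types_task.h>
 */
#include <linux/mm_types_task.h>	/* instead of <linux/mm_types.h> */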