path: root/include
author	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>	2010-03-05 13:41:40 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-03-06 11:26:24 -0800
commit	34e55232e59f7b19050267a05ff1226e5cd122a5 (patch)
tree	6b94e776e87d2a2fe1ceca7c5606901575323900 /include
parent	d559db086ff5be9bcc259e5aa50bf3d881eaf1d1 (diff)
mm: avoid false sharing of mm_counter
Per-mm statistics are shared by all threads of a process, which makes them a cache-miss point in the page fault path.

This patch adds a per-thread cache for the mm counters: RSS values are accumulated in a struct embedded in task_struct and synchronized with the mm's counters at certain events. In this patch, the event is the number of calls to handle_mm_fault; the per-thread values are folded into the mm once every 64 calls.

A rough estimate with a small benchmark running two parallel threads shows:
[before] 4.5 cache-misses/fault
[after]  4.0 cache-misses/fault

Once the number of threads grows, the most contended object becomes mmap_sem anyway.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
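
The fault-path half of the change lives in mm/memory.c, which is outside this include-only diff. As a rough sketch of the mechanism described above (the helper name add_mm_counter_fast and its exact fall-back logic are assumptions; only sync_mm_rss() and the structures appear in the headers below):

/*
 * Illustrative sketch only -- the real code is in mm/memory.c and not part
 * of this include-limited diff; add_mm_counter_fast() is an assumed name.
 */
static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
{
	struct task_struct *task = current;

	/*
	 * Bump the thread-local cache instead of the shared atomic counter,
	 * so concurrently faulting threads do not bounce the same cache line.
	 */
	if (likely(task->mm == mm))
		task->rss_stat.count[member] += val;
	else
		add_mm_counter(mm, member, val);	/* not our mm: update the shared counter */
}

/* Fold the per-thread deltas back into the shared mm counters. */
void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		if (task->rss_stat.count[i]) {
			add_mm_counter(mm, i, task->rss_stat.count[i]);
			task->rss_stat.count[i] = 0;
		}
	}
	task->rss_stat.events = 0;
}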
Diffstat (limited to 'include')
-rw-r--r--	include/linux/mm.h	| 8
-rw-r--r--	include/linux/mm_types.h	| 6
-rw-r--r--	include/linux/sched.h	| 4
3 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2124cdb2d1d0..8e580c07d171 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -873,7 +873,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
/*
* per-process(per-mm_struct) statistics.
*/
-#if USE_SPLIT_PTLOCKS
+#if defined(SPLIT_RSS_COUNTING)
/*
* The mm counters are not protected by its page_table_lock,
* so must be incremented atomically.
@@ -883,10 +883,7 @@ static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
atomic_long_set(&mm->rss_stat.count[member], value);
}
-static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
-{
- return (unsigned long)atomic_long_read(&mm->rss_stat.count[member]);
-}
+unsigned long get_mm_counter(struct mm_struct *mm, int member);
static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
@@ -974,6 +971,7 @@ static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
*maxrss = hiwater_rss;
}
+void sync_mm_rss(struct task_struct *task, struct mm_struct *mm);
/*
* A callback you can register to apply pressure to ageable caches.
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index e1ca64be6678..21861239ab0c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -202,9 +202,15 @@ enum {
};
#if USE_SPLIT_PTLOCKS
+#define SPLIT_RSS_COUNTING
struct mm_rss_stat {
atomic_long_t count[NR_MM_COUNTERS];
};
+/* per-thread cached information, */
+struct task_rss_stat {
+ int events; /* for synchronization threshold */
+ int count[NR_MM_COUNTERS];
+};
#else /* !USE_SPLIT_PTLOCKS */
struct mm_rss_stat {
unsigned long count[NR_MM_COUNTERS];
diff --git a/include/linux/sched.h b/include/linux/sched.h
index cbeafa49a53b..46c6f8d5dc06 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1220,7 +1220,9 @@ struct task_struct {
struct plist_node pushable_tasks;
struct mm_struct *mm, *active_mm;
-
+#if defined(SPLIT_RSS_COUNTING)
+ struct task_rss_stat rss_stat;
+#endif
/* task state */
int exit_state;
int exit_code, exit_signal;
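
With the rss_stat field added to task_struct above, the per-thread deltas have to be drained back into the mm periodically. A sketch of the trigger, assuming the 64-event threshold described in the changelog (the function and macro names here are illustrative, not taken from this diff):

/* Sketch: sync once the per-thread event counter passes the threshold. */
#define TASK_RSS_EVENTS_THRESH	64

static void check_sync_rss_stat(struct task_struct *task)
{
	if (unlikely(task != current))
		return;
	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
		sync_mm_rss(task, task->mm);	/* fold per-thread deltas into the mm */
}

handle_mm_fault() would call such a check once per fault, which is what makes the synchronization event "every 64 calls to handle_mm_fault".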