author	Dave Chinner <dchinner@redhat.com>	2013-08-28 10:17:55 +1000
committer	Al Viro <viro@zeniv.linux.org.uk>	2013-09-10 18:56:30 -0400
commit	19156840e33a23eeb1a749c0f991dab6588b077d (patch)
tree	460675d21b0d6a5de3c179b951d18fec24e77cc8 /fs/dcache.c
parent	62d36c77035219ac776d1882ed3a662f2b75f258 (diff)
dentry: move to per-sb LRU locks
With the dentry LRUs being per-sb structures, there is no real need
for a global dentry_lru_lock. The locking can be made more
fine-grained by moving to a per-sb LRU lock, isolating the LRU
operations of different filesystems completely from each other. The
need for this is independent of any performance consideration that
may arise: in the interest of abstracting the lru operations away, it
is mandatory that each lru works around its own lock instead of a
global lock for all of them.

[glommer@openvz.org: updated changelog]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
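To make the shape of the change concrete, below is a minimal kernel-style
sketch of the pattern the patch applies: the LRU lock moves from a
file-scope global into the object that owns the list. This is illustrative
only; sb_like, dentry_like and lru_add are hypothetical stand-ins, not the
real fs/dcache.c definitions.

/*
 * Sketch (not the actual fs/dcache.c code): each super-block-like
 * object carries its own spinlock next to its own LRU list, so LRU
 * traffic on one filesystem never contends with another.
 */
#include <linux/spinlock.h>
#include <linux/list.h>

struct sb_like {
	spinlock_t	s_dentry_lru_lock;	/* per-sb, replaces a global lock */
	struct list_head s_dentry_lru;		/* per-sb LRU of unused entries */
	long		s_nr_dentry_unused;
};

struct dentry_like {
	struct sb_like	*d_sb;
	struct list_head d_lru;
};

static void lru_add(struct dentry_like *dentry)
{
	/* the lock travels with the object: no cross-sb contention */
	spin_lock(&dentry->d_sb->s_dentry_lru_lock);
	list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
	dentry->d_sb->s_nr_dentry_unused++;
	spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
}

Because the lock now lives next to the list it protects, the pair can
later be factored out as a self-contained structure, which is the
abstraction point the changelog refers to.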
Diffstat (limited to 'fs/dcache.c')
-rw-r--r--	fs/dcache.c	33
1 file changed, 16 insertions(+), 17 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index 03161240e744..e989ecb44a65 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -48,7 +48,7 @@
* - the dcache hash table
* s_anon bl list spinlock protects:
* - the s_anon list (see __d_drop)
- * dcache_lru_lock protects:
+ * dentry->d_sb->s_dentry_lru_lock protects:
* - the dcache lru lists and counters
* d_lock protects:
* - d_flags
@@ -63,7 +63,7 @@
* Ordering:
* dentry->d_inode->i_lock
* dentry->d_lock
- * dcache_lru_lock
+ * dentry->d_sb->s_dentry_lru_lock
* dcache_hash_bucket lock
* s_anon lock
*
@@ -81,7 +81,6 @@
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);
@@ -362,12 +361,12 @@ static void dentry_unlink_inode(struct dentry * dentry)
static void dentry_lru_add(struct dentry *dentry)
{
if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) {
- spin_lock(&dcache_lru_lock);
+ spin_lock(&dentry->d_sb->s_dentry_lru_lock);
dentry->d_flags |= DCACHE_LRU_LIST;
list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
dentry->d_sb->s_nr_dentry_unused++;
this_cpu_inc(nr_dentry_unused);
- spin_unlock(&dcache_lru_lock);
+ spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
}
}
@@ -385,15 +384,15 @@ static void __dentry_lru_del(struct dentry *dentry)
static void dentry_lru_del(struct dentry *dentry)
{
if (!list_empty(&dentry->d_lru)) {
- spin_lock(&dcache_lru_lock);
+ spin_lock(&dentry->d_sb->s_dentry_lru_lock);
__dentry_lru_del(dentry);
- spin_unlock(&dcache_lru_lock);
+ spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
}
}
static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
{
- spin_lock(&dcache_lru_lock);
+ spin_lock(&dentry->d_sb->s_dentry_lru_lock);
if (list_empty(&dentry->d_lru)) {
dentry->d_flags |= DCACHE_LRU_LIST;
list_add_tail(&dentry->d_lru, list);
@@ -402,7 +401,7 @@ static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
} else {
list_move_tail(&dentry->d_lru, list);
}
- spin_unlock(&dcache_lru_lock);
+ spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
}
/**
@@ -895,14 +894,14 @@ void prune_dcache_sb(struct super_block *sb, int count)
LIST_HEAD(tmp);
relock:
- spin_lock(&dcache_lru_lock);
+ spin_lock(&sb->s_dentry_lru_lock);
while (!list_empty(&sb->s_dentry_lru)) {
dentry = list_entry(sb->s_dentry_lru.prev,
struct dentry, d_lru);
BUG_ON(dentry->d_sb != sb);
if (!spin_trylock(&dentry->d_lock)) {
- spin_unlock(&dcache_lru_lock);
+ spin_unlock(&sb->s_dentry_lru_lock);
cpu_relax();
goto relock;
}
@@ -918,11 +917,11 @@ relock:
if (!--count)
break;
}
- cond_resched_lock(&dcache_lru_lock);
+ cond_resched_lock(&sb->s_dentry_lru_lock);
}
if (!list_empty(&referenced))
list_splice(&referenced, &sb->s_dentry_lru);
- spin_unlock(&dcache_lru_lock);
+ spin_unlock(&sb->s_dentry_lru_lock);
shrink_dentry_list(&tmp);
}
@@ -938,14 +937,14 @@ void shrink_dcache_sb(struct super_block *sb)
{
LIST_HEAD(tmp);
- spin_lock(&dcache_lru_lock);
+ spin_lock(&sb->s_dentry_lru_lock);
while (!list_empty(&sb->s_dentry_lru)) {
list_splice_init(&sb->s_dentry_lru, &tmp);
- spin_unlock(&dcache_lru_lock);
+ spin_unlock(&sb->s_dentry_lru_lock);
shrink_dentry_list(&tmp);
- spin_lock(&dcache_lru_lock);
+ spin_lock(&sb->s_dentry_lru_lock);
}
- spin_unlock(&dcache_lru_lock);
+ spin_unlock(&sb->s_dentry_lru_lock);
}
EXPORT_SYMBOL(shrink_dcache_sb);
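A detail worth noting in the prune_dcache_sb hunk above: the documented
ordering takes dentry->d_lock before the LRU lock, yet the LRU walk
already holds s_dentry_lru_lock when it needs d_lock. The code therefore
trylocks and, on failure, drops the LRU lock and restarts from scratch
rather than risk an ABBA deadlock. A sketch of that idiom, reusing the
hypothetical types from the earlier sketch (and assuming dentry_like
additionally carries a spinlock_t d_lock):

/*
 * Sketch of the trylock-and-retry idiom from prune_dcache_sb
 * (simplified; prune_one and the *_like types are stand-ins).
 */
static void prune_one(struct sb_like *sb)
{
	struct dentry_like *dentry;
relock:
	spin_lock(&sb->s_dentry_lru_lock);
	while (!list_empty(&sb->s_dentry_lru)) {
		dentry = list_entry(sb->s_dentry_lru.prev,
				    struct dentry_like, d_lru);
		/*
		 * d_lock nests outside the LRU lock in the documented
		 * order, so taking it here is a wrong-order attempt:
		 * trylock, and on failure back off completely.
		 */
		if (!spin_trylock(&dentry->d_lock)) {
			spin_unlock(&sb->s_dentry_lru_lock);
			cpu_relax();
			goto relock;
		}
		/* ... isolate the dentry onto a private list ... */
		spin_unlock(&dentry->d_lock);
		break;
	}
	spin_unlock(&sb->s_dentry_lru_lock);
}

The per-sb conversion leaves this dance intact; only the lock being
dropped and retaken changes from the global one to the sb's own.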