author     Jan Kara <jack@suse.cz>	2022-09-08 11:10:32 +0200
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2023-01-18 11:30:51 +0100
commit     9e04f1f7c93c2ff4a45bd7ca3b910843b2432ea0 (patch)
tree       c4f16d5f9ca1bf2d3c3055633fe2c253a4aad8ac
parent     82725be426bce0a425cc5e26fbad61ffd29cff03 (diff)
mbcache: Avoid nesting of cache->c_list_lock under bit locks
commit 5fc4cbd9fde5d4630494fd6ffc884148fb618087 upstream.

Commit 307af6c87937 ("mbcache: automatically delete entries from cache on
freeing") started nesting cache->c_list_lock under the bit locks protecting
hash buckets of the mbcache hash table in mb_cache_entry_create(). This causes
problems for real-time kernels because on such kernels spinlocks are sleeping
locks while bit locks stay atomic. Luckily the nesting is easy to avoid by
holding an entry reference until the entry is added to the LRU list, which
makes sure we cannot race with entry deletion.

Cc: stable@kernel.org
Fixes: 307af6c87937 ("mbcache: automatically delete entries from cache on freeing")
Reported-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20220908091032.10513-1-jack@suse.cz
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
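To make the resulting lock ordering easier to see than in the two hunks below,
here is a condensed, illustrative sketch of mb_cache_entry_create() after this
change. It is not the patched function verbatim: error paths and the duplicate
scan under the bit lock are omitted, and the name entry_create_sketch() is made
up for illustration; the helpers and fields (mb_entry_cache,
mb_cache_entry_head(), mb_cache_entry_put(), e_refcnt, c_list_lock) follow
fs/mbcache.c.

	/* Condensed illustration only; see the diff below for the real change. */
	static int entry_create_sketch(struct mb_cache *cache, gfp_t mask,
				       u32 key, u64 value)
	{
		struct mb_cache_entry *entry;
		struct hlist_bl_head *head;

		entry = kmem_cache_alloc(mb_entry_cache, mask);
		if (!entry)
			return -ENOMEM;

		INIT_LIST_HEAD(&entry->e_list);
		/* Two references: one for the hash table, one to keep the entry
		 * safe from mb_cache_entry_delete_or_get() until it is on the LRU. */
		atomic_set(&entry->e_refcnt, 2);
		entry->e_key = key;
		entry->e_value = value;

		head = mb_cache_entry_head(cache, key);
		hlist_bl_lock(head);
		hlist_bl_add_head(&entry->e_hash_list, head);
		hlist_bl_unlock(head);		/* drop the atomic bit lock first... */

		spin_lock(&cache->c_list_lock);	/* ...so this lock, which sleeps on
						 * RT, is never nested inside it */
		list_add_tail(&entry->e_list, &cache->c_list);
		cache->c_entry_count++;
		spin_unlock(&cache->c_list_lock);

		mb_cache_entry_put(cache, entry);	/* release the setup reference */
		return 0;
	}

On PREEMPT_RT, spin_lock() maps to a sleeping lock while hlist_bl_lock()
remains an atomic bit spinlock, so the old order (c_list_lock taken inside the
bit lock) could sleep in atomic context. Dropping the bit lock first removes
the nesting, and the second reference keeps a concurrent
mb_cache_entry_delete_or_get() from freeing the entry before it reaches the
LRU list.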
-rw-r--r--	fs/mbcache.c	17
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 8e9e1888e448..2e2d4de4cf87 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -89,8 +89,14 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&entry->e_list);
-	/* Initial hash reference */
-	atomic_set(&entry->e_refcnt, 1);
+	/*
+	 * We create entry with two references. One reference is kept by the
+	 * hash table, the other reference is used to protect us from
+	 * mb_cache_entry_delete_or_get() until the entry is fully setup. This
+	 * avoids nesting of cache->c_list_lock into hash table bit locks which
+	 * is problematic for RT.
+	 */
+	atomic_set(&entry->e_refcnt, 2);
 	entry->e_key = key;
 	entry->e_value = value;
 	entry->e_flags = 0;
@@ -106,15 +112,12 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
 		}
 	}
 	hlist_bl_add_head(&entry->e_hash_list, head);
-	/*
-	 * Add entry to LRU list before it can be found by
-	 * mb_cache_entry_delete() to avoid races
-	 */
+	hlist_bl_unlock(head);
 	spin_lock(&cache->c_list_lock);
 	list_add_tail(&entry->e_list, &cache->c_list);
 	cache->c_entry_count++;
 	spin_unlock(&cache->c_list_lock);
-	hlist_bl_unlock(head);
+	mb_cache_entry_put(cache, entry);
 
 	return 0;
 }