author    NeilBrown <neilb@suse.com>            2019-04-12 11:52:08 +1000
committer David S. Miller <davem@davemloft.net> 2019-04-12 17:34:45 -0700
commit    f4712b46a529ca2da078c82d5d99d367c7ebf82b (patch)
tree      0e77f82fcf92c8483e192ecba4acb6ce8c5a86a8
parent    adc6a3ab192eb40fb9d8b093c87d9aa785af4513 (diff)
rhashtable: replace rht_ptr_locked() with rht_assign_locked()
rht_ptr_locked() is only ever used to store a new value in a bucket
head, and that is the only use that makes sense. So replace it with a
function that does the whole task: set the lock bit and assign to the
bucket head.

Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
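To make the mechanism concrete, here is a minimal user-space sketch (not
the in-tree code) of the bit-lock idea the patch builds on: the bucket
head stores a rhash_head pointer whose bit 1 doubles as the bucket lock,
so a single release store can publish a new head while keeping that bit
set. All sketch_-prefixed names are hypothetical; the release store
stands in for rcu_assign_pointer(), which, as the comment added by the
patch notes, provides the same release semantics.

#include <stdatomic.h>
#include <stdint.h>

struct rhash_head {
        struct rhash_head *next;
};

#define SKETCH_LOCK_BIT (1UL << 1)      /* mirrors BIT(1) in the patch */

/* Store a new bucket head while keeping the lock bit set, as the new
 * rht_assign_locked() does. */
static inline void sketch_assign_locked(_Atomic uintptr_t *bkt,
                                        struct rhash_head *obj)
{
        atomic_store_explicit(bkt, (uintptr_t)obj | SKETCH_LOCK_BIT,
                              memory_order_release);
}

/* Read the head with the lock bit masked off, as rht_ptr_exclusive()
 * does for a caller that already holds the lock. */
static inline struct rhash_head *sketch_ptr_exclusive(_Atomic uintptr_t *bkt)
{
        return (struct rhash_head *)(atomic_load_explicit(bkt,
                        memory_order_relaxed) & ~SKETCH_LOCK_BIT);
}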
-rw-r--r--  include/linux/rhashtable.h | 9
-rw-r--r--  lib/rhashtable.c           | 6
2 files changed, 9 insertions(+), 6 deletions(-)
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index b54e6436547e..882bc0fcea4b 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -316,6 +316,7 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
* local_bh. For that we have rht_assign_unlock(). As rcu_assign_pointer()
* provides the same release semantics that bit_spin_unlock() provides,
* this is safe.
+ * When we write to a bucket without unlocking, we use rht_assign_locked().
*/
static inline void rht_lock(struct bucket_table *tbl,
@@ -369,10 +370,12 @@ static inline struct rhash_head *rht_ptr_exclusive(
return (void *)(((unsigned long)p) & ~BIT(1));
}
-static inline struct rhash_lock_head __rcu *rht_ptr_locked(const
- struct rhash_head *p)
+static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
+ struct rhash_head *obj)
{
- return (void *)(((unsigned long)p) | BIT(1));
+ struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
+
+ rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(1)));
}
static inline void rht_assign_unlock(struct bucket_table *tbl,
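For contrast, rht_assign_unlock(), mentioned in the comment above, is
the unlocking counterpart: it stores the new head with the lock bit
clear, so the same release store that publishes the pointer also drops
the lock. A hedged sketch in the style of the hypothetical sketch_
helpers above (the real kernel function also re-enables softirqs, which
is omitted here):

static inline void sketch_assign_unlock(_Atomic uintptr_t *bkt,
                                        struct rhash_head *obj)
{
        /* Lock bit left clear: publishing the new head and releasing
         * the bucket lock are one and the same store. */
        atomic_store_explicit(bkt, (uintptr_t)obj,
                              memory_order_release);
}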
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 237368ea98c5..ef5378efdef3 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -259,7 +259,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
rcu_assign_pointer(*pprev, next);
else
/* Need to preserved the bit lock. */
- rcu_assign_pointer(*bkt, rht_ptr_locked(next));
+ rht_assign_locked(bkt, next);
out:
return err;
@@ -517,7 +517,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
rcu_assign_pointer(*pprev, obj);
else
/* Need to preserve the bit lock */
- rcu_assign_pointer(*bkt, rht_ptr_locked(obj));
+ rht_assign_locked(bkt, obj);
return NULL;
}
@@ -570,7 +570,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
/* bkt is always the head of the list, so it holds
* the lock, which we need to preserve
*/
- rcu_assign_pointer(*bkt, rht_ptr_locked(obj));
+ rht_assign_locked(bkt, obj);
atomic_inc(&ht->nelems);
if (rht_grow_above_75(ht, tbl))
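All three lib/rhashtable.c call sites follow the same pattern: link
through *pprev when the element being rewritten is not the bucket head,
and go through rht_assign_locked() for the head itself so the lock bit
survives the write. A condensed, hypothetical rendering of that pattern,
reusing the sketch_ helpers above:

/* Unlink 'victim' from a bucket whose head pointer doubles as the lock.
 * 'pprev' is NULL while 'victim' is still the bucket head. */
static void sketch_unlink(_Atomic uintptr_t *bkt,
                          struct rhash_head **pprev,
                          struct rhash_head *victim)
{
        struct rhash_head *next = victim->next;

        if (pprev)
                *pprev = next;  /* interior link: no lock bit involved */
        else
                sketch_assign_locked(bkt, next); /* head write keeps lock */
}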