author    NeilBrown <neilb@suse.com>  2018-06-18 12:52:50 +1000
committer David S. Miller <davem@davemloft.net>  2018-06-22 13:43:27 +0900
commit    9b4f64a227b6f462482a8cc68c7134dc6e26f1c1 (patch)
tree      a293398f90708f23202e5ae6497a45cdfc6d71f9 /lib
parent    9f9a707738aa7a8b9f78a641b83927ada256a626 (diff)
rhashtable: simplify INIT_RHT_NULLS_HEAD()
The 'ht' and 'hash' arguments to INIT_RHT_NULLS_HEAD() are no longer used - so drop them.
This allows us to also remove the nhash argument from nested_table_alloc().

Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
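For reference, the macro itself is defined in include/linux/rhashtable.h, which lies outside the lib/-only diff shown below. A minimal sketch of the simplified definition, assuming the per-bucket nulls marker no longer needs to encode the hash, could look like this:

	/* Sketch only -- not taken from the diff below.  With the 'ht' and
	 * 'hash' arguments dropped, every bucket head is initialised to the
	 * same constant nulls marker (NULLS_MARKER() comes from
	 * include/linux/list_nulls.h).
	 */
	#define INIT_RHT_NULLS_HEAD(ptr) \
		((ptr) = (typeof(ptr)) NULLS_MARKER(0))

Callers then pass only the bucket pointer, e.g. INIT_RHT_NULLS_HEAD(tbl->buckets[i]), as the hunks below show.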
Diffstat (limited to 'lib')
-rw-r--r--  lib/rhashtable.c | 15
1 file changed, 6 insertions(+), 9 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 688693c919be..a81cd27d518c 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -116,8 +116,7 @@ static void bucket_table_free_rcu(struct rcu_head *head)
static union nested_table *nested_table_alloc(struct rhashtable *ht,
union nested_table __rcu **prev,
- unsigned int shifted,
- unsigned int nhash)
+ unsigned int shifted)
{
union nested_table *ntbl;
int i;
@@ -130,8 +129,7 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
if (ntbl && shifted) {
for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
- INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
- (i << shifted) | nhash);
+ INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
}
rcu_assign_pointer(*prev, ntbl);
@@ -157,7 +155,7 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
return NULL;
if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
- 0, 0)) {
+ 0)) {
kfree(tbl);
return NULL;
}
@@ -207,7 +205,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
tbl->hash_rnd = get_random_u32();
for (i = 0; i < nbuckets; i++)
- INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
+ INIT_RHT_NULLS_HEAD(tbl->buckets[i]);
return tbl;
}
@@ -1217,7 +1215,7 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
nhash = index;
shifted = tbl->nest;
ntbl = nested_table_alloc(ht, &ntbl[index].table,
- size <= (1 << shift) ? shifted : 0, nhash);
+ size <= (1 << shift) ? shifted : 0);
while (ntbl && size > (1 << shift)) {
index = hash & ((1 << shift) - 1);
@@ -1226,8 +1224,7 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
nhash |= index << shifted;
shifted += shift;
ntbl = nested_table_alloc(ht, &ntbl[index].table,
- size <= (1 << shift) ? shifted : 0,
- nhash);
+ size <= (1 << shift) ? shifted : 0);
}
if (!ntbl)