author		NeilBrown <neilb@suse.com>	2018-06-18 12:52:50 +1000
committer	David S. Miller <davem@davemloft.net>	2018-06-22 13:43:27 +0900
commit		5af68ef7333c8606bfe6e400cb962081518c3acb (patch)
tree		d494680035d9537192c0c0d3094c1ee846051000 /lib/rhashtable.c
parent		9b4f64a227b6f462482a8cc68c7134dc6e26f1c1 (diff)
rhashtable: simplify nested_table_alloc() and rht_bucket_nested_insert()
Now that we don't use the hash value or shift in nested_table_alloc(),
there is room for simplification.
We only need to pass an "is this a leaf" flag to nested_table_alloc(),
and no longer need to track as much information in
rht_bucket_nested_insert().
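
As a rough illustration of the new calling convention, here is a
minimal userspace sketch; the union layout, the dummy sentinel value,
and the surrounding main() are invented for the demo and are not the
kernel code:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096

    /* Simplified stand-in for the kernel's union nested_table: every
     * slot is one pointer, read either as a pointer to the next level
     * of tables or as a bucket head. */
    union nested_table {
        union nested_table *table;
        void *bucket;
    };

    /* Toy version of the simplified helper: the only per-call
     * information is the "leaf" flag.  Leaf tables get their bucket
     * heads initialised (here to a dummy sentinel standing in for a
     * nulls marker); interior tables stay zeroed. */
    static union nested_table *nested_table_alloc(union nested_table **prev,
                                                  bool leaf)
    {
        union nested_table *ntbl;
        size_t i;

        if (*prev)
            return *prev;

        ntbl = calloc(1, PAGE_SIZE);
        if (ntbl && leaf)
            for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
                ntbl[i].bucket = (void *)0x1; /* dummy nulls marker */
        *prev = ntbl;
        return ntbl;
    }

    int main(void)
    {
        union nested_table *interior = NULL, *leaf = NULL;

        nested_table_alloc(&interior, false);
        nested_table_alloc(&leaf, true);
        if (!interior || !leaf)
            return 1;
        printf("interior[0] = %p, leaf[0] = %p\n",
               (void *)interior[0].table, leaf[0].bucket);
        return 0;
    }

The point is that the caller decides up front whether the table being
allocated is the last (leaf) level, so the allocator no longer needs
the running shifted/nhash bookkeeping.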
Note there is another minor cleanup in nested_table_alloc() here.
The number of elements in a page of "union nested_table" is most
naturally

    PAGE_SIZE / sizeof(ntbl[0])

The previous code had

    PAGE_SIZE / sizeof(ntbl[0].bucket)

which happens to be the correct value only because the bucket member
uses all the space in the union.
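
This is easy to check standalone; the sketch below uses a simplified
stand-in for the real union (assuming, as in the kernel, that both
members are single pointers, so .bucket spans the whole union):

    #include <stdio.h>

    #define PAGE_SIZE 4096

    union nested_table {
        union nested_table *table;
        void *bucket;
    };

    int main(void)
    {
        union nested_table ntbl[1];

        /* Both divisions print the same count, but only because
         * sizeof(ntbl[0].bucket) == sizeof(ntbl[0]). */
        printf("%zu == %zu\n",
               PAGE_SIZE / sizeof(ntbl[0]),
               PAGE_SIZE / sizeof(ntbl[0].bucket));
        return 0;
    }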
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib/rhashtable.c')
 lib/rhashtable.c | 18 ++++++------------
 1 file changed, 6 insertions(+), 12 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index a81cd27d518c..2aa41c15df17 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -116,7 +116,7 @@ static void bucket_table_free_rcu(struct rcu_head *head)
 
 static union nested_table *nested_table_alloc(struct rhashtable *ht,
 					      union nested_table __rcu **prev,
-					      unsigned int shifted)
+					      bool leaf)
 {
 	union nested_table *ntbl;
 	int i;
@@ -127,8 +127,8 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
 
 	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
 
-	if (ntbl && shifted) {
-		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
+	if (ntbl && leaf) {
+		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
 			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
 	}
 
@@ -155,7 +155,7 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
 		return NULL;
 
 	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
-				0)) {
+				false)) {
 		kfree(tbl);
 		return NULL;
 	}
@@ -1207,24 +1207,18 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
 	unsigned int index = hash & ((1 << tbl->nest) - 1);
 	unsigned int size = tbl->size >> tbl->nest;
 	union nested_table *ntbl;
-	unsigned int shifted;
-	unsigned int nhash;
 
 	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
 	hash >>= tbl->nest;
-	nhash = index;
-	shifted = tbl->nest;
 	ntbl = nested_table_alloc(ht, &ntbl[index].table,
-				  size <= (1 << shift) ? shifted : 0);
+				  size <= (1 << shift));
 
 	while (ntbl && size > (1 << shift)) {
 		index = hash & ((1 << shift) - 1);
 		size >>= shift;
 		hash >>= shift;
-		nhash |= index << shifted;
-		shifted += shift;
 		ntbl = nested_table_alloc(ht, &ntbl[index].table,
-					  size <= (1 << shift) ? shifted : 0);
+					  size <= (1 << shift));
 	}
 
 	if (!ntbl)