author     Herbert Xu <herbert@gondor.apana.org.au>    2015-03-24 00:50:28 +1100
committer  David S. Miller <davem@davemloft.net>       2015-03-23 22:07:52 -0400
commit     ccd57b1bd32460d27bbb9c599e795628a3c66983 (patch)
tree       320a409508ea1e83a335804bf3d1a840227cb7b7 /lib
parent     b9ecfdaa1090b5988422eaf5348ea1954d2d7219 (diff)
rhashtable: Add immediate rehash during insertion
This patch reintroduces immediate rehash during insertion. If,
during insertion, we find that the table is full or the chain
length exceeds a set limit (currently 16, but it may be disabled
with insecure_elasticity), then we force an immediate rehash.
The rehash includes an expansion if the table utilisation
exceeds 75%.
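
(Illustration only, not part of the patch: a tiny standalone C model of the
thresholds described above. needs_immediate_rehash, next_table_size and
ELASTICITY_LIMIT are invented names for this sketch; in the patch the
equivalent checks are rhashtable_check_elasticity(), rht_grow_above_100()
and rht_grow_above_75().)

#include <stdbool.h>
#include <stdio.h>

#define ELASTICITY_LIMIT 16     /* default ht->elasticity set by the patch */

/* Force an immediate rehash when the bucket chain reaches the elasticity
 * limit or the table is above 100% utilisation. */
static bool needs_immediate_rehash(unsigned chain_len,
                                   unsigned nelems, unsigned size)
{
        return chain_len >= ELASTICITY_LIMIT || nelems > size;
}

/* The replacement table is doubled only when utilisation exceeds 75%;
 * otherwise the rehash keeps the same size. */
static unsigned next_table_size(unsigned nelems, unsigned size)
{
        return nelems > size / 4 * 3 ? size * 2 : size;
}

int main(void)
{
        printf("rehash needed: %d, new size: %u\n",
               needs_immediate_rehash(17, 100, 128),
               next_table_size(100, 128));
        return 0;
}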
If this rehash fails then the insertion will fail. Otherwise the
insertion will be reattempted in the new hash table.
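
(The caller-side retry is outside this diff, which is limited to lib/. The
following is a hedged C sketch of the flow described above, not the actual
fast-path code: insert_with_rehash and try_insert are hypothetical names,
while rhashtable_insert_rehash is the helper added by this patch.)

#include <linux/errno.h>
#include <linux/rhashtable.h>

/* Hypothetical stand-in for an insertion fast path that returns -EAGAIN
 * once rhashtable_insert_slow() hits the elasticity or 100%-utilisation
 * check. */
int try_insert(struct rhashtable *ht, struct rhash_head *obj);

int insert_with_rehash(struct rhashtable *ht, struct rhash_head *obj)
{
        int err;

        for (;;) {
                err = try_insert(ht, obj);
                if (err != -EAGAIN)
                        return err;     /* inserted, or a hard error */

                /* Chain too long or table too full: rehash, then retry
                 * the insertion in the new table. */
                err = rhashtable_insert_rehash(ht);
                if (err)
                        return err;     /* rehash failed => insertion fails */
        }
}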
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib')
-rw-r--r--   lib/rhashtable.c | 60
1 file changed, 59 insertions(+), 1 deletion(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 220a11a13d40..7686c1e9934a 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -375,21 +375,76 @@ unlock:
         schedule_work(&ht->run_work);
 }
 
+static bool rhashtable_check_elasticity(struct rhashtable *ht,
+                                        struct bucket_table *tbl,
+                                        unsigned hash)
+{
+        unsigned elasticity = ht->elasticity;
+        struct rhash_head *head;
+
+        rht_for_each(head, tbl, hash)
+                if (!--elasticity)
+                        return true;
+
+        return false;
+}
+
+int rhashtable_insert_rehash(struct rhashtable *ht)
+{
+        struct bucket_table *old_tbl;
+        struct bucket_table *new_tbl;
+        struct bucket_table *tbl;
+        unsigned int size;
+        int err;
+
+        old_tbl = rht_dereference_rcu(ht->tbl, ht);
+        tbl = rhashtable_last_table(ht, old_tbl);
+
+        size = tbl->size;
+
+        if (rht_grow_above_75(ht, tbl))
+                size *= 2;
+        /* More than two rehashes (not resizes) detected. */
+        else if (WARN_ON(old_tbl != tbl && old_tbl->size == size))
+                return -EBUSY;
+
+        new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
+        if (new_tbl == NULL)
+                return -ENOMEM;
+
+        err = rhashtable_rehash_attach(ht, tbl, new_tbl);
+        if (err) {
+                bucket_table_free(new_tbl);
+                if (err == -EEXIST)
+                        err = 0;
+        } else
+                schedule_work(&ht->run_work);
+
+        return err;
+}
+EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
+
 int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
                            struct rhash_head *obj,
                            struct bucket_table *tbl)
 {
         struct rhash_head *head;
         unsigned hash;
-        int err = -EEXIST;
+        int err;
 
         tbl = rhashtable_last_table(ht, tbl);
         hash = head_hashfn(ht, tbl, obj);
         spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);
 
+        err = -EEXIST;
         if (key && rhashtable_lookup_fast(ht, key, ht->p))
                 goto exit;
 
+        err = -EAGAIN;
+        if (rhashtable_check_elasticity(ht, tbl, hash) ||
+            rht_grow_above_100(ht, tbl))
+                goto exit;
+
+        err = 0;
+
         head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
@@ -678,6 +733,9 @@ int rhashtable_init(struct rhashtable *ht,
 
         ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
 
+        if (!params->insecure_elasticity)
+                ht->elasticity = 16;
+
         if (params->locks_mul)
                 ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
         else