author     Pablo Neira Ayuso <pablo@netfilter.org>          2016-08-24 12:31:31 +0200
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2019-02-08 11:25:30 +0100
commit     8295999786a5141bb3a8db8e7a8b40e82e66ce3f (patch)
tree       03668750f3d71bb39bc4c5fc93cc55aa250316a4 /lib
parent     a7fb573c164488b36aeafe63a1531b9e7cf27fad (diff)
rhashtable: add rhashtable_lookup_get_insert_key()
commit 5ca8cc5bf11faed257c762018aea9106d529232f upstream.

This patch modifies __rhashtable_insert_fast() so it returns the existing
object that clashes with the one that you want to insert. In case the object
is successfully inserted, NULL is returned. Otherwise, you get an error via
ERR_PTR().

This patch adapts the existing callers of __rhashtable_insert_fast() so they
handle this new logic, and it adds a new rhashtable_lookup_get_insert_key()
interface to fetch this existing object.

nf_tables needs this change to improve handling of EEXIST cases via honoring
the NLM_F_EXCL flag and by checking if the data part of the mapping matches
what we have.

Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Thomas Graf <tgraf@suug.ch>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
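To make the new return convention concrete, here is a minimal caller sketch.
It is not taken from this patch: the struct, field names and rhashtable_params
values are assumptions for illustration only; what comes from the commit
message above is rhashtable_lookup_get_insert_key() and the convention that it
returns NULL on successful insertion, the clashing object on EEXIST, or an
error via ERR_PTR().

#include <linux/err.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>

/* Hypothetical example entry: maps a u32 key to a u32 value. */
struct demo_entry {
	u32			key;
	u32			value;
	struct rhash_head	node;
};

static const struct rhashtable_params demo_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct demo_entry, key),
	.head_offset	= offsetof(struct demo_entry, node),
};

/* Insert @new unless an entry with the same key is already present. */
static int demo_insert(struct rhashtable *ht, struct demo_entry *new)
{
	struct demo_entry *old;

	old = rhashtable_lookup_get_insert_key(ht, &new->key, &new->node,
					       demo_params);
	if (!old)
		return 0;		/* inserted, no clash */
	if (IS_ERR(old))
		return PTR_ERR(old);	/* e.g. -E2BIG or -ENOMEM */

	/*
	 * An entry with this key already exists and @old points at it, so a
	 * caller such as nf_tables can honor NLM_F_EXCL or compare the data
	 * part of the existing mapping before deciding what to do.
	 */
	kfree(new);
	return -EEXIST;
}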
Diffstat (limited to 'lib')
-rw-r--r--  lib/rhashtable.c  10
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 37ea94b636a3..991bee32e52e 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -441,7 +441,8 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
 struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
 					    const void *key,
 					    struct rhash_head *obj,
-					    struct bucket_table *tbl)
+					    struct bucket_table *tbl,
+					    void **data)
 {
 	struct rhash_head *head;
 	unsigned int hash;
@@ -452,8 +453,11 @@ struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
 	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);
 
 	err = -EEXIST;
-	if (key && rhashtable_lookup_fast(ht, key, ht->p))
-		goto exit;
+	if (key) {
+		*data = rhashtable_lookup_fast(ht, key, ht->p);
+		if (*data)
+			goto exit;
+	}
 
 	err = -E2BIG;
 	if (unlikely(rht_grow_above_max(ht, tbl)))