author     David S. Miller <davem@davemloft.net>    2020-08-02 01:02:12 -0700
committer  David S. Miller <davem@davemloft.net>    2020-08-02 01:02:12 -0700
commit     bd0b33b24897ba9ddad221e8ac5b6f0e38a2e004 (patch)
tree       3f03062a9e759dba7b09df0feccbb58b8f7ab4cc /lib
parent     7126bd5c8bcbc015cf89864cf71d750e8f33f924 (diff)
parent     ac3a0c8472969a03c0496ae774b3a29eb26c8d5a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Resolved kernel/bpf/btf.c using instructions from merge commit
69138b34a7248d2396ab85c8652e20c0c39beaba

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib')
-rw-r--r--  lib/random32.c   |  2
-rw-r--r--  lib/rhashtable.c | 35
2 files changed, 17 insertions(+), 20 deletions(-)
diff --git a/lib/random32.c b/lib/random32.c
index 763b920a6206..3d749abb9e80 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -48,7 +48,7 @@ static inline void prandom_state_selftest(void)
 }
 #endif
 
-static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
+DEFINE_PER_CPU(struct rnd_state, net_rand_state);
 
 /**
  * prandom_u32_state - seeded pseudo-random number generator.
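Note on the random32.c hunk above: dropping both the static qualifier and the
__latent_entropy marker makes the per-CPU net_rand_state visible to other
compilation units. Below is a minimal sketch (not part of this commit) of how
another file could then touch that state; it assumes a matching
DECLARE_PER_CPU() declaration is exported from a header, and the helper name
example_perturb_net_rand() is hypothetical.

	#include <linux/percpu.h>
	#include <linux/random.h>

	/* Normally exported from a shared header; repeated here so the
	 * sketch is self-contained. */
	DECLARE_PER_CPU(struct rnd_state, net_rand_state);

	/* Assumed to run with preemption disabled (e.g. from an interrupt
	 * handler), so this_cpu_ptr() refers to a stable CPU. */
	static void example_perturb_net_rand(u32 noise)
	{
		struct rnd_state *state = this_cpu_ptr(&net_rand_state);

		/* Fold caller-supplied noise into one word of the
		 * Tausworthe state... */
		state->s1 ^= noise;
		/* ...and advance the generator so the perturbation reaches
		 * later outputs. */
		prandom_u32_state(state);
	}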
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 9f6890aedd1a..c949c1e3b87c 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -31,7 +31,7 @@
 
 union nested_table {
 	union nested_table __rcu *table;
-	struct rhash_lock_head *bucket;
+	struct rhash_lock_head __rcu *bucket;
 };
 
 static u32 head_hashfn(struct rhashtable *ht,
@@ -222,7 +222,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
 }
 
 static int rhashtable_rehash_one(struct rhashtable *ht,
-				 struct rhash_lock_head **bkt,
+				 struct rhash_lock_head __rcu **bkt,
 				 unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
@@ -275,7 +275,7 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
 				    unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
-	struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash);
+	struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
 	int err;
 
 	if (!bkt)
@@ -485,7 +485,7 @@ fail:
 }
 
 static void *rhashtable_lookup_one(struct rhashtable *ht,
-				   struct rhash_lock_head **bkt,
+				   struct rhash_lock_head __rcu **bkt,
 				   struct bucket_table *tbl, unsigned int hash,
 				   const void *key, struct rhash_head *obj)
 {
@@ -535,12 +535,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 	return ERR_PTR(-ENOENT);
 }
 
-static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
-						  struct rhash_lock_head **bkt,
-						  struct bucket_table *tbl,
-						  unsigned int hash,
-						  struct rhash_head *obj,
-						  void *data)
+static struct bucket_table *rhashtable_insert_one(
+	struct rhashtable *ht, struct rhash_lock_head __rcu **bkt,
+	struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj,
+	void *data)
 {
 	struct bucket_table *new_tbl;
 	struct rhash_head *head;
@@ -591,7 +589,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 {
 	struct bucket_table *new_tbl;
 	struct bucket_table *tbl;
-	struct rhash_lock_head **bkt;
+	struct rhash_lock_head __rcu **bkt;
 	unsigned int hash;
 	void *data;
@@ -1173,8 +1171,8 @@ void rhashtable_destroy(struct rhashtable *ht)
 }
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
 
-struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
-					     unsigned int hash)
+struct rhash_lock_head __rcu **__rht_bucket_nested(
+	const struct bucket_table *tbl, unsigned int hash)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
 	unsigned int index = hash & ((1 << tbl->nest) - 1);
@@ -1202,10 +1200,10 @@ struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
 }
 EXPORT_SYMBOL_GPL(__rht_bucket_nested);
 
-struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
-					   unsigned int hash)
+struct rhash_lock_head __rcu **rht_bucket_nested(
+	const struct bucket_table *tbl, unsigned int hash)
 {
-	static struct rhash_lock_head *rhnull;
+	static struct rhash_lock_head __rcu *rhnull;
 
 	if (!rhnull)
 		INIT_RHT_NULLS_HEAD(rhnull);
@@ -1213,9 +1211,8 @@ struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
 }
 EXPORT_SYMBOL_GPL(rht_bucket_nested);
 
-struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
-						  struct bucket_table *tbl,
-						  unsigned int hash)
+struct rhash_lock_head __rcu **rht_bucket_nested_insert(
+	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
 	unsigned int index = hash & ((1 << tbl->nest) - 1);
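Note on the rhashtable.c hunks: each one makes the same kind of change, adding
an __rcu annotation to the rhash_lock_head bucket pointers so that sparse
(make C=1) can verify they are only read through rcu_dereference()-style
accessors and only written through rcu_assign_pointer() and friends. Below is
a minimal sketch of that annotation pattern, using hypothetical demo_* names
rather than the real rhashtable types.

	#include <linux/rcupdate.h>
	#include <linux/types.h>

	struct demo_node {
		int key;
		struct demo_node __rcu *next;	/* tracked by sparse as RCU */
	};

	static struct demo_node __rcu *demo_head;

	/* Readers: loads of an __rcu pointer must go through
	 * rcu_dereference() inside an RCU read-side critical section,
	 * otherwise sparse warns. */
	static bool demo_contains(int key)
	{
		struct demo_node *n;
		bool found = false;

		rcu_read_lock();
		for (n = rcu_dereference(demo_head); n;
		     n = rcu_dereference(n->next))
			if (n->key == key) {
				found = true;
				break;
			}
		rcu_read_unlock();
		return found;
	}

	/* Writers (assumed serialized by the caller): stores must use
	 * rcu_assign_pointer()/RCU_INIT_POINTER(), not plain assignment. */
	static void demo_prepend(struct demo_node *node)
	{
		RCU_INIT_POINTER(node->next,
				 rcu_dereference_protected(demo_head, true));
		rcu_assign_pointer(demo_head, node);
	}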