| author | Hannes Frederic Sowa <hannes@stressinduktion.org> | 2015-10-08 01:20:36 +0200 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2015-10-08 05:26:36 -0700 |
| commit | c90aeb948222a7b3d3391d232ec4f50fd8322ad3 (patch) | |
| tree | c4625885c7705076fb157b6394fa3969b44bd9f3 /lib/once.c | |
| parent | 46234253b9363894a254844a6550b4cc5f3edfe8 (diff) | |
once: make helper generic for calling functions once
Make the get_random_once() helper generic enough that arbitrary functions
can be called exactly once; net_get_random_once() then becomes just one
user of the new facility.
The only implementation-specific call is to get_random_bytes(); all the
rest of this *_once() facility would otherwise be duplicated across
subsystems. The new DO_ONCE() helper will be used by prandom() later on,
but it may also be useful for other subsystems that need a one-time
initialization in often-called, possibly fast-path code.
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
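For context, the two exported helpers in the lib/once.c diff below are only half of the facility: the DO_ONCE() macro itself lives in include/linux/once.h, which is outside this page's diffstat. The following is a reconstructed sketch (not quoted from this page) of how the macro side ties __do_once_start()/__do_once_done() together around a static key; details may differ from the actual header.

```c
/* Sketch of the DO_ONCE() side in include/linux/once.h (not shown in the
 * lib/once.c diff below); reconstructed for illustration.
 */
#define DO_ONCE(func, ...)						      \
	({								      \
		bool ___ret = false;					      \
		static bool ___done = false;				      \
		static struct static_key ___once_key = STATIC_KEY_INIT_TRUE; \
		if (static_key_true(&___once_key)) {			      \
			unsigned long ___flags;				      \
			___ret = __do_once_start(&___done, &___flags);	      \
			if (unlikely(___ret)) {				      \
				/* only the very first caller gets here */   \
				func(__VA_ARGS__);			      \
				__do_once_done(&___done, &___once_key,	      \
					       &___flags);		      \
			}						      \
		}							      \
		___ret;							      \
	})

#define get_random_once(buf, nbytes)					      \
	DO_ONCE(get_random_bytes, (buf), (nbytes))
```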
Diffstat (limited to 'lib/once.c')
| -rw-r--r-- | lib/once.c | 50 |
1 file changed, 29 insertions, 21 deletions
diff --git a/lib/once.c b/lib/once.c
index 2d5a7de17aba..05c8604627eb 100644
--- a/lib/once.c
+++ b/lib/once.c
@@ -3,52 +3,60 @@
 #include <linux/once.h>
 #include <linux/random.h>
 
-struct __random_once_work {
+struct once_work {
 	struct work_struct work;
 	struct static_key *key;
 };
 
-static void __random_once_deferred(struct work_struct *w)
+static void once_deferred(struct work_struct *w)
 {
-	struct __random_once_work *work;
+	struct once_work *work;
 
-	work = container_of(w, struct __random_once_work, work);
+	work = container_of(w, struct once_work, work);
 	BUG_ON(!static_key_enabled(work->key));
 	static_key_slow_dec(work->key);
 	kfree(work);
 }
 
-static void __random_once_disable_jump(struct static_key *key)
+static void once_disable_jump(struct static_key *key)
 {
-	struct __random_once_work *w;
+	struct once_work *w;
 
 	w = kmalloc(sizeof(*w), GFP_ATOMIC);
 	if (!w)
 		return;
 
-	INIT_WORK(&w->work, __random_once_deferred);
+	INIT_WORK(&w->work, once_deferred);
 	w->key = key;
 	schedule_work(&w->work);
 }
 
-bool __get_random_once(void *buf, int nbytes, bool *done,
-		       struct static_key *once_key)
-{
-	static DEFINE_SPINLOCK(lock);
-	unsigned long flags;
+static DEFINE_SPINLOCK(once_lock);
 
-	spin_lock_irqsave(&lock, flags);
+bool __do_once_start(bool *done, unsigned long *flags)
+	__acquires(once_lock)
+{
+	spin_lock_irqsave(&once_lock, *flags);
 	if (*done) {
-		spin_unlock_irqrestore(&lock, flags);
+		spin_unlock_irqrestore(&once_lock, *flags);
+		/* Keep sparse happy by restoring an even lock count on
+		 * this lock. In case we return here, we don't call into
+		 * __do_once_done but return early in the DO_ONCE() macro.
+		 */
+		__acquire(once_lock);
 		return false;
 	}
 
-	get_random_bytes(buf, nbytes);
-	*done = true;
-	spin_unlock_irqrestore(&lock, flags);
-
-	__random_once_disable_jump(once_key);
-
 	return true;
 }
-EXPORT_SYMBOL(__get_random_once);
+EXPORT_SYMBOL(__do_once_start);
+
+void __do_once_done(bool *done, struct static_key *once_key,
+		    unsigned long *flags)
+	__releases(once_lock)
+{
+	*done = true;
+	spin_unlock_irqrestore(&once_lock, *flags);
+	once_disable_jump(once_key);
+}
+EXPORT_SYMBOL(__do_once_done);
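As a usage illustration only (hypothetical caller, not part of this commit), a one-time initialization in a hot path built on this facility could look roughly like this:

```c
/* Hypothetical caller, not from this commit: derive a per-boot secret
 * exactly once from a function that may run for every packet.
 */
#include <linux/jhash.h>
#include <linux/once.h>

static u32 flow_hash_secret;

static u32 flow_hash(u32 saddr, u32 daddr)
{
	/* Expands to DO_ONCE(get_random_bytes, ...): only the first
	 * caller takes the once_lock path; once the static key has been
	 * disabled, this line costs a single patched-out branch.
	 */
	get_random_once(&flow_hash_secret, sizeof(flow_hash_secret));

	return jhash_2words(saddr, daddr, flow_hash_secret);
}
```

After the first call, once_disable_jump() schedules static_key_slow_dec() from a workqueue (it cannot patch code from a possibly atomic context), so subsequent callers never reach __do_once_start() at all: the static-key branch skips the whole block.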