author     Peter Zijlstra <peterz@infradead.org>    2015-05-27 11:09:36 +0930
committer  Rusty Russell <rusty@rustcorp.com.au>    2015-05-28 11:32:06 +0930
commit     7fc26327b75685f37f58d64bdb061460f834f80d
tree       69fecbbe48ac91608e88987c0bd0c8e5cebfa1b5
parent     0a04b0166929405cd833c1cc40f99e862b965ddc
seqlock: Introduce raw_read_seqcount_latch()
Because with latches there is a strict data dependency on the seq load,
we can avoid the smp_rmb() in favour of a read_barrier_depends().
Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
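
To illustrate the dependency-ordering argument above, here is a minimal sketch of the latch read side that raw_read_seqcount_latch() is intended for, modelled on the documentation comment in seqlock.h and the __ktime_get_fast_ns() conversion below. The struct and function names (latch_example, latch_read) are made up for illustration and are not part of the patch.

#include <linux/seqlock.h>
#include <linux/types.h>

/* Two copies of the data; the writer flips latch->seq around updates. */
struct latch_example {
	seqcount_t	seq;
	u64		data[2];
};

static u64 latch_read(struct latch_example *latch)
{
	unsigned int seq, idx;
	u64 val;

	do {
		/*
		 * The sequence load heads a data dependency chain:
		 * seq -> idx -> data[idx].  The lockless_dereference()
		 * inside raw_read_seqcount_latch() provides dependency
		 * ordering (read_barrier_depends()), so no smp_rmb() is
		 * needed between the sequence load and the data access.
		 */
		seq = raw_read_seqcount_latch(&latch->seq);
		idx = seq & 0x01;
		val = latch->data[idx];
	} while (read_seqcount_retry(&latch->seq, seq));

	return val;
}

Note that read_seqcount_retry() still issues smp_rmb() before re-checking the sequence; only the barrier between the sequence load and the dependent data access is dropped.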
-rw-r--r--  include/linux/seqlock.h   | 9
-rw-r--r--  kernel/time/timekeeping.c | 2
2 files changed, 8 insertions(+), 3 deletions(-)
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 1c0cf3102fdc..890c7ef709d5 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -35,6 +35,7 @@
 #include <linux/spinlock.h>
 #include <linux/preempt.h>
 #include <linux/lockdep.h>
+#include <linux/compiler.h>
 #include <asm/processor.h>
 
 /*
@@ -233,6 +234,11 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
 	s->sequence++;
 }
 
+static inline int raw_read_seqcount_latch(seqcount_t *s)
+{
+	return lockless_dereference(s->sequence);
+}
+
 /**
  * raw_write_seqcount_latch - redirect readers to even/odd copy
  * @s: pointer to seqcount_t
@@ -284,8 +290,7 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
  *	unsigned seq, idx;
  *
  *	do {
- *		seq = latch->seq;
- *		smp_rmb();
+ *		seq = lockless_dereference(latch->seq);
  *
  *		idx = seq & 0x01;
  *		entry = data_query(latch->data[idx], ...);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index cbfedddbf0cb..266dafe8f015 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -393,7 +393,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 	u64 now;
 
 	do {
-		seq = raw_read_seqcount(&tkf->seq);
+		seq = raw_read_seqcount_latch(&tkf->seq);
 		tkr = tkf->base + (seq & 0x01);
 		now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
 	} while (read_seqcount_retry(&tkf->seq, seq));