author		Thomas Gleixner <tglx@linutronix.de>	2011-07-16 18:40:26 +0200
committer	Ingo Molnar <mingo@kernel.org>		2013-02-19 08:43:34 +0100
commit		6617feca15c68aad12f4a1edd8d8e2ef8d5b4ae5 (patch)
tree		da996f85d99a740ac4edf91cd0c5f55cb0e8e3b4 /include/linux/seqlock.h
parent		65c9d1bbc9f32c568a4f7ad154c9a10b0028df52 (diff)
seqlock: Use seqcount infrastructure
No point in having different implementations for the same
thing. Change the macro mess to inline functions where possible.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
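
For orientation, a minimal sketch of how the seqlock read/write API reworked by this patch is typically used. The seqlock calls are the kernel's real API; the state variables and the two functions around them are hypothetical:

	#include <linux/seqlock.h>
	#include <linux/types.h>

	static DEFINE_SEQLOCK(state_lock);
	static u64 state_a, state_b;

	/* Writer side: the spinlock embedded in seqlock_t serializes writers. */
	static void update_state(u64 a, u64 b)
	{
		write_seqlock(&state_lock);
		state_a = a;
		state_b = b;
		write_sequnlock(&state_lock);
	}

	/* Reader side: lockless; retry if a writer was active during the read. */
	static void read_state(u64 *a, u64 *b)
	{
		unsigned seq;

		do {
			seq = read_seqbegin(&state_lock);
			*a = state_a;
			*b = state_b;
		} while (read_seqretry(&state_lock, seq));
	}

The semantics of these calls are unchanged by the patch; only their implementation now delegates to the seqcount primitives.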
Diffstat (limited to 'include/linux/seqlock.h')
-rw-r--r--	include/linux/seqlock.h	174
1 file changed, 92 insertions, 82 deletions
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index cb0599ca049c..18299057402f 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -30,81 +30,12 @@
 #include <linux/preempt.h>
 #include <asm/processor.h>
 
-typedef struct {
-	unsigned sequence;
-	spinlock_t lock;
-} seqlock_t;
-
-/*
- * These macros triggered gcc-3.x compile-time problems. We think these are
- * OK now. Be cautious.
- */
-#define __SEQLOCK_UNLOCKED(lockname) \
-	{ 0, __SPIN_LOCK_UNLOCKED(lockname) }
-
-#define seqlock_init(x)					\
-	do {						\
-		(x)->sequence = 0;			\
-		spin_lock_init(&(x)->lock);		\
-	} while (0)
-
-#define DEFINE_SEQLOCK(x) \
-		seqlock_t x = __SEQLOCK_UNLOCKED(x)
-
-/* Lock out other writers and update the count.
- * Acts like a normal spin_lock/unlock.
- * Don't need preempt_disable() because that is in the spin_lock already.
- */
-static inline void write_seqlock(seqlock_t *sl)
-{
-	spin_lock(&sl->lock);
-	++sl->sequence;
-	smp_wmb();
-}
-
-static inline void write_sequnlock(seqlock_t *sl)
-{
-	smp_wmb();
-	sl->sequence++;
-	spin_unlock(&sl->lock);
-}
-
-/* Start of read calculation -- fetch last complete writer token */
-static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
-{
-	unsigned ret;
-
-repeat:
-	ret = ACCESS_ONCE(sl->sequence);
-	if (unlikely(ret & 1)) {
-		cpu_relax();
-		goto repeat;
-	}
-	smp_rmb();
-
-	return ret;
-}
-
-/*
- * Test if reader processed invalid data.
- *
- * If sequence value changed then writer changed data while in section.
- */
-static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
-{
-	smp_rmb();
-
-	return unlikely(sl->sequence != start);
-}
-
-
 /*
  * Version using sequence counter only.
  * This can be used when code has its own mutex protecting the
  * updating starting before the write_seqcountbeqin() and ending
  * after the write_seqcount_end().
  */
-
 typedef struct seqcount {
 	unsigned sequence;
 } seqcount_t;
@@ -207,7 +138,6 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
 static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
 	smp_rmb();
-
 	return __read_seqcount_retry(s, start);
 }
 
@@ -241,21 +171,101 @@ static inline void write_seqcount_barrier(seqcount_t *s)
 	s->sequence+=2;
 }
 
+typedef struct {
+	struct seqcount seqcount;
+	spinlock_t lock;
+} seqlock_t;
+
+/*
+ * These macros triggered gcc-3.x compile-time problems. We think these are
+ * OK now. Be cautious.
+ */
+#define __SEQLOCK_UNLOCKED(lockname)			\
+	{						\
+		.seqcount = SEQCNT_ZERO,		\
+		.lock = __SPIN_LOCK_UNLOCKED(lockname)	\
+	}
+
+#define seqlock_init(x)					\
+	do {						\
+		seqcount_init(&(x)->seqcount);		\
+		spin_lock_init(&(x)->lock);		\
+	} while (0)
+
+#define DEFINE_SEQLOCK(x) \
+		seqlock_t x = __SEQLOCK_UNLOCKED(x)
+
+/*
+ * Read side functions for starting and finalizing a read side section.
+ */
+static inline unsigned read_seqbegin(const seqlock_t *sl)
+{
+	return read_seqcount_begin(&sl->seqcount);
+}
+
+static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+{
+	return read_seqcount_retry(&sl->seqcount, start);
+}
+
 /*
- * Possible sw/hw IRQ protected versions of the interfaces.
+ * Lock out other writers and update the count.
+ * Acts like a normal spin_lock/unlock.
+ * Don't need preempt_disable() because that is in the spin_lock already.
  */
+static inline void write_seqlock(seqlock_t *sl)
+{
+	spin_lock(&sl->lock);
+	write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock(seqlock_t *sl)
+{
+	write_seqcount_end(&sl->seqcount);
+	spin_unlock(&sl->lock);
+}
+
+static inline void write_seqlock_bh(seqlock_t *sl)
+{
+	spin_lock_bh(&sl->lock);
+	write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock_bh(seqlock_t *sl)
+{
+	write_seqcount_end(&sl->seqcount);
+	spin_unlock_bh(&sl->lock);
+}
+
+static inline void write_seqlock_irq(seqlock_t *sl)
+{
+	spin_lock_irq(&sl->lock);
+	write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock_irq(seqlock_t *sl)
+{
+	write_seqcount_end(&sl->seqcount);
+	spin_unlock_irq(&sl->lock);
+}
+
+static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sl->lock, flags);
+	write_seqcount_begin(&sl->seqcount);
+	return flags;
+}
+
 #define write_seqlock_irqsave(lock, flags)			\
-	do { local_irq_save(flags); write_seqlock(lock); } while (0)
-#define write_seqlock_irq(lock)					\
-	do { local_irq_disable(); write_seqlock(lock); } while (0)
-#define write_seqlock_bh(lock)					\
-	do { local_bh_disable(); write_seqlock(lock); } while (0)
+	do { flags = __write_seqlock_irqsave(lock); } while (0)
 
-#define write_sequnlock_irqrestore(lock, flags)			\
-	do { write_sequnlock(lock); local_irq_restore(flags); } while(0)
-#define write_sequnlock_irq(lock)				\
-	do { write_sequnlock(lock); local_irq_enable(); } while(0)
-#define write_sequnlock_bh(lock)				\
-	do { write_sequnlock(lock); local_bh_enable(); } while(0)
+static inline void
+write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+{
+	write_seqcount_end(&sl->seqcount);
+	spin_unlock_irqrestore(&sl->lock, flags);
+}
 
 #endif /* __LINUX_SEQLOCK_H */
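
After this change, all sequence-count manipulation and memory-barrier placement lives in one place: the seqcount helpers. For reference, the write-side helpers the new inline functions delegate to, defined earlier in this same header and not shown in the hunks above, mirror the open-coded bodies removed in the first hunk (a sketch of the era's implementation, reproduced for orientation only):

	static inline void write_seqcount_begin(seqcount_t *s)
	{
		s->sequence++;	/* odd count: a writer is in progress */
		smp_wmb();	/* order the count bump before the data writes */
	}

	static inline void write_seqcount_end(seqcount_t *s)
	{
		smp_wmb();	/* order the data writes before the final bump */
		s->sequence++;	/* even count: data is consistent again */
	}

This is why the converted write_seqlock()/write_sequnlock() need no explicit barriers of their own: taking the spinlock and bracketing the critical section with write_seqcount_begin()/write_seqcount_end() reproduces exactly the increment-and-barrier sequence the old macros and inlines spelled out by hand.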