linux/include/linux/seqlock.h
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. Readers never
 * block, but they may have to retry if a writer is in progress.
 * Writers do not wait for readers.
 *
 * This is not as cache friendly as brlock. Also, this will not work
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected reader usage:
 *      do {
 *          seq = read_seqbegin(&foo);
 *          ...
 *      } while (read_seqretry(&foo, seq));
 *
 * On non-SMP the spin locks disappear, but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on the x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

#include <linux/spinlock.h>
#include <linux/preempt.h>

typedef struct {
        unsigned sequence;
        spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems.  We think these are
 * OK now.  Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname) \
                 { 0, __SPIN_LOCK_UNLOCKED(lockname) }

#define SEQLOCK_UNLOCKED \
                 __SEQLOCK_UNLOCKED(old_style_seqlock_init)

#define seqlock_init(x)                                 \
        do {                                            \
                (x)->sequence = 0;                      \
                spin_lock_init(&(x)->lock);             \
        } while (0)

#define DEFINE_SEQLOCK(x) \
                seqlock_t x = __SEQLOCK_UNLOCKED(x)

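/*
 * Illustrative declarations (a sketch, not part of this header; the names
 * "events_lock", "struct foo" and "foo_ptr" are made up for the example).
 * A static lock can use the compile-time initializer:
 *
 *      static DEFINE_SEQLOCK(events_lock);
 *
 * while a lock embedded in a dynamically allocated object is set up at
 * runtime:
 *
 *      struct foo {
 *              seqlock_t lock;
 *      };
 *      seqlock_init(&foo_ptr->lock);
 */
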
/* Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * preempt_disable() is not needed because spin_lock already disables
 * preemption.
 */
static inline void write_seqlock(seqlock_t *sl)
{
        spin_lock(&sl->lock);
        ++sl->sequence;         /* sequence is now odd: write in progress */
        smp_wmb();
}

static inline void write_sequnlock(seqlock_t *sl)
{
        smp_wmb();
        sl->sequence++;         /* sequence is even again: write complete */
        spin_unlock(&sl->lock);
}
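
/*
 * Example writer (a minimal sketch; "foo_lock", "foo" and "foo_set" are
 * made-up names, not part of this header). All stores to the protected
 * data happen between write_seqlock() and write_sequnlock():
 *
 *      static DEFINE_SEQLOCK(foo_lock);
 *      static struct { unsigned long sec, nsec; } foo;
 *
 *      static void foo_set(unsigned long sec, unsigned long nsec)
 *      {
 *              write_seqlock(&foo_lock);
 *              foo.sec = sec;
 *              foo.nsec = nsec;
 *              write_sequnlock(&foo_lock);
 *      }
 */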

static inline int write_tryseqlock(seqlock_t *sl)
{
        int ret = spin_trylock(&sl->lock);

        if (ret) {
                ++sl->sequence;
                smp_wmb();
        }
        return ret;
}
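
/*
 * Example: an update path that must not spin can use write_tryseqlock()
 * and defer the work on failure (a sketch; "foo_set_deferred" is a
 * hypothetical helper):
 *
 *      if (write_tryseqlock(&foo_lock)) {
 *              foo.sec = sec;
 *              foo.nsec = nsec;
 *              write_sequnlock(&foo_lock);
 *      } else {
 *              foo_set_deferred(sec, nsec);
 *      }
 */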

/* Start of read calculation -- fetch last complete writer token */
static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
{
        unsigned ret;

repeat:
        ret = sl->sequence;
        smp_rmb();
        if (unlikely(ret & 1)) {        /* odd: writer in progress, wait */
                cpu_relax();
                goto repeat;
        }

        return ret;
}

/*
 * Test if the reader processed invalid data.
 *
 * If the sequence value changed, a writer modified the data while the
 * reader was inside the critical section.
 */
static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
{
        smp_rmb();

        return unlikely(sl->sequence != start);
}
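
/*
 * Example reader matching the foo_set() sketch above: copy the protected
 * fields into locals, then retry the whole section if a writer intervened:
 *
 *      unsigned long sec, nsec;
 *      unsigned seq;
 *
 *      do {
 *              seq = read_seqbegin(&foo_lock);
 *              sec = foo.sec;
 *              nsec = foo.nsec;
 *      } while (read_seqretry(&foo_lock, seq));
 */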

/*
 * Version using the sequence counter only.
 * This can be used when the code has its own mutex protecting the
 * update, taken before the write_seqcount_begin() and released
 * after the write_seqcount_end().
 */

typedef struct seqcount {
        unsigned sequence;
} seqcount_t;

#define SEQCNT_ZERO { 0 }
#define seqcount_init(x)        do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)

/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
        unsigned ret;

repeat:
        ret = s->sequence;
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
        }
        return ret;
}

/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling read_seqcount_retry.
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
        unsigned ret = __read_seqcount_begin(s);
        smp_rmb();
        return ret;
}

/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
        return unlikely(s->sequence != start);
}

/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
        smp_rmb();

        return __read_seqcount_retry(s, start);
}

/*
 * The sequence counter only version assumes that callers are using their
 * own locking to serialize writers.
 */
static inline void write_seqcount_begin(seqcount_t *s)
{
        s->sequence++;
        smp_wmb();
}

static inline void write_seqcount_end(seqcount_t *s)
{
        smp_wmb();
        s->sequence++;
}
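
/*
 * Example use of the seqcount-only variant (a sketch; "foo_mutex",
 * "foo_seq" and "foo" are assumed to be defined by the caller, e.g. a
 * struct mutex from <linux/mutex.h>). The caller's own lock serializes
 * writers; the seqcount only publishes consistency to lockless readers:
 *
 *      Writer:
 *              mutex_lock(&foo_mutex);
 *              write_seqcount_begin(&foo_seq);
 *              foo.sec = sec;
 *              foo.nsec = nsec;
 *              write_seqcount_end(&foo_seq);
 *              mutex_unlock(&foo_mutex);
 *
 *      Reader:
 *              do {
 *                      seq = read_seqcount_begin(&foo_seq);
 *                      sec = foo.sec;
 *                      nsec = foo.nsec;
 *              } while (read_seqcount_retry(&foo_seq, seq));
 */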

/**
 * write_seqcount_barrier - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_barrier, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_barrier(seqcount_t *s)
{
        smp_wmb();
        s->sequence += 2;       /* += 2 keeps the counter even (no writer) */
}

/*
 * Possible sw/hw IRQ protected versions of the interfaces.
 */
#define write_seqlock_irqsave(lock, flags)                              \
        do { local_irq_save(flags); write_seqlock(lock); } while (0)
#define write_seqlock_irq(lock)                                         \
        do { local_irq_disable();   write_seqlock(lock); } while (0)
#define write_seqlock_bh(lock)                                          \
        do { local_bh_disable();    write_seqlock(lock); } while (0)

#define write_sequnlock_irqrestore(lock, flags)                         \
        do { write_sequnlock(lock); local_irq_restore(flags); } while (0)
#define write_sequnlock_irq(lock)                                       \
        do { write_sequnlock(lock); local_irq_enable(); } while (0)
#define write_sequnlock_bh(lock)                                        \
        do { write_sequnlock(lock); local_bh_enable(); } while (0)

#define read_seqbegin_irqsave(lock, flags)                              \
        ({ local_irq_save(flags);   read_seqbegin(lock); })

#define read_seqretry_irqrestore(lock, iv, flags)                       \
        ({                                                              \
                int ret = read_seqretry(lock, iv);                      \
                local_irq_restore(flags);                               \
                ret;                                                    \
        })
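
/*
 * Example: a reader that must also keep local interrupts off across the
 * section (a sketch; "foo_lock" and "foo" as in the examples above):
 *
 *      unsigned long flags;
 *      unsigned seq;
 *
 *      do {
 *              seq = read_seqbegin_irqsave(&foo_lock, flags);
 *              sec = foo.sec;
 *              nsec = foo.nsec;
 *      } while (read_seqretry_irqrestore(&foo_lock, seq, flags));
 */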

#endif /* __LINUX_SEQLOCK_H */