linux/include/linux/seqlock.h
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. There are two types
 * of readers:
 * 1. Sequence readers, which never block a writer but may have to retry
 *    if a writer is in progress; a change in the sequence number indicates
 *    that a retry is needed. Writers do not wait for a sequence reader.
 * 2. Locking readers, which will wait if a writer or another locking reader
 *    is in progress. A locking reader in progress will also block a writer
 *    from going forward. Unlike the regular rwlock, the read lock here is
 *    exclusive so that only one locking reader can hold it.
 *
 * This is not as cache friendly as brlock. Also, this may not work well
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected non-blocking reader usage:
 *      do {
 *          seq = read_seqbegin(&foo);
 *      ...
 *      } while (read_seqretry(&foo, seq));
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <asm/processor.h>

/*
 * Version using the sequence counter only.
 * This can be used when code has its own mutex protecting the
 * update, starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */
typedef struct seqcount {
        unsigned sequence;
} seqcount_t;

#define SEQCNT_ZERO { 0 }
#define seqcount_init(x)        do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
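
/*
 * Illustrative sketch (not taken from any particular user; the names
 * "struct foo" and "foo" are made up): a seqcount_t normally lives next
 * to the data it protects and is initialized statically or at runtime.
 *
 *      struct foo {
 *              seqcount_t seq;
 *              u64 a, b;
 *      };
 *
 *      static struct foo foo = { .seq = SEQCNT_ZERO };
 *
 *      or, for runtime initialization:
 *
 *      seqcount_init(&foo.seq);
 */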

static inline void __seqcount_init(seqcount_t *s, const char *name,
                                          struct lock_class_key *key)
{
        s->sequence = 0;
}

/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
        unsigned ret;

repeat:
        ret = READ_ONCE(s->sequence);
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
        }
        return ret;
}
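
/*
 * Illustrative sketch (made-up "foo" data): a caller of
 * __read_seqcount_begin() must supply the ordering itself before loading
 * the protected variables; read_seqcount_begin() below does exactly this.
 *
 *      seq = __read_seqcount_begin(&foo.seq);
 *      smp_rmb();              (caller-supplied barrier)
 *      a = foo.a;
 *      b = foo.b;
 *      ...
 *      if (read_seqcount_retry(&foo.seq, seq))
 *              goto retry;
 */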

/**
 * raw_read_seqcount - Read the raw seqcount
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount without any lockdep checking and without checking or
 * masking the LSB. Calling code is responsible for handling that.
 */
static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
        unsigned ret = READ_ONCE(s->sequence);
        smp_rmb();
        return ret;
}

/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * The validity of the critical section is tested afterwards with
 * read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
        unsigned ret = __read_seqcount_begin(s);
        smp_rmb();
        return ret;
}

/**
 * raw_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_seqcount_begin opens a read critical section of the given seqcount.
 * The validity of the critical section is tested afterwards with
 * read_seqcount_retry().
 *
 * Unlike read_seqcount_begin(), this function will not wait for the count
 * to stabilize. If a writer is active when we begin, the subsequent
 * read_seqcount_retry() will fail instead of the count stabilizing at the
 * beginning of the critical section.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
        unsigned ret = READ_ONCE(s->sequence);
        smp_rmb();
        return ret & ~1;
}
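
/*
 * Illustrative sketch (made-up names): raw_seqcount_begin() suits an
 * optimistic fast path that falls back to a slow path instead of spinning
 * while a writer is active.
 *
 *      seq = raw_seqcount_begin(&foo.seq);
 *      a = foo.a;
 *      b = foo.b;
 *      if (read_seqcount_retry(&foo.seq, seq))
 *              return foo_read_slowpath();     (writer active or raced)
 */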

/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided after the loads of the variables protected by this critical
 * section and before the retry check itself.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
        return unlikely(s->sequence != start);
}

/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
        smp_rmb();
        return __read_seqcount_retry(s, start);
}
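
/*
 * Illustrative sketch (made-up "foo" data): the usual lockless read loop
 * with a bare sequence counter; the writer serializes itself separately.
 *
 *      do {
 *              seq = read_seqcount_begin(&foo.seq);
 *              a = foo.a;
 *              b = foo.b;
 *      } while (read_seqcount_retry(&foo.seq, seq));
 */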

/*
 * raw_write_seqcount_latch - redirect readers to even/odd copy
 * @s: pointer to seqcount_t
 */
static inline void raw_write_seqcount_latch(seqcount_t *s)
{
        smp_wmb();      /* prior stores before incrementing "sequence" */
        s->sequence++;
        smp_wmb();      /* increment "sequence" before following stores */
}
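
/*
 * Illustrative sketch of the latch technique (made-up names): two copies of
 * the data are kept, and the writer redirects readers to the stable copy
 * before modifying the other one. The writer must be serialized externally.
 *
 *      struct latch_struct {
 *              seqcount_t              seq;
 *              struct data_struct      data[2];
 *      };
 *
 *      Writer:
 *
 *      raw_write_seqcount_latch(&latch->seq);
 *      modify(&latch->data[0], ...);
 *      raw_write_seqcount_latch(&latch->seq);
 *      modify(&latch->data[1], ...);
 *
 *      Reader:
 *
 *      do {
 *              seq = raw_read_seqcount(&latch->seq);
 *              idx = seq & 0x01;
 *              entry = data_query(&latch->data[idx], ...);
 *      } while (read_seqcount_retry(&latch->seq, seq));
 */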

/*
 * Sequence counter only version; assumes that callers provide their own
 * mutual exclusion among writers.
 */
static inline void write_seqcount_begin(seqcount_t *s)
{
        s->sequence++;
        smp_wmb();
}

static inline void write_seqcount_end(seqcount_t *s)
{
        smp_wmb();
        s->sequence++;
}
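
/*
 * Illustrative sketch (made-up "foo" and "foo_lock"): the writer provides
 * its own mutual exclusion and brackets the update with the seqcount.
 *
 *      spin_lock(&foo_lock);
 *      write_seqcount_begin(&foo.seq);
 *      foo.a = new_a;
 *      foo.b = new_b;
 *      write_seqcount_end(&foo.seq);
 *      spin_unlock(&foo_lock);
 */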

/**
 * write_seqcount_barrier - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_barrier, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_barrier(seqcount_t *s)
{
        smp_wmb();
        s->sequence += 2;
}
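
/*
 * Illustrative sketch (made-up names, one possible use only): when the
 * update itself is a single store that readers can never see half-done,
 * but readers that sampled the old value must still be forced to retry,
 * the counter can be bumped by two instead of a full begin/end pair.
 *
 *      spin_lock(&foo_lock);
 *      WRITE_ONCE(foo.state, new_state);
 *      write_seqcount_barrier(&foo.seq);
 *      spin_unlock(&foo_lock);
 */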

typedef struct {
        struct seqcount seqcount;
        spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems.  We think these are
 * OK now.  Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname)                    \
        {                                               \
                .seqcount = SEQCNT_ZERO,                \
                .lock = __SPIN_LOCK_UNLOCKED(lockname)  \
        }

#define seqlock_init(x)                                 \
        do {                                            \
                seqcount_init(&(x)->seqcount);          \
                spin_lock_init(&(x)->lock);             \
        } while (0)

#define DEFINE_SEQLOCK(x) \
                seqlock_t x = __SEQLOCK_UNLOCKED(x)
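
/*
 * Illustrative sketch (made-up names): a seqlock_t is defined statically
 * or initialized at runtime.
 *
 *      static DEFINE_SEQLOCK(foo_lock);
 *
 *      or, for a dynamically allocated object:
 *
 *      seqlock_init(&foo->lock);
 */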

/*
 * Read side functions for starting and finalizing a read side section.
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
        return read_seqcount_begin(&sl->seqcount);
}

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
        return read_seqcount_retry(&sl->seqcount, start);
}

/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * There is no need for preempt_disable() because spin_lock() already does it.
 */
static inline void write_seqlock(seqlock_t *sl)
{
        spin_lock(&sl->lock);
        write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock(seqlock_t *sl)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock(&sl->lock);
}
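
/*
 * Illustrative sketch (made-up names): with a seqlock_t the writer side is
 * self-contained, and readers use the retry loop shown at the top of this
 * file.
 *
 *      Writer:
 *
 *      write_seqlock(&foo_lock);
 *      foo.a = new_a;
 *      foo.b = new_b;
 *      write_sequnlock(&foo_lock);
 *
 *      Reader:
 *
 *      do {
 *              seq = read_seqbegin(&foo_lock);
 *              a = foo.a;
 *              b = foo.b;
 *      } while (read_seqretry(&foo_lock, seq));
 */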

static inline void write_seqlock_bh(seqlock_t *sl)
{
        spin_lock_bh(&sl->lock);
        write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_bh(seqlock_t *sl)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock_bh(&sl->lock);
}

static inline void write_seqlock_irq(seqlock_t *sl)
{
        spin_lock_irq(&sl->lock);
        write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_irq(seqlock_t *sl)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
        unsigned long flags;

        spin_lock_irqsave(&sl->lock, flags);
        write_seqcount_begin(&sl->seqcount);
        return flags;
}

#define write_seqlock_irqsave(lock, flags)                              \
        do { flags = __write_seqlock_irqsave(lock); } while (0)

static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock_irqrestore(&sl->lock, flags);
}

/*
 * A locking reader exclusively locks out writers and other locking readers,
 * but does not update the sequence number. Acts like a normal spin_lock/unlock.
 * There is no need for preempt_disable() because spin_lock() already does it.
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
        spin_lock(&sl->lock);
}

static inline void read_sequnlock_excl(seqlock_t *sl)
{
        spin_unlock(&sl->lock);
}
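
/*
 * Illustrative sketch (made-up names): a locking reader is useful when the
 * read side cannot simply be retried, e.g. because it is long or has side
 * effects; it excludes writers and other locking readers for its duration.
 *
 *      read_seqlock_excl(&foo_lock);
 *      ... read the data consistently, no retry loop needed ...
 *      read_sequnlock_excl(&foo_lock);
 */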

/**
 * read_seqbegin_or_lock - begin a sequence number check or locking block
 * @lock: sequence lock
 * @seq : sequence number to be checked
 *
 * First try it once optimistically without taking the lock. If that fails,
 * take the lock. The sequence number is also used as a marker for deciding
 * whether to be a lockless reader (even) or a locking reader (odd).
 * N.B. seq must be initialized to an even number to begin with.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
        if (!(*seq & 1))        /* Even */
                *seq = read_seqbegin(lock);
        else                    /* Odd */
                read_seqlock_excl(lock);
}

static inline int need_seqretry(seqlock_t *lock, int seq)
{
        return !(seq & 1) && read_seqretry(lock, seq);
}

static inline void done_seqretry(seqlock_t *lock, int seq)
{
        if (seq & 1)
                read_sequnlock_excl(lock);
}
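
/*
 * Illustrative sketch (made-up names), in the style of existing users such
 * as the dcache's d_walk(): try a lockless pass first and, if it was
 * invalidated, retry as a locking reader.
 *
 *      int seq = 0;            (even: start as a lockless reader)
 * again:
 *      read_seqbegin_or_lock(&foo_lock, &seq);
 *      ... walk the data ...
 *      if (need_seqretry(&foo_lock, seq)) {
 *              seq = 1;        (odd: retry as a locking reader)
 *              goto again;
 *      }
 *      done_seqretry(&foo_lock, seq);
 */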

static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
        spin_lock_bh(&sl->lock);
}

static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
        spin_unlock_bh(&sl->lock);
}

static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
        spin_lock_irq(&sl->lock);
}

static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
        spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
        unsigned long flags;

        spin_lock_irqsave(&sl->lock, flags);
        return flags;
}

#define read_seqlock_excl_irqsave(lock, flags)                          \
        do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
        spin_unlock_irqrestore(&sl->lock, flags);
}

static inline unsigned long read_seqbegin_or_lock_irqsave(seqlock_t *lock,
                                                          int *seq)
{
        unsigned long flags = 0;

        if (!(*seq & 1))        /* Even */
                *seq = read_seqbegin(lock);
        else                    /* Odd */
                read_seqlock_excl_irqsave(lock, flags);
        return flags;
}

static inline void done_seqretry_irqrestore(seqlock_t *lock, int seq,
                                            unsigned long flags)
{
        if (seq & 1)
                read_sequnlock_excl_irqrestore(lock, flags);
}
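
/*
 * Illustrative sketch (made-up names): the irqsave variants follow the same
 * pattern, disabling interrupts only for the locking (odd) pass.
 *
 *      unsigned long flags;
 *      int seq = 0;
 * again:
 *      flags = read_seqbegin_or_lock_irqsave(&foo_lock, &seq);
 *      ... walk the data ...
 *      if (need_seqretry(&foo_lock, seq)) {
 *              seq = 1;
 *              goto again;
 *      }
 *      done_seqretry_irqrestore(&foo_lock, seq, flags);
 */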

#endif /* __LINUX_SEQLOCK_H */