linux/include/linux/seqlock.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H

/*
 * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
 * lockless readers (read-only retry loops), and no writer starvation.
 *
 * See Documentation/locking/seqlock.rst
 *
 * Copyrights:
 * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
 * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
 */

#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>

#include <asm/processor.h>

/*
 * The seqlock seqcount_t interface does not prescribe a precise sequence of
 * read begin/retry/end. For readers, typically there is a call to
 * read_seqcount_begin() and read_seqcount_retry(); however, there are more
 * esoteric cases which do not follow this pattern.
 *
 * As a consequence, we take the following best-effort approach for raw usage
 * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
 * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
 * atomics; if there is a matching read_seqcount_retry() call, no following
 * memory operations are considered atomic. Usage of the seqlock_t interface
 * is not affected.
 */
#define KCSAN_SEQLOCK_REGION_MAX 1000

/*
 * Sequence counters (seqcount_t)
 *
 * This is the raw counting mechanism, without any writer protection.
 *
 * Write side critical sections must be serialized and non-preemptible.
 *
 * If readers can be invoked from hardirq or softirq contexts,
 * interrupts or bottom halves must also be respectively disabled before
 * entering the write section.
 *
 * This mechanism can't be used if the protected data contains pointers,
 * as the writer can invalidate a pointer that a reader is following.
 *
 * If the write serialization mechanism is one of the common kernel
 * locking primitives, use a sequence counter with associated lock
 * (seqcount_LOCKNAME_t) instead.
 *
 * If it's desired to automatically handle the sequence counter writer
 * serialization and non-preemptibility requirements, use a sequential
 * lock (seqlock_t) instead.
 *
 * See Documentation/locking/seqlock.rst
 */
typedef struct seqcount {
        unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} seqcount_t;

static inline void __seqcount_init(seqcount_t *s, const char *name,
                                          struct lock_class_key *key)
{
        /*
         * Make sure we are not reinitializing a held lock:
         */
        lockdep_init_map(&s->dep_map, name, key, 0);
        s->sequence = 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

# define SEQCOUNT_DEP_MAP_INIT(lockname)                                \
                .dep_map = { .name = #lockname }

/**
 * seqcount_init() - runtime initializer for seqcount_t
 * @s: Pointer to the seqcount_t instance
 */
# define seqcount_init(s)                                               \
        do {                                                            \
                static struct lock_class_key __key;                     \
                __seqcount_init((s), #s, &__key);                       \
        } while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
        seqcount_t *l = (seqcount_t *)s;
        unsigned long flags;

        local_irq_save(flags);
        seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
        seqcount_release(&l->dep_map, _RET_IP_);
        local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif

/**
 * SEQCNT_ZERO() - static initializer for seqcount_t
 * @name: Name of the seqcount_t instance
 */
#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
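
/*
 * Example (an illustrative sketch; the foo_* names are hypothetical and
 * not part of this header): a plain seqcount_t protecting two scalars,
 * with the writer externally serialized and non-preemptible, per the
 * requirements above::
 *
 *      static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *      static u64 foo_a, foo_b;
 *
 *      void foo_write(u64 a, u64 b)    // caller serialized, non-preemptible
 *      {
 *              write_seqcount_begin(&foo_seq);
 *              foo_a = a;
 *              foo_b = b;
 *              write_seqcount_end(&foo_seq);
 *      }
 *
 *      void foo_read(u64 *a, u64 *b)
 *      {
 *              unsigned seq;
 *
 *              do {
 *                      seq = read_seqcount_begin(&foo_seq);
 *                      *a = foo_a;
 *                      *b = foo_b;
 *              } while (read_seqcount_retry(&foo_seq, seq));
 *      }
 */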

/*
 * Sequence counters with associated locks (seqcount_LOCKNAME_t)
 *
 * A sequence counter which associates the lock used for writer
 * serialization at initialization time. This enables lockdep to validate
 * that the write side critical section is properly serialized.
 *
 * For associated locks which do not implicitly disable preemption,
 * preemption protection is enforced in the write side function.
 *
 * Lockdep is never used in any of the raw write variants.
 *
 * See Documentation/locking/seqlock.rst
 */

/*
 * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
 * disable preemption. It can lead to higher latencies, and the write side
 * sections will not be able to acquire locks which become sleeping locks
 * (e.g. spinlock_t).
 *
 * To remain preemptible while avoiding a possible livelock caused by the
 * reader preempting the writer, use a different technique: let the reader
 * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
 * case, acquire then release the associated LOCKNAME writer serialization
 * lock. This will allow any possibly-preempted writer to make progress
 * until the end of its writer serialization lock critical section.
 *
 * This lock-unlock technique must be implemented for all of PREEMPT_RT
 * sleeping locks.  See Documentation/locking/locktypes.rst
 */
#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
#define __SEQ_LOCK(expr)        expr
#else
#define __SEQ_LOCK(expr)
#endif

/*
 * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
 * @seqcount:   The real sequence counter
 * @lock:       Pointer to the associated lock
 *
 * A plain sequence counter with external writer synchronization by
 * LOCKNAME @lock. The lock is associated with the sequence counter in the
 * static initializer or init function. This enables lockdep to validate
 * that the write side critical section is properly serialized.
 *
 * LOCKNAME:    raw_spinlock, spinlock, rwlock, mutex, or ww_mutex.
 */

/*
 * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
 * @s:          Pointer to the seqcount_LOCKNAME_t instance
 * @lock:       Pointer to the associated lock
 */

#define seqcount_LOCKNAME_init(s, _lock, lockname)                      \
        do {                                                            \
                seqcount_##lockname##_t *____s = (s);                   \
                seqcount_init(&____s->seqcount);                        \
                __SEQ_LOCK(____s->lock = (_lock));                      \
        } while (0)

#define seqcount_raw_spinlock_init(s, lock)     seqcount_LOCKNAME_init(s, lock, raw_spinlock)
#define seqcount_spinlock_init(s, lock)         seqcount_LOCKNAME_init(s, lock, spinlock)
#define seqcount_rwlock_init(s, lock)           seqcount_LOCKNAME_init(s, lock, rwlock)
#define seqcount_mutex_init(s, lock)            seqcount_LOCKNAME_init(s, lock, mutex)
#define seqcount_ww_mutex_init(s, lock)         seqcount_LOCKNAME_init(s, lock, ww_mutex)
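
/*
 * Example (an illustrative sketch; struct foo is hypothetical): a
 * seqcount_spinlock_t whose writers are serialized by an associated
 * spinlock_t, letting lockdep verify that writers hold the lock::
 *
 *      struct foo {
 *              spinlock_t              lock;
 *              seqcount_spinlock_t     seq;
 *              u64                     data;
 *      };
 *
 *      void foo_init(struct foo *f)
 *      {
 *              spin_lock_init(&f->lock);
 *              seqcount_spinlock_init(&f->seq, &f->lock);
 *      }
 *
 *      void foo_write(struct foo *f, u64 v)
 *      {
 *              spin_lock(&f->lock);
 *              write_seqcount_begin(&f->seq);  // lockdep checks f->lock is held
 *              f->data = v;
 *              write_seqcount_end(&f->seq);
 *              spin_unlock(&f->lock);
 *      }
 */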

/*
 * SEQCOUNT_LOCKNAME()  - Instantiate seqcount_LOCKNAME_t and helpers
 * seqprop_LOCKNAME_*() - Property accessors for seqcount_LOCKNAME_t
 *
 * @lockname:           "LOCKNAME" part of seqcount_LOCKNAME_t
 * @locktype:           LOCKNAME canonical C data type
 * @preemptible:        preemptibility of above locktype
 * @lockmember:         argument for lockdep_assert_held()
 * @lockbase:           associated lock release function (prefix only)
 * @lock_acquire:       associated lock acquisition function (full call)
 */
#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember, lockbase, lock_acquire) \
typedef struct seqcount_##lockname {                                    \
        seqcount_t              seqcount;                               \
        __SEQ_LOCK(locktype     *lock);                                 \
} seqcount_##lockname##_t;                                              \
                                                                        \
static __always_inline seqcount_t *                                     \
__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s)                  \
{                                                                       \
        return &s->seqcount;                                            \
}                                                                       \
                                                                        \
static __always_inline unsigned                                         \
__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s)       \
{                                                                       \
        unsigned seq = READ_ONCE(s->seqcount.sequence);                 \
                                                                        \
        if (!IS_ENABLED(CONFIG_PREEMPT_RT))                             \
                return seq;                                             \
                                                                        \
        if (preemptible && unlikely(seq & 1)) {                         \
                __SEQ_LOCK(lock_acquire);                               \
                __SEQ_LOCK(lockbase##_unlock(s->lock));                 \
                                                                        \
                /*                                                      \
                 * Re-read the sequence counter since the (possibly     \
                 * preempted) writer made progress.                     \
                 */                                                     \
                seq = READ_ONCE(s->seqcount.sequence);                  \
        }                                                               \
                                                                        \
        return seq;                                                     \
}                                                                       \
                                                                        \
static __always_inline bool                                             \
__seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s)    \
{                                                                       \
        if (!IS_ENABLED(CONFIG_PREEMPT_RT))                             \
                return preemptible;                                     \
                                                                        \
        /* PREEMPT_RT relies on the above LOCK+UNLOCK */                \
        return false;                                                   \
}                                                                       \
                                                                        \
static __always_inline void                                             \
__seqprop_##lockname##_assert(const seqcount_##lockname##_t *s)         \
{                                                                       \
        __SEQ_LOCK(lockdep_assert_held(lockmember));                    \
}

/*
 * __seqprop() for seqcount_t
 */

static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
{
        return s;
}

static inline unsigned __seqprop_sequence(const seqcount_t *s)
{
        return READ_ONCE(s->sequence);
}

static inline bool __seqprop_preemptible(const seqcount_t *s)
{
        return false;
}

static inline void __seqprop_assert(const seqcount_t *s)
{
        lockdep_assert_preemption_disabled();
}

#define __SEQ_RT        IS_ENABLED(CONFIG_PREEMPT_RT)

SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t,  false,    s->lock,        raw_spin, raw_spin_lock(s->lock))
SEQCOUNT_LOCKNAME(spinlock,     spinlock_t,      __SEQ_RT, s->lock,        spin,     spin_lock(s->lock))
SEQCOUNT_LOCKNAME(rwlock,       rwlock_t,        __SEQ_RT, s->lock,        read,     read_lock(s->lock))
SEQCOUNT_LOCKNAME(mutex,        struct mutex,    true,     s->lock,        mutex,    mutex_lock(s->lock))
SEQCOUNT_LOCKNAME(ww_mutex,     struct ww_mutex, true,     &s->lock->base, ww_mutex, ww_mutex_lock(s->lock, NULL))

/*
 * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
 * @name:       Name of the seqcount_LOCKNAME_t instance
 * @lock:       Pointer to the associated LOCKNAME
 */

#define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) {                  \
        .seqcount               = SEQCNT_ZERO(seq_name.seqcount),       \
        __SEQ_LOCK(.lock        = (assoc_lock))                         \
}

#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock)    SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_SPINLOCK_ZERO(name, lock)        SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_RWLOCK_ZERO(name, lock)          SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_MUTEX_ZERO(name, lock)           SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_WW_MUTEX_ZERO(name, lock)        SEQCOUNT_LOCKNAME_ZERO(name, lock)

#define __seqprop_case(s, lockname, prop)                               \
        seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s))

#define __seqprop(s, prop) _Generic(*(s),                               \
        seqcount_t:             __seqprop_##prop((void *)(s)),          \
        __seqprop_case((s),     raw_spinlock,   prop),                  \
        __seqprop_case((s),     spinlock,       prop),                  \
        __seqprop_case((s),     rwlock,         prop),                  \
        __seqprop_case((s),     mutex,          prop),                  \
        __seqprop_case((s),     ww_mutex,       prop))
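
/*
 * For instance (illustrative), given a seqcount_spinlock_t instance 'sc',
 * __seqprop(&sc, sequence) resolves via the _Generic selection above to
 * __seqprop_spinlock_sequence((void *)(&sc)); for a plain seqcount_t it
 * resolves to __seqprop_sequence() instead.
 */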

#define __seqcount_ptr(s)               __seqprop(s, ptr)
#define __seqcount_sequence(s)          __seqprop(s, sequence)
#define __seqcount_lock_preemptible(s)  __seqprop(s, preemptible)
#define __seqcount_assert_lock_held(s)  __seqprop(s, assert)

/**
 * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define __read_seqcount_begin(s)                                        \
({                                                                      \
        unsigned seq;                                                   \
                                                                        \
        while ((seq = __seqcount_sequence(s)) & 1)                      \
                cpu_relax();                                            \
                                                                        \
        kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);                    \
        seq;                                                            \
})

/**
 * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_read_seqcount_begin(s)                                      \
({                                                                      \
        unsigned seq = __read_seqcount_begin(s);                        \
                                                                        \
        smp_rmb();                                                      \
        seq;                                                            \
})

/**
 * read_seqcount_begin() - begin a seqcount_t read critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define read_seqcount_begin(s)                                          \
({                                                                      \
        seqcount_lockdep_reader_access(__seqcount_ptr(s));              \
        raw_read_seqcount_begin(s);                                     \
})

/**
 * raw_read_seqcount() - read the raw seqcount_t counter value
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount_t, without any lockdep checking, and without checking or
 * masking the sequence counter LSB. Calling code is responsible for
 * handling that.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_read_seqcount(s)                                            \
({                                                                      \
        unsigned seq = __seqcount_sequence(s);                          \
                                                                        \
        smp_rmb();                                                      \
        kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);                    \
        seq;                                                            \
})

/**
 * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
 *                        lockdep and w/o counter stabilization
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * raw_seqcount_begin opens a read critical section of the given
 * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
 * for the count to stabilize. If a writer is active when it begins, it
 * will fail the read_seqcount_retry() at the end of the read critical
 * section instead of stabilizing at the beginning of it.
 *
 * Use this only in special kernel hot paths where the read section is
 * small and has a high probability of success through other external
 * means. It will save a single branching instruction.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_seqcount_begin(s)                                           \
({                                                                      \
        /*                                                              \
         * If the counter is odd, let read_seqcount_retry() fail        \
         * by decrementing the counter.                                 \
         */                                                             \
        raw_read_seqcount(s) & ~1;                                      \
})

/**
 * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @start: count, from read_seqcount_begin()
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 *
 * Return: true if a read section retry is required, else false
 */
#define __read_seqcount_retry(s, start)                                 \
        __read_seqcount_t_retry(__seqcount_ptr(s), start)

static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
{
        kcsan_atomic_next(0);
        return unlikely(READ_ONCE(s->sequence) != start);
}

/**
 * read_seqcount_retry() - end a seqcount_t read critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @start: count, from read_seqcount_begin()
 *
 * read_seqcount_retry closes the read critical section of the given
 * seqcount_t.  If the critical section was invalid, it must be ignored
 * (and typically retried).
 *
 * Return: true if a read section retry is required, else false
 */
#define read_seqcount_retry(s, start)                                   \
        read_seqcount_t_retry(__seqcount_ptr(s), start)

static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
{
        smp_rmb();
        return __read_seqcount_t_retry(s, start);
}

/**
 * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 */
#define raw_write_seqcount_begin(s)                                     \
do {                                                                    \
        if (__seqcount_lock_preemptible(s))                             \
                preempt_disable();                                      \
                                                                        \
        raw_write_seqcount_t_begin(__seqcount_ptr(s));                  \
} while (0)

static inline void raw_write_seqcount_t_begin(seqcount_t *s)
{
        kcsan_nestable_atomic_begin();
        s->sequence++;
        smp_wmb();
}

/**
 * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 */
#define raw_write_seqcount_end(s)                                       \
do {                                                                    \
        raw_write_seqcount_t_end(__seqcount_ptr(s));                    \
                                                                        \
        if (__seqcount_lock_preemptible(s))                             \
                preempt_enable();                                       \
} while (0)

static inline void raw_write_seqcount_t_end(seqcount_t *s)
{
        smp_wmb();
        s->sequence++;
        kcsan_nestable_atomic_end();
}

/**
 * write_seqcount_begin_nested() - start a seqcount_t write section with
 *                                 custom lockdep nesting level
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @subclass: lockdep nesting level
 *
 * See Documentation/locking/lockdep-design.rst
 */
#define write_seqcount_begin_nested(s, subclass)                        \
do {                                                                    \
        __seqcount_assert_lock_held(s);                                 \
                                                                        \
        if (__seqcount_lock_preemptible(s))                             \
                preempt_disable();                                      \
                                                                        \
        write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass);     \
} while (0)

static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
{
        raw_write_seqcount_t_begin(s);
        seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}

/**
 * write_seqcount_begin() - start a seqcount_t write side critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * write_seqcount_begin opens a write side critical section of the given
 * seqcount_t.
 *
 * Context: seqcount_t write side critical sections must be serialized and
 * non-preemptible. If readers can be invoked from hardirq or softirq
 * context, interrupts or bottom halves must be respectively disabled.
 */
#define write_seqcount_begin(s)                                         \
do {                                                                    \
        __seqcount_assert_lock_held(s);                                 \
                                                                        \
        if (__seqcount_lock_preemptible(s))                             \
                preempt_disable();                                      \
                                                                        \
        write_seqcount_t_begin(__seqcount_ptr(s));                      \
} while (0)

static inline void write_seqcount_t_begin(seqcount_t *s)
{
        write_seqcount_t_begin_nested(s, 0);
}

/**
 * write_seqcount_end() - end a seqcount_t write side critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * The write section must've been opened with write_seqcount_begin().
 */
#define write_seqcount_end(s)                                           \
do {                                                                    \
        write_seqcount_t_end(__seqcount_ptr(s));                        \
                                                                        \
        if (__seqcount_lock_preemptible(s))                             \
                preempt_enable();                                       \
} while (0)

static inline void write_seqcount_t_end(seqcount_t *s)
{
        seqcount_release(&s->dep_map, _RET_IP_);
        raw_write_seqcount_t_end(s);
}

/**
 * raw_write_seqcount_barrier() - do a seqcount_t write barrier
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * This can be used to provide an ordering guarantee instead of the usual
 * consistency guarantee. It is one wmb cheaper, because it can collapse
 * the two back-to-back wmb()s.
 *
 * Note that writes surrounding the barrier should be declared atomic (e.g.
 * via WRITE_ONCE): a) to ensure the writes become visible to other threads
 * atomically, avoiding compiler optimizations; b) to document which writes are
 * meant to propagate to the reader critical section. This is necessary because
 * neither the writes before nor the writes after the barrier are enclosed
 * in a seq-writer critical section that would ensure readers are aware of
 * ongoing writes::
 *
 *      seqcount_t seq;
 *      bool X = true, Y = false;
 *
 *      void read(void)
 *      {
 *              bool x, y;
 *              unsigned s;
 *
 *              do {
 *                      s = read_seqcount_begin(&seq);
 *
 *                      x = X; y = Y;
 *
 *              } while (read_seqcount_retry(&seq, s));
 *
 *              BUG_ON(!x && !y);
 *      }
 *
 *      void write(void)
 *      {
 *              WRITE_ONCE(Y, true);
 *
 *              raw_write_seqcount_barrier(&seq);
 *
 *              WRITE_ONCE(X, false);
 *      }
 */
#define raw_write_seqcount_barrier(s)                                   \
        raw_write_seqcount_t_barrier(__seqcount_ptr(s))

static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
{
        kcsan_nestable_atomic_begin();
        s->sequence++;
        smp_wmb();
        s->sequence++;
        kcsan_nestable_atomic_end();
}

/**
 * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
 *                               side operations
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * After write_seqcount_invalidate, no seqcount_t read side operations
 * will complete successfully and see data older than this.
 */
#define write_seqcount_invalidate(s)                                    \
        write_seqcount_t_invalidate(__seqcount_ptr(s))

static inline void write_seqcount_t_invalidate(seqcount_t *s)
{
        smp_wmb();
        kcsan_nestable_atomic_begin();
        s->sequence += 2;
        kcsan_nestable_atomic_end();
}

/*
 * Latch sequence counters (seqcount_latch_t)
 *
 * A sequence counter variant where the counter even/odd value is used to
 * switch between two copies of protected data. This allows the read path,
 * typically NMIs, to safely interrupt the write side critical section.
 *
 * As the write sections are fully preemptible, no special handling for
 * PREEMPT_RT is needed.
 */
typedef struct {
        seqcount_t seqcount;
} seqcount_latch_t;

/**
 * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t
 * @seq_name: Name of the seqcount_latch_t instance
 */
#define SEQCNT_LATCH_ZERO(seq_name) {                                   \
        .seqcount               = SEQCNT_ZERO(seq_name.seqcount),       \
}

/**
 * seqcount_latch_init() - runtime initializer for seqcount_latch_t
 * @s: Pointer to the seqcount_latch_t instance
 */
static inline void seqcount_latch_init(seqcount_latch_t *s)
{
        seqcount_init(&s->seqcount);
}

/**
 * raw_read_seqcount_latch() - pick even/odd latch data copy
 * @s: Pointer to seqcount_latch_t
 *
 * See raw_write_seqcount_latch() for details and a full reader/writer
 * usage example.
 *
 * Return: sequence counter raw value. Use the lowest bit as an index for
 * picking which data copy to read. The full counter must then be checked
 * with read_seqcount_latch_retry().
 */
static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
{
        /*
         * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
         * Due to the dependent load, a full smp_rmb() is not needed.
         */
        return READ_ONCE(s->seqcount.sequence);
}

/**
 * read_seqcount_latch_retry() - end a seqcount_latch_t read section
 * @s:          Pointer to seqcount_latch_t
 * @start:      count, from raw_read_seqcount_latch()
 *
 * Return: true if a read section retry is required, else false
 */
static inline int
read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
{
        return read_seqcount_retry(&s->seqcount, start);
}

/**
 * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
 * @s: Pointer to seqcount_latch_t
 *
 * The latch technique is a multiversion concurrency control method that allows
 * queries during non-atomic modifications. If you can guarantee queries never
 * interrupt the modification -- e.g. the concurrency is strictly between CPUs
 * -- you most likely do not need this.
 *
 * Where the traditional RCU/lockless data structures rely on atomic
 * modifications to ensure queries observe either the old or the new state,
 * the latch allows the same for non-atomic updates. The trade-off is
 * doubling the cost of storage; we have to maintain two copies of the
 * entire data structure.
 *
 * Very simply put: we first modify one copy and then the other. This ensures
 * there is always one copy in a stable state, ready to give us an answer.
 *
 * The basic form is a data structure like::
 *
 *      struct latch_struct {
 *              seqcount_latch_t        seq;
 *              struct data_struct      data[2];
 *      };
 *
 * Where a modification, which is assumed to be externally serialized, does the
 * following::
 *
 *      void latch_modify(struct latch_struct *latch, ...)
 *      {
 *              smp_wmb();      // Ensure that the last data[1] update is visible
 *              latch->seq.sequence++;
 *              smp_wmb();      // Ensure that the seqcount update is visible
 *
 *              modify(latch->data[0], ...);
 *
 *              smp_wmb();      // Ensure that the data[0] update is visible
 *              latch->seq.sequence++;
 *              smp_wmb();      // Ensure that the seqcount update is visible
 *
 *              modify(latch->data[1], ...);
 *      }
 *
 * The query will have a form like::
 *
 *      struct entry *latch_query(struct latch_struct *latch, ...)
 *      {
 *              struct entry *entry;
 *              unsigned seq, idx;
 *
 *              do {
 *                      seq = raw_read_seqcount_latch(&latch->seq);
 *
 *                      idx = seq & 0x01;
 *                      entry = data_query(latch->data[idx], ...);
 *
 *              // This includes needed smp_rmb()
 *              } while (read_seqcount_latch_retry(&latch->seq, seq));
 *
 *              return entry;
 *      }
 *
 * So during the modification, queries are first redirected to data[1]. Then we
 * modify data[0]. When that is complete, we redirect queries back to data[0]
 * and we can modify data[1].
 *
 * NOTE:
 *
 *      The non-requirement for atomic modifications does _NOT_ include
 *      the publishing of new entries in the case where data is a dynamic
 *      data structure.
 *
 *      An iteration might start in data[0] and get suspended long enough
 *      to miss an entire modification sequence; once it resumes, it might
 *      observe the new entry.
 *
 * NOTE2:
 *
 *      When data is a dynamic data structure, one should use regular RCU
 *      patterns to manage the lifetimes of the objects within.
 */
static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
{
        smp_wmb();      /* prior stores before incrementing "sequence" */
        s->seqcount.sequence++;
        smp_wmb();      /* increment "sequence" before following stores */
}

/*
 * Sequential locks (seqlock_t)
 *
 * Sequence counters with an embedded spinlock for writer serialization
 * and non-preemptibility.
 *
 * For more info, see:
 *    - Comments on top of seqcount_t
 *    - Documentation/locking/seqlock.rst
 */
typedef struct {
        /*
         * Make sure that readers don't starve writers on PREEMPT_RT: use
         * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
         */
        seqcount_spinlock_t seqcount;
        spinlock_t lock;
} seqlock_t;

#define __SEQLOCK_UNLOCKED(lockname)                                    \
        {                                                               \
                .seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
                .lock = __SPIN_LOCK_UNLOCKED(lockname)                  \
        }

/**
 * seqlock_init() - dynamic initializer for seqlock_t
 * @sl: Pointer to the seqlock_t instance
 */
#define seqlock_init(sl)                                                \
        do {                                                            \
                spin_lock_init(&(sl)->lock);                            \
                seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock);   \
        } while (0)

/**
 * DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t
 * @sl: Name of the seqlock_t instance
 */
#define DEFINE_SEQLOCK(sl) \
                seqlock_t sl = __SEQLOCK_UNLOCKED(sl)
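
/*
 * Example (an illustrative sketch; the foo_* names are hypothetical):
 * classic seqlock_t usage, with writer serialization and preemption
 * handling provided by the embedded spinlock::
 *
 *      static DEFINE_SEQLOCK(foo_lock);
 *      static u64 foo_data;
 *
 *      void foo_write(u64 v)
 *      {
 *              write_seqlock(&foo_lock);
 *              foo_data = v;
 *              write_sequnlock(&foo_lock);
 *      }
 *
 *      u64 foo_read(void)
 *      {
 *              unsigned seq;
 *              u64 v;
 *
 *              do {
 *                      seq = read_seqbegin(&foo_lock);
 *                      v = foo_data;
 *              } while (read_seqretry(&foo_lock, seq));
 *
 *              return v;
 *      }
 */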

/**
 * read_seqbegin() - start a seqlock_t read side critical section
 * @sl: Pointer to seqlock_t
 *
 * Return: count, to be passed to read_seqretry()
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
        unsigned ret = read_seqcount_begin(&sl->seqcount);

        kcsan_atomic_next(0);  /* non-raw usage, assume closing read_seqretry() */
        kcsan_flat_atomic_begin();
        return ret;
}

/**
 * read_seqretry() - end a seqlock_t read side section
 * @sl: Pointer to seqlock_t
 * @start: count, from read_seqbegin()
 *
 * read_seqretry closes the read side critical section of the given seqlock_t.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 *
 * Return: true if a read section retry is required, else false
 */
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
        /*
         * Assume not nested: read_seqretry() may be called multiple times when
         * completing a read critical section.
         */
        kcsan_flat_atomic_end();

        return read_seqcount_retry(&sl->seqcount, start);
}

/*
 * For all seqlock_t write side functions, use write_seqcount_*t*_begin()
 * instead of the generic write_seqcount_begin(). This way, no redundant
 * lockdep_assert_held() checks are added.
 */

/**
 * write_seqlock() - start a seqlock_t write side critical section
 * @sl: Pointer to seqlock_t
 *
 * write_seqlock opens a write side critical section for the given
 * seqlock_t.  It also implicitly acquires the spinlock_t embedded inside
 * that sequential lock. All seqlock_t write side sections are thus
 * automatically serialized and non-preemptible.
 *
 * Context: if the seqlock_t read section, or other write side critical
 * sections, can be invoked from hardirq or softirq contexts, use the
 * _irqsave or _bh variants of this function instead.
 */
static inline void write_seqlock(seqlock_t *sl)
{
        spin_lock(&sl->lock);
        write_seqcount_t_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock() - end a seqlock_t write side critical section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock closes the (serialized and non-preemptible) write side
 * critical section of the given seqlock_t.
 */
static inline void write_sequnlock(seqlock_t *sl)
{
        write_seqcount_t_end(&sl->seqcount.seqcount);
        spin_unlock(&sl->lock);
}

/**
 * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * _bh variant of write_seqlock(). Use only if the read side section, or
 * other write side sections, can be invoked from softirq contexts.
 */
static inline void write_seqlock_bh(seqlock_t *sl)
{
        spin_lock_bh(&sl->lock);
        write_seqcount_t_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock_bh closes the serialized, non-preemptible, and
 * softirqs-disabled, seqlock_t write side critical section opened with
 * write_seqlock_bh().
 */
static inline void write_sequnlock_bh(seqlock_t *sl)
{
        write_seqcount_t_end(&sl->seqcount.seqcount);
        spin_unlock_bh(&sl->lock);
}

/**
 * write_seqlock_irq() - start a non-interruptible seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * _irq variant of write_seqlock(). Use only if the read side section, or
 * other write sections, can be invoked from hardirq contexts.
 */
static inline void write_seqlock_irq(seqlock_t *sl)
{
        spin_lock_irq(&sl->lock);
        write_seqcount_t_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock_irq() - end a non-interruptible seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock_irq closes the serialized and non-interruptible
 * seqlock_t write side section opened with write_seqlock_irq().
 */
static inline void write_sequnlock_irq(seqlock_t *sl)
{
        write_seqcount_t_end(&sl->seqcount.seqcount);
        spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
        unsigned long flags;

        spin_lock_irqsave(&sl->lock, flags);
        write_seqcount_t_begin(&sl->seqcount.seqcount);
        return flags;
}

/**
 * write_seqlock_irqsave() - start a non-interruptible seqlock_t write
 *                           section
 * @lock:  Pointer to seqlock_t
 * @flags: Stack-allocated storage for saving caller's local interrupt
 *         state, to be passed to write_sequnlock_irqrestore().
 *
 * _irqsave variant of write_seqlock(). Use it only if the read side
 * section, or other write sections, can be invoked from hardirq context.
 */
#define write_seqlock_irqsave(lock, flags)                              \
        do { flags = __write_seqlock_irqsave(lock); } while (0)

/**
 * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
 *                                section
 * @sl:    Pointer to seqlock_t
 * @flags: Caller's saved interrupt state, from write_seqlock_irqsave()
 *
 * write_sequnlock_irqrestore closes the serialized and non-interruptible
 * seqlock_t write section previously opened with write_seqlock_irqsave().
 */
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
        write_seqcount_t_end(&sl->seqcount.seqcount);
        spin_unlock_irqrestore(&sl->lock, flags);
}
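
/*
 * Example (an illustrative sketch; foo_lock and foo_data are
 * hypothetical): a write section on a seqlock_t whose readers may also
 * run in hardirq context, so the caller's interrupt state is saved and
 * restored around the section::
 *
 *      void foo_write(u64 v)
 *      {
 *              unsigned long flags;
 *
 *              write_seqlock_irqsave(&foo_lock, flags);
 *              foo_data = v;
 *              write_sequnlock_irqrestore(&foo_lock, flags);
 *      }
 */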

/**
 * read_seqlock_excl() - begin a seqlock_t locking reader section
 * @sl: Pointer to seqlock_t
 *
 * read_seqlock_excl opens a seqlock_t locking reader critical section.  A
 * locking reader exclusively locks out *both* other writers *and* other
 * locking readers, but it does not update the embedded sequence number.
 *
 * Locking readers act like a normal spin_lock()/spin_unlock().
 *
 * Context: if the seqlock_t write section, *or other read sections*, can
 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
 * variant of this function instead.
 *
 * The opened read section must be closed with read_sequnlock_excl().
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
        spin_lock(&sl->lock);
}

/**
 * read_sequnlock_excl() - end a seqlock_t locking reader critical section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl(seqlock_t *sl)
{
        spin_unlock(&sl->lock);
}

/**
 * read_seqlock_excl_bh() - start a seqlock_t locking reader section with
 *                          softirqs disabled
 * @sl: Pointer to seqlock_t
 *
 * _bh variant of read_seqlock_excl(). Use this variant only if the
 * seqlock_t write side section, *or other read sections*, can be invoked
 * from softirq contexts.
 */
static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
        spin_lock_bh(&sl->lock);
}

/**
 * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
 *                            reader section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
        spin_unlock_bh(&sl->lock);
}

/**
 * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
 *                           reader section
 * @sl: Pointer to seqlock_t
 *
 * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t
 * write side section, *or other read sections*, can be invoked from a
 * hardirq context.
 */
static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
        spin_lock_irq(&sl->lock);
}

/**
 * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
 *                             locking reader section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
        spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
        unsigned long flags;

        spin_lock_irqsave(&sl->lock, flags);
        return flags;
}

/**
 * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
 *                               locking reader section
 * @lock:  Pointer to seqlock_t
 * @flags: Stack-allocated storage for saving caller's local interrupt
 *         state, to be passed to read_sequnlock_excl_irqrestore().
 *
 * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t
 * write side section, *or other read sections*, can be invoked from a
 * hardirq context.
 */
#define read_seqlock_excl_irqsave(lock, flags)                          \
        do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

/**
 * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
 *                                    locking reader section
 * @sl:    Pointer to seqlock_t
 * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave()
 */
static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
        spin_unlock_irqrestore(&sl->lock, flags);
}

/**
 * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
 * @lock: Pointer to seqlock_t
 * @seq: Marker and return parameter. If the passed value is even, the
 * reader will become a *lockless* seqlock_t reader as in read_seqbegin().
 * If the passed value is odd, the reader will become a *locking* reader
 * as in read_seqlock_excl().  In the first call to this function, the
 * caller *must* initialize and pass an even value to @seq; this way, a
 * lockless read can be optimistically tried first.
 *
 * read_seqbegin_or_lock is an API designed to optimistically try a normal
 * lockless seqlock_t read section first.  If an odd counter is found, the
 * lockless read trial has failed, and the next read iteration transforms
 * itself into a full seqlock_t locking reader.
 *
 * This is typically used to avoid starvation of lockless seqlock_t
 * readers (too many retry loops) in the case of a sharp spike in write
 * side activity.
 *
 * Context: if the seqlock_t write section, *or other read sections*, can
 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
 * variant of this function instead.
 *
 * Check Documentation/locking/seqlock.rst for template example code.
 *
 * Return: the encountered sequence counter value, through the @seq
 * parameter, which is overloaded as a return parameter. This returned
 * value must be checked with need_seqretry(). If the read section needs to
 * be retried, this returned value must also be passed as the @seq
 * parameter of the next read_seqbegin_or_lock() iteration.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
        if (!(*seq & 1))        /* Even */
                *seq = read_seqbegin(lock);
        else                    /* Odd */
                read_seqlock_excl(lock);
}

/**
 * need_seqretry() - validate seqlock_t "locking or lockless" read section
 * @lock: Pointer to seqlock_t
 * @seq: sequence count, from read_seqbegin_or_lock()
 *
 * Return: true if a read section retry is required, false otherwise
 */
static inline int need_seqretry(seqlock_t *lock, int seq)
{
        return !(seq & 1) && read_seqretry(lock, seq);
}

/**
 * done_seqretry() - end seqlock_t "locking or lockless" reader section
 * @lock: Pointer to seqlock_t
 * @seq: count, from read_seqbegin_or_lock()
 *
 * done_seqretry finishes the seqlock_t read side critical section started
 * with read_seqbegin_or_lock() and validated by need_seqretry().
 */
static inline void done_seqretry(seqlock_t *lock, int seq)
{
        if (seq & 1)
                read_sequnlock_excl(lock);
}
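
/*
 * Example (an illustrative sketch of the pattern described above;
 * foo_lock and the protected data are hypothetical). The marker starts
 * even for an optimistic lockless pass; if that pass fails, it is forced
 * odd so the next iteration takes the lock::
 *
 *      int seq = 0;
 *
 *      do {
 *              read_seqbegin_or_lock(&foo_lock, &seq);
 *
 *              // ... read the protected data ...
 *
 *              if (!need_seqretry(&foo_lock, seq))
 *                      break;
 *              seq = 1;        // next pass: locking reader
 *      } while (1);
 *      done_seqretry(&foo_lock, seq);
 */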

/**
 * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
 *                                   a non-interruptible locking reader
 * @lock: Pointer to seqlock_t
 * @seq:  Marker and return parameter. Check read_seqbegin_or_lock().
 *
 * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if
 * the seqlock_t write section, *or other read sections*, can be invoked
 * from hardirq context.
 *
 * Note: Interrupts will be disabled only for "locking reader" mode.
 *
 * Return:
 *
 *   1. The saved local interrupts state in case of a locking reader, to
 *      be passed to done_seqretry_irqrestore().
 *
 *   2. The encountered sequence counter value, returned through @seq
 *      overloaded as a return parameter. Check read_seqbegin_or_lock().
 */
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
        unsigned long flags = 0;

        if (!(*seq & 1))        /* Even */
                *seq = read_seqbegin(lock);
        else                    /* Odd */
                read_seqlock_excl_irqsave(lock, flags);

        return flags;
}

/**
 * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
 *                              non-interruptible locking reader section
 * @lock:  Pointer to seqlock_t
 * @seq:   Count, from read_seqbegin_or_lock_irqsave()
 * @flags: Caller's saved local interrupt state in case of a locking
 *         reader, also from read_seqbegin_or_lock_irqsave()
 *
 * This is the _irqrestore variant of done_seqretry(). The read section
 * must've been opened with read_seqbegin_or_lock_irqsave(), and validated
 * by need_seqretry().
 */
static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
        if (seq & 1)
                read_sequnlock_excl_irqrestore(lock, flags);
}
#endif /* __LINUX_SEQLOCK_H */