linux/kernel/locking/rwsem-xadd.c
   1// SPDX-License-Identifier: GPL-2.0
   2/* rwsem.c: R/W semaphores: contention handling functions
   3 *
   4 * Written by David Howells (dhowells@redhat.com).
   5 * Derived from arch/i386/kernel/semaphore.c
   6 *
   7 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
   8 * and Michel Lespinasse <walken@google.com>
   9 *
  10 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
  11 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
  12 */
  13#include <linux/rwsem.h>
  14#include <linux/init.h>
  15#include <linux/export.h>
  16#include <linux/sched/signal.h>
  17#include <linux/sched/rt.h>
  18#include <linux/sched/wake_q.h>
  19#include <linux/sched/debug.h>
  20#include <linux/osq_lock.h>
  21
  22#include "rwsem.h"
  23
  24/*
  25 * Guide to the rw_semaphore's count field for common values.
  26 * (32-bit case illustrated, similar for 64-bit)
  27 *
  28 * 0x0000000X   (1) X readers active or attempting lock, no writer waiting
  29 *                  X = #active_readers + #readers attempting to lock
  30 *                  (X*ACTIVE_BIAS)
  31 *
  32 * 0x00000000   rwsem is unlocked, and no one is waiting for the lock or
  33 *              attempting to read lock or write lock.
  34 *
  35 * 0xffff000X   (1) X readers active or attempting lock, with waiters for lock
  36 *                  X = #active readers + # readers attempting lock
  37 *                  (X*ACTIVE_BIAS + WAITING_BIAS)
  38 *              (2) 1 writer attempting lock, no waiters for lock
  39 *                  X-1 = #active readers + #readers attempting lock
  40 *                  ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
  41 *              (3) 1 writer active, no waiters for lock
  42 *                  X-1 = #active readers + #readers attempting lock
  43 *                  ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
  44 *
  45 * 0xffff0001   (1) 1 reader active or attempting lock, waiters for lock
  46 *                  (WAITING_BIAS + ACTIVE_BIAS)
  47 *              (2) 1 writer active or attempting lock, no waiters for lock
  48 *                  (ACTIVE_WRITE_BIAS)
  49 *
  50 * 0xffff0000   (1) There are writers or readers queued but none active
  51 *                  or in the process of attempting lock.
  52 *                  (WAITING_BIAS)
  53 *              Note: writer can attempt to steal lock for this count by adding
  54 *              ACTIVE_WRITE_BIAS in cmpxchg and checking the old count
  55 *
  56 * 0xfffe0001   (1) 1 writer active, or attempting lock. Waiters on queue.
  57 *                  (ACTIVE_WRITE_BIAS + WAITING_BIAS)
  58 *
  59 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and checking
  60 *       the count becomes more than 0 for successful lock acquisition,
  61 *       i.e. the case where there are only readers or nobody has lock.
  62 *       (1st and 2nd case above).
  63 *
  64 *       Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
  65 *       checking the count becomes ACTIVE_WRITE_BIAS for successful lock
  66 *       acquisition (i.e. nobody else has lock or attempts lock).  If
  67 *       unsuccessful, in rwsem_down_write_failed, we'll check to see if there
  68 *       are only waiters but none active (5th case above), and attempt to
  69 *       steal the lock.
  70 *
  71 */
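/*
 * Worked example (illustrative only, using the common 32-bit bias values
 * RWSEM_ACTIVE_BIAS = 0x00000001, RWSEM_ACTIVE_MASK = 0x0000ffff,
 * RWSEM_WAITING_BIAS = 0xffff0000, RWSEM_ACTIVE_WRITE_BIAS = 0xffff0001):
 *
 *	down_read()			count = 0x00000001  1 active reader
 *	down_read()			count = 0x00000002  2 active readers
 *	down_write() fails, queues	count = 0xffff0002  2 readers + waiter
 *	up_read()			count = 0xffff0001
 *	up_read()			count = 0xffff0000  waiter only; the
 *					queued writer may now cmpxchg the count
 *					to 0xffff0001 and take the lock
 */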
  72
  73/*
  74 * Initialize an rwsem:
  75 */
  76void __init_rwsem(struct rw_semaphore *sem, const char *name,
  77                  struct lock_class_key *key)
  78{
  79#ifdef CONFIG_DEBUG_LOCK_ALLOC
  80        /*
  81         * Make sure we are not reinitializing a held semaphore:
  82         */
  83        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
  84        lockdep_init_map(&sem->dep_map, name, key, 0);
  85#endif
  86        atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
  87        raw_spin_lock_init(&sem->wait_lock);
  88        INIT_LIST_HEAD(&sem->wait_list);
  89#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
  90        sem->owner = NULL;
  91        osq_lock_init(&sem->osq);
  92#endif
  93}
  94
  95EXPORT_SYMBOL(__init_rwsem);
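/*
 * Caller-side usage sketch (illustrative; example_sem is a made-up name, and
 * the uncontended fast paths live outside this file, only calling into the
 * slow paths below on contention):
 *
 *	static DECLARE_RWSEM(example_sem);	// or init_rwsem() on a
 *						// dynamically allocated rwsem
 *
 *	down_read(&example_sem);		// shared, may sleep
 *	...					// multiple readers in parallel
 *	up_read(&example_sem);
 *
 *	down_write(&example_sem);		// exclusive, may sleep
 *	...
 *	up_write(&example_sem);
 */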
  96
  97enum rwsem_waiter_type {
  98        RWSEM_WAITING_FOR_WRITE,
  99        RWSEM_WAITING_FOR_READ
 100};
 101
 102struct rwsem_waiter {
 103        struct list_head list;
 104        struct task_struct *task;
 105        enum rwsem_waiter_type type;
 106};
 107
 108enum rwsem_wake_type {
 109        RWSEM_WAKE_ANY,         /* Wake whatever's at head of wait list */
 110        RWSEM_WAKE_READERS,     /* Wake readers only */
 111        RWSEM_WAKE_READ_OWNED   /* Waker thread holds the read lock */
 112};
 113
 114/*
  115 * handle the lock release when there are processes blocked on it that can now run
 116 * - if we come here from up_xxxx(), then:
 117 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 118 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 119 * - there must be someone on the queue
 120 * - the wait_lock must be held by the caller
 121 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 122 *   to actually wakeup the blocked task(s) and drop the reference count,
 123 *   preferably when the wait_lock is released
 124 * - woken process blocks are discarded from the list after having task zeroed
 125 * - writers are only marked woken if downgrading is false
 126 */
 127static void __rwsem_mark_wake(struct rw_semaphore *sem,
 128                              enum rwsem_wake_type wake_type,
 129                              struct wake_q_head *wake_q)
 130{
 131        struct rwsem_waiter *waiter, *tmp;
 132        long oldcount, woken = 0, adjustment = 0;
 133
 134        /*
 135         * Take a peek at the queue head waiter such that we can determine
 136         * the wakeup(s) to perform.
 137         */
 138        waiter = list_first_entry(&sem->wait_list, struct rwsem_waiter, list);
 139
 140        if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
 141                if (wake_type == RWSEM_WAKE_ANY) {
 142                        /*
 143                         * Mark writer at the front of the queue for wakeup.
  144                         * Until the task is actually awoken later by
  145                         * the caller, other writers are able to steal it.
 146                         * Readers, on the other hand, will block as they
 147                         * will notice the queued writer.
 148                         */
 149                        wake_q_add(wake_q, waiter->task);
 150                }
 151
 152                return;
 153        }
 154
 155        /*
 156         * Writers might steal the lock before we grant it to the next reader.
 157         * We prefer to do the first reader grant before counting readers
 158         * so we can bail out early if a writer stole the lock.
 159         */
 160        if (wake_type != RWSEM_WAKE_READ_OWNED) {
 161                adjustment = RWSEM_ACTIVE_READ_BIAS;
 162 try_reader_grant:
 163                oldcount = atomic_long_fetch_add(adjustment, &sem->count);
 164                if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
 165                        /*
 166                         * If the count is still less than RWSEM_WAITING_BIAS
 167                         * after removing the adjustment, it is assumed that
 168                         * a writer has stolen the lock. We have to undo our
 169                         * reader grant.
 170                         */
 171                        if (atomic_long_add_return(-adjustment, &sem->count) <
 172                            RWSEM_WAITING_BIAS)
 173                                return;
 174
 175                        /* Last active locker left. Retry waking readers. */
 176                        goto try_reader_grant;
 177                }
 178                /*
 179                 * It is not really necessary to set it to reader-owned here,
 180                 * but it gives the spinners an early indication that the
 181                 * readers now have the lock.
 182                 */
 183                rwsem_set_reader_owned(sem);
 184        }
 185
 186        /*
  187         * Grant read locks to all of the readers at the front
 188         * of the queue. We know that woken will be at least 1 as we accounted
 189         * for above. Note we increment the 'active part' of the count by the
 190         * number of readers before waking any processes up.
 191         */
 192        list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
 193                struct task_struct *tsk;
 194
 195                if (waiter->type == RWSEM_WAITING_FOR_WRITE)
 196                        break;
 197
 198                woken++;
 199                tsk = waiter->task;
 200
 201                wake_q_add(wake_q, tsk);
 202                list_del(&waiter->list);
 203                /*
 204                 * Ensure that the last operation is setting the reader
 205                 * waiter to nil such that rwsem_down_read_failed() cannot
 206                 * race with do_exit() by always holding a reference count
 207                 * to the task to wakeup.
 208                 */
 209                smp_store_release(&waiter->task, NULL);
 210        }
 211
 212        adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
 213        if (list_empty(&sem->wait_list)) {
 214                /* hit end of list above */
 215                adjustment -= RWSEM_WAITING_BIAS;
 216        }
 217
 218        if (adjustment)
 219                atomic_long_add(adjustment, &sem->count);
 220}
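/*
 * Illustrative arithmetic for the adjustment above: suppose the first three
 * waiters are readers, wake_type is RWSEM_WAKE_ANY, and waking them drains
 * the wait list.  The try_reader_grant fetch_add already contributed one
 * RWSEM_ACTIVE_READ_BIAS, the loop counts woken = 3, so the final update is
 *
 *	adjustment = 3 * RWSEM_ACTIVE_READ_BIAS - RWSEM_ACTIVE_READ_BIAS
 *		     - RWSEM_WAITING_BIAS
 *
 * i.e. the two remaining read grants plus removal of the waiting bias,
 * applied with a single atomic_long_add().
 */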
 221
 222/*
 223 * Wait for the read lock to be granted
 224 */
 225static inline struct rw_semaphore __sched *
 226__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
 227{
 228        long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
 229        struct rwsem_waiter waiter;
 230        DEFINE_WAKE_Q(wake_q);
 231
 232        waiter.task = current;
 233        waiter.type = RWSEM_WAITING_FOR_READ;
 234
 235        raw_spin_lock_irq(&sem->wait_lock);
 236        if (list_empty(&sem->wait_list))
 237                adjustment += RWSEM_WAITING_BIAS;
 238        list_add_tail(&waiter.list, &sem->wait_list);
 239
 240        /* we're now waiting on the lock, but no longer actively locking */
 241        count = atomic_long_add_return(adjustment, &sem->count);
 242
 243        /*
 244         * If there are no active locks, wake the front queued process(es).
 245         *
 246         * If there are no writers and we are first in the queue,
  247         * wake our own waiter to join the existing active readers!
 248         */
 249        if (count == RWSEM_WAITING_BIAS ||
 250            (count > RWSEM_WAITING_BIAS &&
 251             adjustment != -RWSEM_ACTIVE_READ_BIAS))
 252                __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 253
 254        raw_spin_unlock_irq(&sem->wait_lock);
 255        wake_up_q(&wake_q);
 256
 257        /* wait to be given the lock */
 258        while (true) {
 259                set_current_state(state);
 260                if (!waiter.task)
 261                        break;
 262                if (signal_pending_state(state, current)) {
 263                        raw_spin_lock_irq(&sem->wait_lock);
 264                        if (waiter.task)
 265                                goto out_nolock;
 266                        raw_spin_unlock_irq(&sem->wait_lock);
 267                        break;
 268                }
 269                schedule();
 270        }
 271
 272        __set_current_state(TASK_RUNNING);
 273        return sem;
 274out_nolock:
 275        list_del(&waiter.list);
 276        if (list_empty(&sem->wait_list))
 277                atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
 278        raw_spin_unlock_irq(&sem->wait_lock);
 279        __set_current_state(TASK_RUNNING);
 280        return ERR_PTR(-EINTR);
 281}
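/*
 * Illustrative wakeup scenario for the check above (32-bit values): a reader
 * fails the fast path because a writer holds the lock, but the writer
 * releases before the reader takes the wait_lock.  After queuing, the count
 * is exactly RWSEM_WAITING_BIAS (0xffff0000), so the reader calls
 * __rwsem_mark_wake() and the front of the queue (here, itself) is granted
 * the lock.  The second condition covers the case where only readers hold
 * the lock and we are the first waiter, so we wake ourselves to join them.
 */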
 282
 283__visible struct rw_semaphore * __sched
 284rwsem_down_read_failed(struct rw_semaphore *sem)
 285{
 286        return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
 287}
 288EXPORT_SYMBOL(rwsem_down_read_failed);
 289
 290__visible struct rw_semaphore * __sched
 291rwsem_down_read_failed_killable(struct rw_semaphore *sem)
 292{
 293        return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
 294}
 295EXPORT_SYMBOL(rwsem_down_read_failed_killable);
 296
 297/*
 298 * This function must be called with the sem->wait_lock held to prevent
 299 * race conditions between checking the rwsem wait list and setting the
 300 * sem->count accordingly.
 301 */
 302static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
 303{
 304        /*
 305         * Avoid trying to acquire write lock if count isn't RWSEM_WAITING_BIAS.
 306         */
 307        if (count != RWSEM_WAITING_BIAS)
 308                return false;
 309
 310        /*
 311         * Acquire the lock by trying to set it to ACTIVE_WRITE_BIAS. If there
 312         * are other tasks on the wait list, we need to add on WAITING_BIAS.
 313         */
 314        count = list_is_singular(&sem->wait_list) ?
 315                        RWSEM_ACTIVE_WRITE_BIAS :
 316                        RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;
 317
 318        if (atomic_long_cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count)
 319                                                        == RWSEM_WAITING_BIAS) {
 320                rwsem_set_owner(sem);
 321                return true;
 322        }
 323
 324        return false;
 325}
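/*
 * Illustrative transitions for the cmpxchg above (32-bit values): with only
 * ourselves on the wait list, 0xffff0000 -> 0xffff0001 (ACTIVE_WRITE_BIAS);
 * with other waiters still queued, 0xffff0000 -> 0xfffe0001
 * (ACTIVE_WRITE_BIAS + WAITING_BIAS), which keeps the waiting bias in place
 * on their behalf.
 */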
 326
 327#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 328/*
 329 * Try to acquire write lock before the writer has been put on wait queue.
 330 */
 331static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 332{
 333        long old, count = atomic_long_read(&sem->count);
 334
 335        while (true) {
 336                if (!(count == 0 || count == RWSEM_WAITING_BIAS))
 337                        return false;
 338
 339                old = atomic_long_cmpxchg_acquire(&sem->count, count,
 340                                      count + RWSEM_ACTIVE_WRITE_BIAS);
 341                if (old == count) {
 342                        rwsem_set_owner(sem);
 343                        return true;
 344                }
 345
 346                count = old;
 347        }
 348}
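/*
 * Illustrative: the unqueued (optimistic spinning) variant may steal from
 * either 0x00000000 -> 0xffff0001 (lock free, nothing queued) or
 * 0xffff0000 -> 0xfffe0001 (lock free, waiters queued), again in 32-bit
 * terms; any other starting count means someone is active and the steal
 * must fail.
 */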
 349
 350static inline bool owner_on_cpu(struct task_struct *owner)
 351{
 352        /*
  353         * Due to the lock holder preemption issue, we skip spinning if the
  354         * task is not on a CPU or its CPU is preempted
 355         */
 356        return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
 357}
 358
 359static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 360{
 361        struct task_struct *owner;
 362        bool ret = true;
 363
 364        BUILD_BUG_ON(!rwsem_has_anonymous_owner(RWSEM_OWNER_UNKNOWN));
 365
 366        if (need_resched())
 367                return false;
 368
 369        rcu_read_lock();
 370        owner = READ_ONCE(sem->owner);
 371        if (owner) {
 372                ret = is_rwsem_owner_spinnable(owner) &&
 373                      owner_on_cpu(owner);
 374        }
 375        rcu_read_unlock();
 376        return ret;
 377}
 378
 379/*
 380 * Return true only if we can still spin on the owner field of the rwsem.
 381 */
 382static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
 383{
 384        struct task_struct *owner = READ_ONCE(sem->owner);
 385
 386        if (!is_rwsem_owner_spinnable(owner))
 387                return false;
 388
 389        rcu_read_lock();
 390        while (owner && (READ_ONCE(sem->owner) == owner)) {
 391                /*
  392                 * Ensure we emit the owner->on_cpu dereference _after_
  393                 * checking that sem->owner still matches owner. If that fails,
  394                 * owner might point to free()d memory; if it still matches,
  395                 * the rcu_read_lock() ensures the memory stays valid.
 396                 */
 397                barrier();
 398
 399                /*
 400                 * abort spinning when need_resched or owner is not running or
 401                 * owner's cpu is preempted.
 402                 */
 403                if (need_resched() || !owner_on_cpu(owner)) {
 404                        rcu_read_unlock();
 405                        return false;
 406                }
 407
 408                cpu_relax();
 409        }
 410        rcu_read_unlock();
 411
 412        /*
 413         * If there is a new owner or the owner is not set, we continue
 414         * spinning.
 415         */
 416        return is_rwsem_owner_spinnable(READ_ONCE(sem->owner));
 417}
 418
 419static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 420{
 421        bool taken = false;
 422
 423        preempt_disable();
 424
 425        /* sem->wait_lock should not be held when doing optimistic spinning */
 426        if (!rwsem_can_spin_on_owner(sem))
 427                goto done;
 428
 429        if (!osq_lock(&sem->osq))
 430                goto done;
 431
 432        /*
 433         * Optimistically spin on the owner field and attempt to acquire the
 434         * lock whenever the owner changes. Spinning will be stopped when:
 435         *  1) the owning writer isn't running; or
 436         *  2) readers own the lock as we can't determine if they are
 437         *     actively running or not.
 438         */
 439        while (rwsem_spin_on_owner(sem)) {
 440                /*
 441                 * Try to acquire the lock
 442                 */
 443                if (rwsem_try_write_lock_unqueued(sem)) {
 444                        taken = true;
 445                        break;
 446                }
 447
 448                /*
  449                 * When there's no owner, we might have preempted between the
  450                 * owner acquiring the lock and setting the owner field. If
  451                 * we're an RT task, that will live-lock because we won't let
  452                 * the owner complete.
 453                 */
 454                if (!sem->owner && (need_resched() || rt_task(current)))
 455                        break;
 456
 457                /*
 458                 * The cpu_relax() call is a compiler barrier which forces
 459                 * everything in this loop to be re-loaded. We don't need
 460                 * memory barriers as we'll eventually observe the right
 461                 * values at the cost of a few extra spins.
 462                 */
 463                cpu_relax();
 464        }
 465        osq_unlock(&sem->osq);
 466done:
 467        preempt_enable();
 468        return taken;
 469}
 470
 471/*
 472 * Return true if the rwsem has active spinner
 473 */
 474static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
 475{
 476        return osq_is_locked(&sem->osq);
 477}
 478
 479#else
 480static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 481{
 482        return false;
 483}
 484
 485static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
 486{
 487        return false;
 488}
 489#endif
 490
 491/*
 492 * Wait until we successfully acquire the write lock
 493 */
 494static inline struct rw_semaphore *
 495__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 496{
 497        long count;
 498        bool waiting = true; /* any queued threads before us */
 499        struct rwsem_waiter waiter;
 500        struct rw_semaphore *ret = sem;
 501        DEFINE_WAKE_Q(wake_q);
 502
 503        /* undo write bias from down_write operation, stop active locking */
 504        count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);
 505
 506        /* do optimistic spinning and steal lock if possible */
 507        if (rwsem_optimistic_spin(sem))
 508                return sem;
 509
 510        /*
 511         * Optimistic spinning failed, proceed to the slowpath
 512         * and block until we can acquire the sem.
 513         */
 514        waiter.task = current;
 515        waiter.type = RWSEM_WAITING_FOR_WRITE;
 516
 517        raw_spin_lock_irq(&sem->wait_lock);
 518
 519        /* account for this before adding a new element to the list */
 520        if (list_empty(&sem->wait_list))
 521                waiting = false;
 522
 523        list_add_tail(&waiter.list, &sem->wait_list);
 524
 525        /* we're now waiting on the lock, but no longer actively locking */
 526        if (waiting) {
 527                count = atomic_long_read(&sem->count);
 528
 529                /*
 530                 * If there were already threads queued before us and there are
  531                 * no active writers, the lock must be read owned; so we try to
  532                 * wake any readers that were queued ahead of us.
 533                 */
 534                if (count > RWSEM_WAITING_BIAS) {
 535                        __rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
 536                        /*
 537                         * The wakeup is normally called _after_ the wait_lock
 538                         * is released, but given that we are proactively waking
 539                         * readers we can deal with the wake_q overhead as it is
 540                         * similar to releasing and taking the wait_lock again
 541                         * for attempting rwsem_try_write_lock().
 542                         */
 543                        wake_up_q(&wake_q);
 544
 545                        /*
 546                         * Reinitialize wake_q after use.
 547                         */
 548                        wake_q_init(&wake_q);
 549                }
 550
 551        } else
 552                count = atomic_long_add_return(RWSEM_WAITING_BIAS, &sem->count);
 553
 554        /* wait until we successfully acquire the lock */
 555        set_current_state(state);
 556        while (true) {
 557                if (rwsem_try_write_lock(count, sem))
 558                        break;
 559                raw_spin_unlock_irq(&sem->wait_lock);
 560
 561                /* Block until there are no active lockers. */
 562                do {
 563                        if (signal_pending_state(state, current))
 564                                goto out_nolock;
 565
 566                        schedule();
 567                        set_current_state(state);
 568                } while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK);
 569
 570                raw_spin_lock_irq(&sem->wait_lock);
 571        }
 572        __set_current_state(TASK_RUNNING);
 573        list_del(&waiter.list);
 574        raw_spin_unlock_irq(&sem->wait_lock);
 575
 576        return ret;
 577
 578out_nolock:
 579        __set_current_state(TASK_RUNNING);
 580        raw_spin_lock_irq(&sem->wait_lock);
 581        list_del(&waiter.list);
 582        if (list_empty(&sem->wait_list))
 583                atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
 584        else
 585                __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 586        raw_spin_unlock_irq(&sem->wait_lock);
 587        wake_up_q(&wake_q);
 588
 589        return ERR_PTR(-EINTR);
 590}
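/*
 * Illustrative count bookkeeping for the write slowpath (32-bit values,
 * hypothetical scenario): a writer fails the fast path against one active
 * reader, entering with count = 0xffff0002.  The sub_return() above drops
 * the write bias, leaving 0x00000001; queuing on an empty list re-adds
 * RWSEM_WAITING_BIAS (count = 0xffff0001); the writer then sleeps until the
 * active part clears (count = 0xffff0000), at which point
 * rwsem_try_write_lock() claims the lock with the cmpxchg shown earlier.
 */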
 591
 592__visible struct rw_semaphore * __sched
 593rwsem_down_write_failed(struct rw_semaphore *sem)
 594{
 595        return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
 596}
 597EXPORT_SYMBOL(rwsem_down_write_failed);
 598
 599__visible struct rw_semaphore * __sched
 600rwsem_down_write_failed_killable(struct rw_semaphore *sem)
 601{
 602        return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
 603}
 604EXPORT_SYMBOL(rwsem_down_write_failed_killable);
 605
 606/*
 607 * handle waking up a waiter on the semaphore
 608 * - up_read/up_write has decremented the active part of count if we come here
 609 */
 610__visible
 611struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 612{
 613        unsigned long flags;
 614        DEFINE_WAKE_Q(wake_q);
 615
 616        /*
 617        * __rwsem_down_write_failed_common(sem)
 618        *   rwsem_optimistic_spin(sem)
 619        *     osq_unlock(sem->osq)
 620        *   ...
 621        *   atomic_long_add_return(&sem->count)
 622        *
 623        *      - VS -
 624        *
 625        *              __up_write()
 626        *                if (atomic_long_sub_return_release(&sem->count) < 0)
 627        *                  rwsem_wake(sem)
 628        *                    osq_is_locked(&sem->osq)
 629        *
 630        * And __up_write() must observe !osq_is_locked() when it observes the
 631        * atomic_long_add_return() in order to not miss a wakeup.
 632        *
 633        * This boils down to:
 634        *
 635        * [S.rel] X = 1                [RmW] r0 = (Y += 0)
 636        *         MB                         RMB
 637        * [RmW]   Y += 1               [L]   r1 = X
 638        *
 639        * exists (r0=1 /\ r1=0)
 640        */
 641        smp_rmb();
 642
 643        /*
 644         * If a spinner is present, it is not necessary to do the wakeup.
 645         * Try to do wakeup only if the trylock succeeds to minimize
 646         * spinlock contention which may introduce too much delay in the
 647         * unlock operation.
 648         *
 649         *    spinning writer           up_write/up_read caller
 650         *    ---------------           -----------------------
 651         * [S]   osq_unlock()           [L]   osq
 652         *       MB                           RMB
 653         * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock)
 654         *
 655         * Here, it is important to make sure that there won't be a missed
 656         * wakeup while the rwsem is free and the only spinning writer goes
 657         * to sleep without taking the rwsem. Even when the spinning writer
 658         * is just going to break out of the waiting loop, it will still do
 659         * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
 660         * rwsem_has_spinner() is true, it will guarantee at least one
 661         * trylock attempt on the rwsem later on.
 662         */
 663        if (rwsem_has_spinner(sem)) {
 664                /*
 665                 * The smp_rmb() here is to make sure that the spinner
 666                 * state is consulted before reading the wait_lock.
 667                 */
 668                smp_rmb();
 669                if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
 670                        return sem;
 671                goto locked;
 672        }
 673        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 674locked:
 675
 676        if (!list_empty(&sem->wait_list))
 677                __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 678
 679        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 680        wake_up_q(&wake_q);
 681
 682        return sem;
 683}
 684EXPORT_SYMBOL(rwsem_wake);
 685
 686/*
 687 * downgrade a write lock into a read lock
 688 * - caller incremented waiting part of count and discovered it still negative
 689 * - just wake up any readers at the front of the queue
 690 */
 691__visible
 692struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
 693{
 694        unsigned long flags;
 695        DEFINE_WAKE_Q(wake_q);
 696
 697        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 698
 699        if (!list_empty(&sem->wait_list))
 700                __rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
 701
 702        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 703        wake_up_q(&wake_q);
 704
 705        return sem;
 706}
 707EXPORT_SYMBOL(rwsem_downgrade_wake);
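/*
 * Caller-side downgrade sketch (illustrative; sem is a made-up example):
 * a task that has built up state under the write lock can admit readers
 * early without fully releasing:
 *
 *	down_write(&sem);
 *	...modify the protected data...
 *	downgrade_write(&sem);		// waiting readers are woken via
 *					// rwsem_downgrade_wake()
 *	...keep reading the data...
 *	up_read(&sem);
 */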
 708