   1/*
   2 * kernel/locking/mutex.c
   3 *
   4 * Mutexes: blocking mutual exclusion locks
   5 *
   6 * Started by Ingo Molnar:
   7 *
   8 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
   9 *
  10 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
  11 * David Howells for suggestions and improvements.
  12 *
  13 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
  14 *    from the -rt tree, where it was originally implemented for rtmutexes
  15 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
  16 *    and Sven Dietrich.
  17 *
  18 * Also see Documentation/locking/mutex-design.txt.
  19 */
  20#include <linux/mutex.h>
  21#include <linux/ww_mutex.h>
  22#include <linux/sched/signal.h>
  23#include <linux/sched/rt.h>
  24#include <linux/sched/wake_q.h>
  25#include <linux/sched/debug.h>
  26#include <linux/export.h>
  27#include <linux/spinlock.h>
  28#include <linux/interrupt.h>
  29#include <linux/debug_locks.h>
  30#include <linux/osq_lock.h>
  31
  32#ifdef CONFIG_DEBUG_MUTEXES
  33# include "mutex-debug.h"
  34#else
  35# include "mutex.h"
  36#endif
  37
  38void
  39__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
  40{
  41        atomic_long_set(&lock->owner, 0);
  42        spin_lock_init(&lock->wait_lock);
  43        INIT_LIST_HEAD(&lock->wait_list);
  44#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
  45        osq_lock_init(&lock->osq);
  46#endif
  47
  48        debug_mutex_init(lock, name, key);
  49}
  50EXPORT_SYMBOL(__mutex_init);
  51
  52/*
   53 * @owner: contains a 'struct task_struct *' pointing to the current lock
   54 * owner; NULL means not owned. Since task_struct pointers are aligned to
   55 * at least L1_CACHE_BYTES, we have low bits to store extra state.
  56 *
  57 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
   58 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
  59 * Bit2 indicates handoff has been done and we're waiting for pickup.
  60 */
  61#define MUTEX_FLAG_WAITERS      0x01
  62#define MUTEX_FLAG_HANDOFF      0x02
  63#define MUTEX_FLAG_PICKUP       0x04
  64
  65#define MUTEX_FLAGS             0x07
  66
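/*
 * Illustrative decode (hypothetical addresses, not taken from this file):
 * with the owning task_struct at 0xffff888004b3c000, an owner word of
 * 0xffff888004b3c003 splits via the helpers below into
 *
 *	__owner_task()  -> 0xffff888004b3c000	(the owning task)
 *	__owner_flags() -> MUTEX_FLAG_WAITERS | MUTEX_FLAG_HANDOFF
 */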
  67static inline struct task_struct *__owner_task(unsigned long owner)
  68{
  69        return (struct task_struct *)(owner & ~MUTEX_FLAGS);
  70}
  71
  72static inline unsigned long __owner_flags(unsigned long owner)
  73{
  74        return owner & MUTEX_FLAGS;
  75}
  76
  77/*
   78 * Trylock variant that returns the owning task on failure.
  79 */
  80static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
  81{
  82        unsigned long owner, curr = (unsigned long)current;
  83
  84        owner = atomic_long_read(&lock->owner);
  85        for (;;) { /* must loop, can race against a flag */
  86                unsigned long old, flags = __owner_flags(owner);
  87                unsigned long task = owner & ~MUTEX_FLAGS;
  88
  89                if (task) {
  90                        if (likely(task != curr))
  91                                break;
  92
  93                        if (likely(!(flags & MUTEX_FLAG_PICKUP)))
  94                                break;
  95
  96                        flags &= ~MUTEX_FLAG_PICKUP;
  97                } else {
  98#ifdef CONFIG_DEBUG_MUTEXES
  99                        DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
 100#endif
 101                }
 102
 103                /*
  104                 * We set the HANDOFF bit; we must make sure it doesn't live
  105                 * past the point where we acquire the lock. This would be possible
 106                 * if we (accidentally) set the bit on an unlocked mutex.
 107                 */
 108                flags &= ~MUTEX_FLAG_HANDOFF;
 109
 110                old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
 111                if (old == owner)
 112                        return NULL;
 113
 114                owner = old;
 115        }
 116
 117        return __owner_task(owner);
 118}
 119
 120/*
 121 * Actual trylock that will work on any unlocked state.
 122 */
 123static inline bool __mutex_trylock(struct mutex *lock)
 124{
 125        return !__mutex_trylock_or_owner(lock);
 126}
 127
 128#ifndef CONFIG_DEBUG_LOCK_ALLOC
 129/*
  130 * Lockdep annotations are confined to the slow paths for simplicity.
 131 * There is nothing that would stop spreading the lockdep annotations outwards
 132 * except more code.
 133 */
 134
 135/*
 136 * Optimistic trylock that only works in the uncontended case. Make sure to
 137 * follow with a __mutex_trylock() before failing.
 138 */
 139static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
 140{
 141        unsigned long curr = (unsigned long)current;
 142
 143        if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr))
 144                return true;
 145
 146        return false;
 147}
 148
 149static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
 150{
 151        unsigned long curr = (unsigned long)current;
 152
 153        if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
 154                return true;
 155
 156        return false;
 157}
 158#endif
 159
 160static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
 161{
 162        atomic_long_or(flag, &lock->owner);
 163}
 164
 165static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
 166{
 167        atomic_long_andnot(flag, &lock->owner);
 168}
 169
 170static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
 171{
 172        return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
 173}
 174
 175/*
  176 * Give up ownership to a specific task; when @task == NULL, this is equivalent
  177 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
  178 * WAITERS. Provides RELEASE semantics like a regular unlock; the
  179 * __mutex_trylock() provides the matching ACQUIRE semantics for the handoff.
 180 */
 181static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
 182{
 183        unsigned long owner = atomic_long_read(&lock->owner);
 184
 185        for (;;) {
 186                unsigned long old, new;
 187
 188#ifdef CONFIG_DEBUG_MUTEXES
 189                DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
 190                DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
 191#endif
 192
 193                new = (owner & MUTEX_FLAG_WAITERS);
 194                new |= (unsigned long)task;
 195                if (task)
 196                        new |= MUTEX_FLAG_PICKUP;
 197
 198                old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
 199                if (old == owner)
 200                        break;
 201
 202                owner = old;
 203        }
 204}
 205
 206#ifndef CONFIG_DEBUG_LOCK_ALLOC
 207/*
 208 * We split the mutex lock/unlock logic into separate fastpath and
 209 * slowpath functions, to reduce the register pressure on the fastpath.
 210 * We also put the fastpath first in the kernel image, to make sure the
 211 * branch is predicted by the CPU as default-untaken.
 212 */
 213static void __sched __mutex_lock_slowpath(struct mutex *lock);
 214
 215/**
 216 * mutex_lock - acquire the mutex
 217 * @lock: the mutex to be acquired
 218 *
 219 * Lock the mutex exclusively for this task. If the mutex is not
 220 * available right now, it will sleep until it can get it.
 221 *
 222 * The mutex must later on be released by the same task that
 223 * acquired it. Recursive locking is not allowed. The task
 224 * may not exit without first unlocking the mutex. Also, kernel
 225 * memory where the mutex resides must not be freed with
 226 * the mutex still locked. The mutex must first be initialized
 227 * (or statically defined) before it can be locked. memset()-ing
 228 * the mutex to 0 is not allowed.
 229 *
 230 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 231 * checks that will enforce the restrictions and will also do
 232 * deadlock debugging)
 233 *
 234 * This function is similar to (but not equivalent to) down().
 235 */
 236void __sched mutex_lock(struct mutex *lock)
 237{
 238        might_sleep();
 239
 240        if (!__mutex_trylock_fast(lock))
 241                __mutex_lock_slowpath(lock);
 242}
 243EXPORT_SYMBOL(mutex_lock);
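
/*
 * Illustrative usage sketch (not part of this file; 'my_dev' and its members
 * are hypothetical, and dev->lock is assumed to have been set up with
 * mutex_init()): the canonical lock/unlock pairing around shared data,
 * holding the mutex only across the critical section.
 *
 *	struct my_dev {
 *		struct mutex lock;
 *		unsigned long count;
 *	};
 *
 *	static void my_dev_bump(struct my_dev *dev)
 *	{
 *		mutex_lock(&dev->lock);
 *		dev->count++;
 *		mutex_unlock(&dev->lock);
 *	}
 */
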
 244#endif
 245
 246static __always_inline void
 247ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
 248{
 249#ifdef CONFIG_DEBUG_MUTEXES
 250        /*
 251         * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
 252         * but released with a normal mutex_unlock in this call.
 253         *
 254         * This should never happen, always use ww_mutex_unlock.
 255         */
 256        DEBUG_LOCKS_WARN_ON(ww->ctx);
 257
 258        /*
  259         * Not quite done after calling ww_acquire_done()?
 260         */
 261        DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
 262
 263        if (ww_ctx->contending_lock) {
 264                /*
 265                 * After -EDEADLK you tried to
 266                 * acquire a different ww_mutex? Bad!
 267                 */
 268                DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
 269
 270                /*
 271                 * You called ww_mutex_lock after receiving -EDEADLK,
 272                 * but 'forgot' to unlock everything else first?
 273                 */
 274                DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
 275                ww_ctx->contending_lock = NULL;
 276        }
 277
 278        /*
 279         * Naughty, using a different class will lead to undefined behavior!
 280         */
 281        DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
 282#endif
 283        ww_ctx->acquired++;
 284}
 285
 286static inline bool __sched
 287__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
 288{
 289        return a->stamp - b->stamp <= LONG_MAX &&
 290               (a->stamp != b->stamp || a > b);
 291}
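
/*
 * Worked example (hypothetical stamp values): with a->stamp == 5 and
 * b->stamp == 3, the difference is 2 <= LONG_MAX and the stamps differ, so
 * @a is the later (younger) context and is the one that must back off when
 * the two contend. The unsigned subtraction keeps this correct across
 * counter wrap-around: for a->stamp == 1 and b->stamp == ULONG_MAX the
 * difference is 2, so @a is still considered to be after @b.
 */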
 292
 293/*
 294 * Wake up any waiters that may have to back off when the lock is held by the
 295 * given context.
 296 *
 297 * Due to the invariants on the wait list, this can only affect the first
 298 * waiter with a context.
 299 *
 300 * The current task must not be on the wait list.
 301 */
 302static void __sched
 303__ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
 304{
 305        struct mutex_waiter *cur;
 306
 307        lockdep_assert_held(&lock->wait_lock);
 308
 309        list_for_each_entry(cur, &lock->wait_list, list) {
 310                if (!cur->ww_ctx)
 311                        continue;
 312
 313                if (cur->ww_ctx->acquired > 0 &&
 314                    __ww_ctx_stamp_after(cur->ww_ctx, ww_ctx)) {
 315                        debug_mutex_wake_waiter(lock, cur);
 316                        wake_up_process(cur->task);
 317                }
 318
 319                break;
 320        }
 321}
 322
 323/*
  324 * After acquiring the lock with the fastpath, or when we lost out in the
  325 * contested slowpath, set the ctx and wake up any waiters so they can recheck.
 326 */
 327static __always_inline void
 328ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 329{
 330        ww_mutex_lock_acquired(lock, ctx);
 331
 332        lock->ctx = ctx;
 333
 334        /*
 335         * The lock->ctx update should be visible on all cores before
 336         * the atomic read is done, otherwise contended waiters might be
  337         * missed. A contended waiter will either see ww_ctx == NULL
  338         * and keep spinning, or it will acquire wait_lock, add itself
  339         * to the waiter list and sleep.
 340         */
 341        smp_mb(); /* ^^^ */
 342
 343        /*
  344         * Check if the lock is contended; if not, there is nobody to wake up.
 345         */
 346        if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
 347                return;
 348
 349        /*
  350         * Uh oh, we raced in the fastpath; wake up everyone in this case,
  351         * so they can see the new lock->ctx.
 352         */
 353        spin_lock(&lock->base.wait_lock);
 354        __ww_mutex_wakeup_for_backoff(&lock->base, ctx);
 355        spin_unlock(&lock->base.wait_lock);
 356}
 357
 358/*
 359 * After acquiring lock in the slowpath set ctx.
 360 *
 361 * Unlike for the fast path, the caller ensures that waiters are woken up where
 362 * necessary.
 363 *
 364 * Callers must hold the mutex wait_lock.
 365 */
 366static __always_inline void
 367ww_mutex_set_context_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 368{
 369        ww_mutex_lock_acquired(lock, ctx);
 370        lock->ctx = ctx;
 371}
 372
 373#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 374
 375static inline
 376bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 377                            struct mutex_waiter *waiter)
 378{
 379        struct ww_mutex *ww;
 380
 381        ww = container_of(lock, struct ww_mutex, base);
 382
 383        /*
  384         * If ww->ctx is set the contents are undefined; only
  385         * by acquiring wait_lock is there a guarantee that
  386         * they are valid when read.
 387         *
 388         * As such, when deadlock detection needs to be
 389         * performed the optimistic spinning cannot be done.
 390         *
 391         * Check this in every inner iteration because we may
 392         * be racing against another thread's ww_mutex_lock.
 393         */
 394        if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
 395                return false;
 396
 397        /*
 398         * If we aren't on the wait list yet, cancel the spin
  399         * if there are waiters. We want to avoid stealing the
 400         * lock from a waiter with an earlier stamp, since the
 401         * other thread may already own a lock that we also
 402         * need.
 403         */
 404        if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
 405                return false;
 406
 407        /*
 408         * Similarly, stop spinning if we are no longer the
 409         * first waiter.
 410         */
 411        if (waiter && !__mutex_waiter_is_first(lock, waiter))
 412                return false;
 413
 414        return true;
 415}
 416
 417/*
 418 * Look out! "owner" is an entirely speculative pointer access and not
 419 * reliable.
 420 *
 421 * "noinline" so that this function shows up on perf profiles.
 422 */
 423static noinline
 424bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 425                         struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
 426{
 427        bool ret = true;
 428
 429        rcu_read_lock();
 430        while (__mutex_owner(lock) == owner) {
 431                /*
  432                 * Ensure we emit the owner->on_cpu dereference _after_
 433                 * checking lock->owner still matches owner. If that fails,
 434                 * owner might point to freed memory. If it still matches,
 435                 * the rcu_read_lock() ensures the memory stays valid.
 436                 */
 437                barrier();
 438
 439                /*
  440                 * Use vcpu_is_preempted() to detect lock holder preemption.
 441                 */
 442                if (!owner->on_cpu || need_resched() ||
 443                                vcpu_is_preempted(task_cpu(owner))) {
 444                        ret = false;
 445                        break;
 446                }
 447
 448                if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
 449                        ret = false;
 450                        break;
 451                }
 452
 453                cpu_relax();
 454        }
 455        rcu_read_unlock();
 456
 457        return ret;
 458}
 459
 460/*
 461 * Initial check for entering the mutex spinning loop
 462 */
 463static inline int mutex_can_spin_on_owner(struct mutex *lock)
 464{
 465        struct task_struct *owner;
 466        int retval = 1;
 467
 468        if (need_resched())
 469                return 0;
 470
 471        rcu_read_lock();
 472        owner = __mutex_owner(lock);
 473
 474        /*
  475         * Because of the lock holder preemption issue, we skip spinning if the
  476         * owner task is not running on a CPU or its CPU is preempted.
 477         */
 478        if (owner)
 479                retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
 480        rcu_read_unlock();
 481
 482        /*
 483         * If lock->owner is not set, the mutex has been released. Return true
 484         * such that we'll trylock in the spin path, which is a faster option
 485         * than the blocking slow path.
 486         */
 487        return retval;
 488}
 489
 490/*
 491 * Optimistic spinning.
 492 *
 493 * We try to spin for acquisition when we find that the lock owner
 494 * is currently running on a (different) CPU and while we don't
 495 * need to reschedule. The rationale is that if the lock owner is
 496 * running, it is likely to release the lock soon.
 497 *
  498 * The mutex spinners are queued up using an MCS lock so that only one
 499 * spinner can compete for the mutex. However, if mutex spinning isn't
 500 * going to happen, there is no point in going through the lock/unlock
 501 * overhead.
 502 *
 503 * Returns true when the lock was taken, otherwise false, indicating
 504 * that we need to jump to the slowpath and sleep.
 505 *
  506 * @waiter is non-NULL if the spinner is already a waiter in the wait
  507 * queue. Such a waiter-spinner will spin on the lock directly and concurrently
 508 * with the spinner at the head of the OSQ, if present, until the owner is
 509 * changed to itself.
 510 */
 511static __always_inline bool
 512mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 513                      const bool use_ww_ctx, struct mutex_waiter *waiter)
 514{
 515        if (!waiter) {
 516                /*
 517                 * The purpose of the mutex_can_spin_on_owner() function is
 518                 * to eliminate the overhead of osq_lock() and osq_unlock()
 519                 * in case spinning isn't possible. As a waiter-spinner
 520                 * is not going to take OSQ lock anyway, there is no need
 521                 * to call mutex_can_spin_on_owner().
 522                 */
 523                if (!mutex_can_spin_on_owner(lock))
 524                        goto fail;
 525
 526                /*
 527                 * In order to avoid a stampede of mutex spinners trying to
  528                 * acquire the mutex all at once, the spinners need to take an
 529                 * MCS (queued) lock first before spinning on the owner field.
 530                 */
 531                if (!osq_lock(&lock->osq))
 532                        goto fail;
 533        }
 534
 535        for (;;) {
 536                struct task_struct *owner;
 537
 538                /* Try to acquire the mutex... */
 539                owner = __mutex_trylock_or_owner(lock);
 540                if (!owner)
 541                        break;
 542
 543                /*
 544                 * There's an owner, wait for it to either
 545                 * release the lock or go to sleep.
 546                 */
 547                if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
 548                        goto fail_unlock;
 549
 550                /*
 551                 * The cpu_relax() call is a compiler barrier which forces
 552                 * everything in this loop to be re-loaded. We don't need
 553                 * memory barriers as we'll eventually observe the right
 554                 * values at the cost of a few extra spins.
 555                 */
 556                cpu_relax();
 557        }
 558
 559        if (!waiter)
 560                osq_unlock(&lock->osq);
 561
 562        return true;
 563
 564
 565fail_unlock:
 566        if (!waiter)
 567                osq_unlock(&lock->osq);
 568
 569fail:
 570        /*
 571         * If we fell out of the spin path because of need_resched(),
 572         * reschedule now, before we try-lock the mutex. This avoids getting
 573         * scheduled out right after we obtained the mutex.
 574         */
 575        if (need_resched()) {
 576                /*
 577                 * We _should_ have TASK_RUNNING here, but just in case
 578                 * we do not, make it so, otherwise we might get stuck.
 579                 */
 580                __set_current_state(TASK_RUNNING);
 581                schedule_preempt_disabled();
 582        }
 583
 584        return false;
 585}
 586#else
 587static __always_inline bool
 588mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 589                      const bool use_ww_ctx, struct mutex_waiter *waiter)
 590{
 591        return false;
 592}
 593#endif
 594
 595static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
 596
 597/**
 598 * mutex_unlock - release the mutex
 599 * @lock: the mutex to be released
 600 *
 601 * Unlock a mutex that has been locked by this task previously.
 602 *
 603 * This function must not be used in interrupt context. Unlocking
  604 * a mutex that is not locked is not allowed.
 605 *
 606 * This function is similar to (but not equivalent to) up().
 607 */
 608void __sched mutex_unlock(struct mutex *lock)
 609{
 610#ifndef CONFIG_DEBUG_LOCK_ALLOC
 611        if (__mutex_unlock_fast(lock))
 612                return;
 613#endif
 614        __mutex_unlock_slowpath(lock, _RET_IP_);
 615}
 616EXPORT_SYMBOL(mutex_unlock);
 617
 618/**
 619 * ww_mutex_unlock - release the w/w mutex
 620 * @lock: the mutex to be released
 621 *
 622 * Unlock a mutex that has been locked by this task previously with any of the
 623 * ww_mutex_lock* functions (with or without an acquire context). It is
 624 * forbidden to release the locks after releasing the acquire context.
 625 *
 626 * This function must not be used in interrupt context. Unlocking
  627 * of an unlocked mutex is not allowed.
 628 */
 629void __sched ww_mutex_unlock(struct ww_mutex *lock)
 630{
 631        /*
 632         * The unlocking fastpath is the 0->1 transition from 'locked'
 633         * into 'unlocked' state:
 634         */
 635        if (lock->ctx) {
 636#ifdef CONFIG_DEBUG_MUTEXES
 637                DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
 638#endif
 639                if (lock->ctx->acquired > 0)
 640                        lock->ctx->acquired--;
 641                lock->ctx = NULL;
 642        }
 643
 644        mutex_unlock(&lock->base);
 645}
 646EXPORT_SYMBOL(ww_mutex_unlock);
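
/*
 * Illustrative two-lock acquire/backoff sketch (hypothetical names, error
 * handling and ww_mutex_init() setup trimmed; a sketch only, not part of
 * this file). On -EDEADLK the younger context drops what it holds, sleeps
 * on the contended lock with ww_mutex_lock_slow(), and retries with that
 * lock taken first:
 *
 *	static DEFINE_WW_CLASS(my_ww_class);
 *
 *	static void my_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
 *	{
 *		struct ww_acquire_ctx ctx;
 *
 *		ww_acquire_init(&ctx, &my_ww_class);
 *
 *		ww_mutex_lock(a, &ctx);
 *		while (ww_mutex_lock(b, &ctx) == -EDEADLK) {
 *			ww_mutex_unlock(a);
 *			ww_mutex_lock_slow(b, &ctx);
 *			swap(a, b);
 *		}
 *		ww_acquire_done(&ctx);
 *
 *		... use the data protected by @a and @b ...
 *
 *		ww_mutex_unlock(a);
 *		ww_mutex_unlock(b);
 *		ww_acquire_fini(&ctx);
 *	}
 */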
 647
 648static inline int __sched
 649__ww_mutex_lock_check_stamp(struct mutex *lock, struct mutex_waiter *waiter,
 650                            struct ww_acquire_ctx *ctx)
 651{
 652        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
 653        struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
 654        struct mutex_waiter *cur;
 655
 656        if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
 657                goto deadlock;
 658
 659        /*
 660         * If there is a waiter in front of us that has a context, then its
 661         * stamp is earlier than ours and we must back off.
 662         */
 663        cur = waiter;
 664        list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
 665                if (cur->ww_ctx)
 666                        goto deadlock;
 667        }
 668
 669        return 0;
 670
 671deadlock:
 672#ifdef CONFIG_DEBUG_MUTEXES
 673        DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
 674        ctx->contending_lock = ww;
 675#endif
 676        return -EDEADLK;
 677}
 678
 679static inline int __sched
 680__ww_mutex_add_waiter(struct mutex_waiter *waiter,
 681                      struct mutex *lock,
 682                      struct ww_acquire_ctx *ww_ctx)
 683{
 684        struct mutex_waiter *cur;
 685        struct list_head *pos;
 686
 687        if (!ww_ctx) {
 688                list_add_tail(&waiter->list, &lock->wait_list);
 689                return 0;
 690        }
 691
 692        /*
 693         * Add the waiter before the first waiter with a higher stamp.
 694         * Waiters without a context are skipped to avoid starving
 695         * them.
 696         */
 697        pos = &lock->wait_list;
 698        list_for_each_entry_reverse(cur, &lock->wait_list, list) {
 699                if (!cur->ww_ctx)
 700                        continue;
 701
 702                if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
 703                        /* Back off immediately if necessary. */
 704                        if (ww_ctx->acquired > 0) {
 705#ifdef CONFIG_DEBUG_MUTEXES
 706                                struct ww_mutex *ww;
 707
 708                                ww = container_of(lock, struct ww_mutex, base);
 709                                DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
 710                                ww_ctx->contending_lock = ww;
 711#endif
 712                                return -EDEADLK;
 713                        }
 714
 715                        break;
 716                }
 717
 718                pos = &cur->list;
 719
 720                /*
 721                 * Wake up the waiter so that it gets a chance to back
 722                 * off.
 723                 */
 724                if (cur->ww_ctx->acquired > 0) {
 725                        debug_mutex_wake_waiter(lock, cur);
 726                        wake_up_process(cur->task);
 727                }
 728        }
 729
 730        list_add_tail(&waiter->list, pos);
 731        return 0;
 732}
 733
 734/*
 735 * Lock a mutex (possibly interruptible), slowpath:
 736 */
 737static __always_inline int __sched
 738__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 739                    struct lockdep_map *nest_lock, unsigned long ip,
 740                    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 741{
 742        struct mutex_waiter waiter;
 743        bool first = false;
 744        struct ww_mutex *ww;
 745        int ret;
 746
 747        might_sleep();
 748
 749        ww = container_of(lock, struct ww_mutex, base);
 750        if (use_ww_ctx && ww_ctx) {
 751                if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
 752                        return -EALREADY;
 753        }
 754
 755        preempt_disable();
 756        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 757
 758        if (__mutex_trylock(lock) ||
 759            mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
 760                /* got the lock, yay! */
 761                lock_acquired(&lock->dep_map, ip);
 762                if (use_ww_ctx && ww_ctx)
 763                        ww_mutex_set_context_fastpath(ww, ww_ctx);
 764                preempt_enable();
 765                return 0;
 766        }
 767
 768        spin_lock(&lock->wait_lock);
 769        /*
 770         * After waiting to acquire the wait_lock, try again.
 771         */
 772        if (__mutex_trylock(lock)) {
 773                if (use_ww_ctx && ww_ctx)
 774                        __ww_mutex_wakeup_for_backoff(lock, ww_ctx);
 775
 776                goto skip_wait;
 777        }
 778
 779        debug_mutex_lock_common(lock, &waiter);
 780        debug_mutex_add_waiter(lock, &waiter, current);
 781
 782        lock_contended(&lock->dep_map, ip);
 783
 784        if (!use_ww_ctx) {
 785                /* add waiting tasks to the end of the waitqueue (FIFO): */
 786                list_add_tail(&waiter.list, &lock->wait_list);
 787
 788#ifdef CONFIG_DEBUG_MUTEXES
 789                waiter.ww_ctx = MUTEX_POISON_WW_CTX;
 790#endif
 791        } else {
 792                /* Add in stamp order, waking up waiters that must back off. */
 793                ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
 794                if (ret)
 795                        goto err_early_backoff;
 796
 797                waiter.ww_ctx = ww_ctx;
 798        }
 799
 800        waiter.task = current;
 801
 802        if (__mutex_waiter_is_first(lock, &waiter))
 803                __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
 804
 805        set_current_state(state);
 806        for (;;) {
 807                /*
 808                 * Once we hold wait_lock, we're serialized against
  809                 * mutex_unlock() handing the lock off to us; do a trylock
 810                 * before testing the error conditions to make sure we pick up
 811                 * the handoff.
 812                 */
 813                if (__mutex_trylock(lock))
 814                        goto acquired;
 815
 816                /*
 817                 * Check for signals and wound conditions while holding
 818                 * wait_lock. This ensures the lock cancellation is ordered
 819                 * against mutex_unlock() and wake-ups do not go missing.
 820                 */
 821                if (unlikely(signal_pending_state(state, current))) {
 822                        ret = -EINTR;
 823                        goto err;
 824                }
 825
 826                if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
 827                        ret = __ww_mutex_lock_check_stamp(lock, &waiter, ww_ctx);
 828                        if (ret)
 829                                goto err;
 830                }
 831
 832                spin_unlock(&lock->wait_lock);
 833                schedule_preempt_disabled();
 834
 835                /*
 836                 * ww_mutex needs to always recheck its position since its waiter
 837                 * list is not FIFO ordered.
 838                 */
 839                if ((use_ww_ctx && ww_ctx) || !first) {
 840                        first = __mutex_waiter_is_first(lock, &waiter);
 841                        if (first)
 842                                __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
 843                }
 844
 845                set_current_state(state);
 846                /*
 847                 * Here we order against unlock; we must either see it change
 848                 * state back to RUNNING and fall through the next schedule(),
 849                 * or we must see its unlock and acquire.
 850                 */
 851                if (__mutex_trylock(lock) ||
 852                    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
 853                        break;
 854
 855                spin_lock(&lock->wait_lock);
 856        }
 857        spin_lock(&lock->wait_lock);
 858acquired:
 859        __set_current_state(TASK_RUNNING);
 860
 861        mutex_remove_waiter(lock, &waiter, current);
 862        if (likely(list_empty(&lock->wait_list)))
 863                __mutex_clear_flag(lock, MUTEX_FLAGS);
 864
 865        debug_mutex_free_waiter(&waiter);
 866
 867skip_wait:
 868        /* got the lock - cleanup and rejoice! */
 869        lock_acquired(&lock->dep_map, ip);
 870
 871        if (use_ww_ctx && ww_ctx)
 872                ww_mutex_set_context_slowpath(ww, ww_ctx);
 873
 874        spin_unlock(&lock->wait_lock);
 875        preempt_enable();
 876        return 0;
 877
 878err:
 879        __set_current_state(TASK_RUNNING);
 880        mutex_remove_waiter(lock, &waiter, current);
 881err_early_backoff:
 882        spin_unlock(&lock->wait_lock);
 883        debug_mutex_free_waiter(&waiter);
 884        mutex_release(&lock->dep_map, 1, ip);
 885        preempt_enable();
 886        return ret;
 887}
 888
 889static int __sched
 890__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
 891             struct lockdep_map *nest_lock, unsigned long ip)
 892{
 893        return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
 894}
 895
 896static int __sched
 897__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
 898                struct lockdep_map *nest_lock, unsigned long ip,
 899                struct ww_acquire_ctx *ww_ctx)
 900{
 901        return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
 902}
 903
 904#ifdef CONFIG_DEBUG_LOCK_ALLOC
 905void __sched
 906mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 907{
 908        __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
 909}
 910
 911EXPORT_SYMBOL_GPL(mutex_lock_nested);
 912
 913void __sched
 914_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 915{
 916        __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
 917}
 918EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
 919
 920int __sched
 921mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
 922{
 923        return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
 924}
 925EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
 926
 927int __sched
 928mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 929{
 930        return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
 931}
 932EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
 933
 934void __sched
 935mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
 936{
 937        int token;
 938
 939        might_sleep();
 940
 941        token = io_schedule_prepare();
 942        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
 943                            subclass, NULL, _RET_IP_, NULL, 0);
 944        io_schedule_finish(token);
 945}
 946EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
 947
 948static inline int
 949ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 950{
 951#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
 952        unsigned tmp;
 953
 954        if (ctx->deadlock_inject_countdown-- == 0) {
 955                tmp = ctx->deadlock_inject_interval;
 956                if (tmp > UINT_MAX/4)
 957                        tmp = UINT_MAX;
 958                else
 959                        tmp = tmp*2 + tmp + tmp/2;
 960
 961                ctx->deadlock_inject_interval = tmp;
 962                ctx->deadlock_inject_countdown = tmp;
 963                ctx->contending_lock = lock;
 964
 965                ww_mutex_unlock(lock);
 966
 967                return -EDEADLK;
 968        }
 969#endif
 970
 971        return 0;
 972}
 973
 974int __sched
 975ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 976{
 977        int ret;
 978
 979        might_sleep();
 980        ret =  __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
 981                               0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
 982                               ctx);
 983        if (!ret && ctx && ctx->acquired > 1)
 984                return ww_mutex_deadlock_injection(lock, ctx);
 985
 986        return ret;
 987}
 988EXPORT_SYMBOL_GPL(ww_mutex_lock);
 989
 990int __sched
 991ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 992{
 993        int ret;
 994
 995        might_sleep();
 996        ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
 997                              0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
 998                              ctx);
 999
1000        if (!ret && ctx && ctx->acquired > 1)
1001                return ww_mutex_deadlock_injection(lock, ctx);
1002
1003        return ret;
1004}
1005EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
1006
1007#endif
1008
1009/*
1010 * Release the lock, slowpath:
1011 */
1012static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
1013{
1014        struct task_struct *next = NULL;
1015        DEFINE_WAKE_Q(wake_q);
1016        unsigned long owner;
1017
1018        mutex_release(&lock->dep_map, 1, ip);
1019
1020        /*
1021         * Release the lock before (potentially) taking the spinlock such that
1022         * other contenders can get on with things ASAP.
1023         *
 1024         * Except when HANDOFF is set; in that case we must not clear the owner
 1025         * field, but instead set it to the top waiter.
1026         */
1027        owner = atomic_long_read(&lock->owner);
1028        for (;;) {
1029                unsigned long old;
1030
1031#ifdef CONFIG_DEBUG_MUTEXES
1032                DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
1033                DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
1034#endif
1035
1036                if (owner & MUTEX_FLAG_HANDOFF)
1037                        break;
1038
1039                old = atomic_long_cmpxchg_release(&lock->owner, owner,
1040                                                  __owner_flags(owner));
1041                if (old == owner) {
1042                        if (owner & MUTEX_FLAG_WAITERS)
1043                                break;
1044
1045                        return;
1046                }
1047
1048                owner = old;
1049        }
1050
1051        spin_lock(&lock->wait_lock);
1052        debug_mutex_unlock(lock);
1053        if (!list_empty(&lock->wait_list)) {
1054                /* get the first entry from the wait-list: */
1055                struct mutex_waiter *waiter =
1056                        list_first_entry(&lock->wait_list,
1057                                         struct mutex_waiter, list);
1058
1059                next = waiter->task;
1060
1061                debug_mutex_wake_waiter(lock, waiter);
1062                wake_q_add(&wake_q, next);
1063        }
1064
1065        if (owner & MUTEX_FLAG_HANDOFF)
1066                __mutex_handoff(lock, next);
1067
1068        spin_unlock(&lock->wait_lock);
1069
1070        wake_up_q(&wake_q);
1071}
1072
1073#ifndef CONFIG_DEBUG_LOCK_ALLOC
1074/*
1075 * Here come the less common (and hence less performance-critical) APIs:
1076 * mutex_lock_interruptible() and mutex_trylock().
1077 */
1078static noinline int __sched
1079__mutex_lock_killable_slowpath(struct mutex *lock);
1080
1081static noinline int __sched
1082__mutex_lock_interruptible_slowpath(struct mutex *lock);
1083
1084/**
1085 * mutex_lock_interruptible - acquire the mutex, interruptible
1086 * @lock: the mutex to be acquired
1087 *
1088 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
1089 * been acquired or sleep until the mutex becomes available. If a
1090 * signal arrives while waiting for the lock then this function
1091 * returns -EINTR.
1092 *
1093 * This function is similar to (but not equivalent to) down_interruptible().
1094 */
1095int __sched mutex_lock_interruptible(struct mutex *lock)
1096{
1097        might_sleep();
1098
1099        if (__mutex_trylock_fast(lock))
1100                return 0;
1101
1102        return __mutex_lock_interruptible_slowpath(lock);
1103}
1104
1105EXPORT_SYMBOL(mutex_lock_interruptible);
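
/*
 * Illustrative caller sketch (hypothetical 'my_dev'): a typical syscall-path
 * user propagates a signal-interrupted lock attempt back to userspace
 * instead of blocking uninterruptibly.
 *
 *	static long my_dev_op(struct my_dev *dev)
 *	{
 *		if (mutex_lock_interruptible(&dev->lock))
 *			return -ERESTARTSYS;
 *
 *		... critical section ...
 *
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 */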
1106
1107int __sched mutex_lock_killable(struct mutex *lock)
1108{
1109        might_sleep();
1110
1111        if (__mutex_trylock_fast(lock))
1112                return 0;
1113
1114        return __mutex_lock_killable_slowpath(lock);
1115}
1116EXPORT_SYMBOL(mutex_lock_killable);
1117
1118void __sched mutex_lock_io(struct mutex *lock)
1119{
1120        int token;
1121
1122        token = io_schedule_prepare();
1123        mutex_lock(lock);
1124        io_schedule_finish(token);
1125}
1126EXPORT_SYMBOL_GPL(mutex_lock_io);
1127
1128static noinline void __sched
1129__mutex_lock_slowpath(struct mutex *lock)
1130{
1131        __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
1132}
1133
1134static noinline int __sched
1135__mutex_lock_killable_slowpath(struct mutex *lock)
1136{
1137        return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
1138}
1139
1140static noinline int __sched
1141__mutex_lock_interruptible_slowpath(struct mutex *lock)
1142{
1143        return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
1144}
1145
1146static noinline int __sched
1147__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1148{
1149        return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
1150                               _RET_IP_, ctx);
1151}
1152
1153static noinline int __sched
1154__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
1155                                            struct ww_acquire_ctx *ctx)
1156{
1157        return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
1158                               _RET_IP_, ctx);
1159}
1160
1161#endif
1162
1163/**
1164 * mutex_trylock - try to acquire the mutex, without waiting
1165 * @lock: the mutex to be acquired
1166 *
1167 * Try to acquire the mutex atomically. Returns 1 if the mutex
1168 * has been acquired successfully, and 0 on contention.
1169 *
1170 * NOTE: this function follows the spin_trylock() convention, so
1171 * it is negated from the down_trylock() return values! Be careful
1172 * about this when converting semaphore users to mutexes.
1173 *
1174 * This function must not be used in interrupt context. The
1175 * mutex must be released by the same task that acquired it.
1176 */
1177int __sched mutex_trylock(struct mutex *lock)
1178{
1179        bool locked = __mutex_trylock(lock);
1180
1181        if (locked)
1182                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1183
1184        return locked;
1185}
1186EXPORT_SYMBOL(mutex_trylock);
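
/*
 * Illustrative sketch (hypothetical 'dev'): because mutex_trylock() returns
 * 1 on success, the usual pattern is the inverse of down_trylock():
 *
 *	if (mutex_trylock(&dev->lock)) {
 *		... fast path, lock is held ...
 *		mutex_unlock(&dev->lock);
 *	} else {
 *		... lock is busy: defer the work or fall back to sleeping ...
 *	}
 */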
1187
1188#ifndef CONFIG_DEBUG_LOCK_ALLOC
1189int __sched
1190ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1191{
1192        might_sleep();
1193
1194        if (__mutex_trylock_fast(&lock->base)) {
1195                if (ctx)
1196                        ww_mutex_set_context_fastpath(lock, ctx);
1197                return 0;
1198        }
1199
1200        return __ww_mutex_lock_slowpath(lock, ctx);
1201}
1202EXPORT_SYMBOL(ww_mutex_lock);
1203
1204int __sched
1205ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1206{
1207        might_sleep();
1208
1209        if (__mutex_trylock_fast(&lock->base)) {
1210                if (ctx)
1211                        ww_mutex_set_context_fastpath(lock, ctx);
1212                return 0;
1213        }
1214
1215        return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
1216}
1217EXPORT_SYMBOL(ww_mutex_lock_interruptible);
1218
1219#endif
1220
1221/**
 1222 * atomic_dec_and_mutex_lock - return holding the mutex if we decrement to 0
 1223 * @cnt: the atomic counter which we are to decrement
 1224 * @lock: the mutex to return holding if we decrement to 0
 1225 *
 1226 * Return 1 and hold @lock if the decrement reaches 0, return 0 otherwise.
1227 */
1228int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
1229{
1230        /* dec if we can't possibly hit 0 */
1231        if (atomic_add_unless(cnt, -1, 1))
1232                return 0;
1233        /* we might hit 0, so take the lock */
1234        mutex_lock(lock);
1235        if (!atomic_dec_and_test(cnt)) {
1236                /* when we actually did the dec, we didn't hit 0 */
1237                mutex_unlock(lock);
1238                return 0;
1239        }
1240        /* we hit 0, and we hold the lock */
1241        return 1;
1242}
1243EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
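
/*
 * Illustrative teardown sketch (hypothetical 'my_obj' and 'registry_lock'):
 * drop a reference and, only when it was the last one, unlink and free the
 * object while holding the mutex that protects the lookup structure.
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (atomic_dec_and_mutex_lock(&obj->refcount, &registry_lock)) {
 *			list_del(&obj->node);
 *			mutex_unlock(&registry_lock);
 *			kfree(obj);
 *		}
 *	}
 */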
1244