linux/kernel/locking/qspinlock.c
/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2013-2014,2018 Red Hat, Inc.
 * (C) Copyright 2015 Intel Corp.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <longman@redhat.com>
 *          Peter Zijlstra <peterz@infradead.org>
 */

#ifndef _GEN_PV_LOCK_SLOWPATH

#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/qspinlock.h>

/*
 * Include queued spinlock statistics code
 */
#include "qspinlock_stat.h"

/*
 * The basic principle of a queue-based spinlock can best be understood
 * by studying a classic queue-based spinlock implementation called the
 * MCS lock. The paper below provides a good description for this kind
 * of lock.
 *
 * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
 *
 * This queued spinlock implementation is based on the MCS lock; however, to
 * make it fit the 4 bytes we assume spinlock_t to be, and to preserve its
 * existing API, we must adapt it.
 *
 * In particular, where the traditional MCS lock consists of a tail pointer
 * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
 * unlock the next pending waiter (next->locked), we compress both of these:
 * {tail, next->locked} into a single u32 value.
 *
 * A spinlock disables recursion of its own context, and there is a limit to
 * the contexts that can nest: task, softirq, hardirq and nmi. As there are
 * at most 4 nesting levels, the nesting level can be encoded by a 2-bit
 * number. We can then encode the tail by combining the 2-bit nesting level
 * with the cpu number. With one byte for the lock value and 3 bytes for the
 * tail, only a 32-bit word is now needed. Even though we only need 1 bit for
 * the lock, we extend it to a full byte to achieve better performance on
 * architectures that support atomic byte writes.
 *
 * We also change the first spinner to spin on the lock bit instead of its
 * node; thereby avoiding the need to carry a node from lock to unlock, and
 * preserving the existing lock API. This also makes the unlock code simpler
 * and faster.
 *
 * N.B. The current implementation only supports architectures that allow
 *      atomic operations on smaller 8-bit and 16-bit data types.
 */
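
/*
 * Illustrative sketch, not part of the upstream file: assuming the default
 * small-NR_CPUS layout from include/asm-generic/qspinlock_types.h, the
 * 32-bit lock word described above is split roughly like this:
 *
 *	bits  0- 7: locked byte
 *	bit      8: pending
 *	bits  9-15: unused
 *	bits 16-17: tail index (which of the 4 per-cpu MCS nodes)
 *	bits 18-31: tail cpu   (cpu number + 1; 0 means "no tail")
 *
 * When NR_CPUS >= 16K the pending field shrinks to a single bit and the tail
 * grows accordingly; the authoritative offsets live in qspinlock_types.h.
 */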

#include "mcs_spinlock.h"

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define MAX_NODES       8
#else
#define MAX_NODES       4
#endif

/*
 * The pending bit spinning loop count.
 * This heuristic is used to limit the number of lockword accesses
 * made by atomic_cond_read_relaxed when waiting for the lock to
 * transition out of the "== _Q_PENDING_VAL" state. We don't spin
 * indefinitely because there's no guarantee that we'll make forward
 * progress.
 */
#ifndef _Q_PENDING_LOOPS
#define _Q_PENDING_LOOPS        1
#endif

/*
 * Per-CPU queue node structures; we can never have more than 4 nested
 * contexts: task, softirq, hardirq, nmi.
 *
 * Exactly fits one 64-byte cacheline on a 64-bit architecture.
 *
 * PV doubles the storage and uses the second cacheline for PV state.
 */
static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);
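
/*
 * For reference (sketched here, not defined in this file): the node type is
 * the generic MCS node from kernel/locking/mcs_spinlock.h, which is 16 bytes
 * on a 64-bit architecture:
 *
 *	struct mcs_spinlock {
 *		struct mcs_spinlock *next;
 *		int locked;	// 1 if lock acquired
 *		int count;	// nesting count, used by this file
 *	};
 *
 * Four of these are 64 bytes, which is where the "exactly fits one 64-byte
 * cacheline" remark above comes from.
 */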

/*
 * We must be able to distinguish between no-tail and the tail at 0:0,
 * therefore increment the cpu number by one.
 */

static inline __pure u32 encode_tail(int cpu, int idx)
{
        u32 tail;

#ifdef CONFIG_DEBUG_SPINLOCK
        BUG_ON(idx > 3);
#endif
        tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
        tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */

        return tail;
}

static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
{
        int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
        int idx = (tail &  _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;

        return per_cpu_ptr(&mcs_nodes[idx], cpu);
}
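
/*
 * A worked example of the encoding above, assuming the common layout where
 * _Q_TAIL_IDX_OFFSET == 16 and _Q_TAIL_CPU_OFFSET == 18 (values come from
 * qspinlock_types.h, not from this file):
 *
 *	encode_tail(cpu = 5, idx = 2):
 *		tail  = (5 + 1) << 18  = 0x00180000
 *		tail |=       2 << 16  = 0x00020000
 *		tail                   = 0x001a0000
 *
 *	decode_tail(0x001a0000):
 *		cpu = (0x001a0000 >> 18) - 1 = 5
 *		idx = (0x001a0000 & _Q_TAIL_IDX_MASK) >> 16 = 2
 *		-> per_cpu_ptr(&mcs_nodes[2], 5)
 *
 * The +1/-1 on the cpu number is what lets tail == 0 mean "queue empty".
 */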

#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)

#if _Q_PENDING_BITS == 8
/**
 * clear_pending - clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,* -> *,0,*
 */
static __always_inline void clear_pending(struct qspinlock *lock)
{
        WRITE_ONCE(lock->pending, 0);
}

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 *
 * Lock stealing is not allowed if this function is used.
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
        WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
}

/*
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail), which heads an address dependency
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
        /*
         * We can use relaxed semantics since the caller ensures that the
         * MCS node is properly initialized before updating the tail.
         */
        return (u32)xchg_relaxed(&lock->tail,
                                 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
}
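
/*
 * The 16-bit lock->tail and lock->locked_pending accesses above rely on the
 * struct qspinlock definition in qspinlock_types.h, which (sketched here for
 * reference, little-endian field order shown) overlays the atomic word with
 * byte and halfword views:
 *
 *	typedef struct qspinlock {
 *		union {
 *			atomic_t val;
 *			struct {		// byte view
 *				u8 locked;
 *				u8 pending;
 *			};
 *			struct {		// halfword view
 *				u16 locked_pending;
 *				u16 tail;
 *			};
 *		};
 *	} arch_spinlock_t;
 *
 * On big-endian kernels the fields are declared in the opposite order so
 * that "locked" still aliases the least significant byte of "val".
 */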

#else /* _Q_PENDING_BITS == 8 */

/**
 * clear_pending - clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,* -> *,0,*
 */
static __always_inline void clear_pending(struct qspinlock *lock)
{
        atomic_andnot(_Q_PENDING_VAL, &lock->val);
}

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
        atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
}

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
        u32 old, new, val = atomic_read(&lock->val);

        for (;;) {
                new = (val & _Q_LOCKED_PENDING_MASK) | tail;
                /*
                 * We can use relaxed semantics since the caller ensures that
                 * the MCS node is properly initialized before updating the
                 * tail.
                 */
                old = atomic_cmpxchg_relaxed(&lock->val, val, new);
                if (old == val)
                        break;

                val = old;
        }
        return old;
}
#endif /* _Q_PENDING_BITS == 8 */

/**
 * set_locked - Set the lock bit and own the lock
 * @lock: Pointer to queued spinlock structure
 *
 * *,*,0 -> *,0,1
 */
static __always_inline void set_locked(struct qspinlock *lock)
{
        WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
}

/*
 * Generate the native code for queued_spin_lock_slowpath(); provide NOPs for
 * all the PV callbacks.
 */

static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
                                           struct mcs_spinlock *prev) { }
static __always_inline void __pv_kick_node(struct qspinlock *lock,
                                           struct mcs_spinlock *node) { }
static __always_inline u32  __pv_wait_head_or_lock(struct qspinlock *lock,
                                                   struct mcs_spinlock *node)
                                                   { return 0; }

#define pv_enabled()            false

#define pv_init_node            __pv_init_node
#define pv_wait_node            __pv_wait_node
#define pv_kick_node            __pv_kick_node
#define pv_wait_head_or_lock    __pv_wait_head_or_lock

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define queued_spin_lock_slowpath       native_queued_spin_lock_slowpath
#endif

#endif /* _GEN_PV_LOCK_SLOWPATH */

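/*
 * For context: the fastpath that falls back to the slowpath below lives in
 * include/asm-generic/qspinlock.h and, modulo details that vary between
 * kernel versions, looks roughly like this sketch:
 *
 *	static __always_inline void queued_spin_lock(struct qspinlock *lock)
 *	{
 *		u32 val = 0;
 *
 *		// Uncontended case: 0,0,0 -> 0,0,1
 *		if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val,
 *						      _Q_LOCKED_VAL)))
 *			return;
 *
 *		// val now holds whatever made the cmpxchg fail.
 *		queued_spin_lock_slowpath(lock, val);
 *	}
 *
 * So @val passed to the slowpath is simply the lock word observed when the
 * single-cmpxchg fastpath failed.
 */
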
/**
 * queued_spin_lock_slowpath - acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * @val: Current value of the queued spinlock 32-bit word
 *
 * (queue tail, pending bit, lock value)
 *
 *              fast     :    slow                                  :    unlock
 *                       :                                          :
 * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
 *                       :       | ^--------.------.             /  :
 *                       :       v           \      \            |  :
 * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
 *                       :       | ^--'              |           |  :
 *                       :       v                   |           |  :
 * uncontended           :    (n,x,y) +--> (n,0,0) --'           |  :
 *   queue               :       | ^--'                          |  :
 *                       :       v                               |  :
 * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
 *   queue               :         ^--'                             :
 */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
        struct mcs_spinlock *prev, *next, *node;
        u32 old, tail;
        int idx;

        BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

        if (pv_enabled())
                goto pv_queue;

        if (virt_spin_lock(lock))
                return;

        /*
         * Wait for in-progress pending->locked hand-overs with a bounded
         * number of spins so that we guarantee forward progress.
         *
         * 0,1,0 -> 0,0,1
         */
        if (val == _Q_PENDING_VAL) {
                int cnt = _Q_PENDING_LOOPS;
                val = atomic_cond_read_relaxed(&lock->val,
                                               (VAL != _Q_PENDING_VAL) || !cnt--);
        }

        /*
         * If we observe any contention; queue.
         */
        if (val & ~_Q_LOCKED_MASK)
                goto queue;

        /*
         * trylock || pending
         *
         * 0,0,0 -> 0,0,1 ; trylock
         * 0,0,1 -> 0,1,1 ; pending
         */
        val = atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
        if (!(val & ~_Q_LOCKED_MASK)) {
                /*
                 * We're pending, wait for the owner to go away.
                 *
                 * *,1,1 -> *,1,0
                 *
                 * this wait loop must be a load-acquire such that we match the
                 * store-release that clears the locked bit and create lock
                 * sequentiality; this is because not all
                 * clear_pending_set_locked() implementations imply full
                 * barriers.
                 */
                if (val & _Q_LOCKED_MASK) {
                        atomic_cond_read_acquire(&lock->val,
                                                 !(VAL & _Q_LOCKED_MASK));
                }

                /*
                 * take ownership and clear the pending bit.
                 *
                 * *,1,0 -> *,0,1
                 */
                clear_pending_set_locked(lock);
                qstat_inc(qstat_lock_pending, true);
                return;
        }

        /*
         * If pending was clear but there are waiters in the queue, then
         * we need to undo our setting of pending before we queue ourselves.
         */
        if (!(val & _Q_PENDING_MASK))
                clear_pending(lock);

        /*
         * End of pending bit optimistic spinning and beginning of MCS
         * queuing.
         */
queue:
        qstat_inc(qstat_lock_slowpath, true);
pv_queue:
        node = this_cpu_ptr(&mcs_nodes[0]);
        idx = node->count++;
        tail = encode_tail(smp_processor_id(), idx);

        node += idx;

        /*
         * Ensure that we increment the head node->count before initialising
         * the actual node. If the compiler is kind enough to reorder these
         * stores, then an IRQ could overwrite our assignments.
         */
        barrier();

        node->locked = 0;
        node->next = NULL;
        pv_init_node(node);

        /*
         * We touched a (possibly) cold cacheline in the per-cpu queue node;
         * attempt the trylock once more in the hope someone let go while we
         * weren't watching.
         */
        if (queued_spin_trylock(lock))
                goto release;

        /*
         * Ensure that the initialisation of @node is complete before we
         * publish the updated tail via xchg_tail() and potentially link
         * @node into the waitqueue via WRITE_ONCE(prev->next, node) below.
         */
        smp_wmb();

        /*
         * Publish the updated tail.
         * We have already touched the queueing cacheline; don't bother with
         * pending stuff.
         *
         * p,*,* -> n,*,*
         */
        old = xchg_tail(lock, tail);
        next = NULL;

        /*
         * if there was a previous node; link it and wait until reaching the
         * head of the waitqueue.
         */
        if (old & _Q_TAIL_MASK) {
                prev = decode_tail(old);

                /* Link @node into the waitqueue. */
                WRITE_ONCE(prev->next, node);

                pv_wait_node(node, prev);
                arch_mcs_spin_lock_contended(&node->locked);

                /*
                 * While waiting for the MCS lock, the next pointer may have
                 * been set by another lock waiter. We optimistically load
                 * the next pointer & prefetch the cacheline for writing
                 * to reduce latency in the upcoming MCS unlock operation.
                 */
                next = READ_ONCE(node->next);
                if (next)
                        prefetchw(next);
        }

        /*
         * we're at the head of the waitqueue, wait for the owner & pending to
         * go away.
         *
         * *,x,y -> *,0,0
         *
         * this wait loop must use a load-acquire such that we match the
         * store-release that clears the locked bit and create lock
         * sequentiality; this is because the set_locked() function below
         * does not imply a full barrier.
         *
         * The PV pv_wait_head_or_lock function, if active, will acquire
         * the lock and return a non-zero value. So we have to skip the
         * atomic_cond_read_acquire() call. As the next PV queue head hasn't
         * been designated yet, there is no way for the locked value to become
         * _Q_SLOW_VAL. So both the set_locked() and the
         * atomic_cmpxchg_relaxed() calls will be safe.
         *
         * If PV isn't active, 0 will be returned instead.
         *
         */
        if ((val = pv_wait_head_or_lock(lock, node)))
                goto locked;

        val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK));

locked:
        /*
         * claim the lock:
         *
         * n,0,0 -> 0,0,1 : lock, uncontended
         * *,*,0 -> *,*,1 : lock, contended
         *
         * If the queue head is the only one in the queue (lock value == tail)
         * and nobody is pending, clear the tail code and grab the lock.
         * Otherwise, we only need to grab the lock.
         */

        /*
         * In the PV case we might already have _Q_LOCKED_VAL set.
         *
         * The atomic_cond_read_acquire() call above has provided the
         * necessary acquire semantics required for locking.
         */
        if (((val & _Q_TAIL_MASK) == tail) &&
            atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
                goto release; /* No contention */

        /* Either somebody is queued behind us or _Q_PENDING_VAL is set */
        set_locked(lock);

        /*
         * contended path; wait for next if not observed yet, release.
         */
        if (!next)
                next = smp_cond_load_relaxed(&node->next, (VAL));

        arch_mcs_spin_unlock_contended(&next->locked);
        pv_kick_node(lock, next);

release:
        /*
         * release the node
         */
        __this_cpu_dec(mcs_nodes[0].count);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);
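
/*
 * The release half that the acquire loops above pair with is the unlock
 * fastpath in include/asm-generic/qspinlock.h, which is essentially a
 * store-release of 0 to the locked byte; roughly, as a sketch:
 *
 *	static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 *	{
 *		// unlock: *,*,1 -> *,*,0
 *		smp_store_release(&lock->locked, 0);
 *	}
 *
 * Architectures and the PV case may override this; the sketch only shows the
 * generic native path.
 */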

/*
 * Generate the paravirt code for __pv_queued_spin_lock_slowpath().
 */
#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
#define _GEN_PV_LOCK_SLOWPATH

#undef  pv_enabled
#define pv_enabled()    true

#undef pv_init_node
#undef pv_wait_node
#undef pv_kick_node
#undef pv_wait_head_or_lock

#undef  queued_spin_lock_slowpath
#define queued_spin_lock_slowpath       __pv_queued_spin_lock_slowpath

#include "qspinlock_paravirt.h"
#include "qspinlock.c"

#endif