dpdk/lib/eal/include/generic/rte_mcslock.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Arm Limited
 */

#ifndef _RTE_MCSLOCK_H_
#define _RTE_MCSLOCK_H_

/**
 * @file
 *
 * RTE MCS lock
 *
 * This file defines the main data structure and APIs for the MCS queued lock.
 *
 * The MCS lock (proposed by John M. Mellor-Crummey and Michael L. Scott)
 * provides scalability by spinning on a CPU/thread-local variable, which
 * avoids expensive cache-line bouncing. It provides fairness by maintaining
 * a queue of acquirers and passing the lock to each CPU/thread in the order
 * in which they requested it.
 */
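
/*
 * Usage sketch (illustrative, not part of the original header; p_ml and
 * worker are hypothetical names): the lock itself is just a pointer to
 * the queue tail, initialized to NULL, and each CPU/thread supplies its
 * own node for the duration of one acquisition.
 *
 *   rte_mcslock_t *p_ml = NULL;             // shared lock, initially free
 *
 *   void worker(void)
 *   {
 *           rte_mcslock_t ml_me;            // this thread's queue node
 *
 *           rte_mcslock_lock(&p_ml, &ml_me);
 *           // ... critical section ...
 *           rte_mcslock_unlock(&p_ml, &ml_me);
 *   }
 */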

#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_pause.h>
#include <rte_branch_prediction.h>

/**
 * The rte_mcslock_t type.
 */
typedef struct rte_mcslock {
        struct rte_mcslock *next;
        int locked; /* 1 if the queue is locked, 0 otherwise */
} rte_mcslock_t;

/**
 * Take the MCS lock.
 *
 * @param msl
 *   A pointer to the pointer to an MCS lock.
 *   When the lock is declared or initialized, the msl pointer should be
 *   set to NULL.
 * @param me
 *   A pointer to a new node of the MCS lock. Each CPU/thread acquiring
 *   the lock must use its own node.
 */
static inline void
rte_mcslock_lock(rte_mcslock_t **msl, rte_mcslock_t *me)
{
        rte_mcslock_t *prev;

        /* Init me node */
        __atomic_store_n(&me->locked, 1, __ATOMIC_RELAXED);
        __atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED);

        /* If the queue is empty, the exchange operation is enough to acquire
         * the lock. Hence, the exchange operation requires acquire semantics.
         * The store to me->next above should complete before the node is
         * visible to other CPUs/threads. Hence, the exchange operation requires
         * release semantics as well.
         */
        prev = __atomic_exchange_n(msl, me, __ATOMIC_ACQ_REL);
        if (likely(prev == NULL)) {
                /* Queue was empty, no further action required,
                 * proceed with lock taken.
                 */
                return;
        }
        /* The store to me->next above should also complete before the node is
         * visible to the predecessor thread releasing the lock. Hence, the
         * store to prev->next also requires release semantics. Note that, for
         * example, on ARM, the release semantics of the exchange operation
         * above are not as strong as a release fence and are not sufficient
         * to enforce the desired order here.
         */
        __atomic_store_n(&prev->next, me, __ATOMIC_RELEASE);

        /* The while-load of me->locked should not move above the previous
         * store to prev->next. Otherwise it will cause a deadlock. Need a
         * store-load barrier.
         */
        __atomic_thread_fence(__ATOMIC_ACQ_REL);
        /* The lock was already held: this node has been atomically appended
         * to the end of the queue above, so now spin on me->locked until the
         * previous lock holder resets it in rte_mcslock_unlock().
         */
        rte_wait_until_equal_32((uint32_t *)&me->locked, 0, __ATOMIC_ACQUIRE);
}
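
/*
 * Illustrative queue state (not part of the original header) while CPU A
 * holds the lock and CPU B waits; *msl always points at the queue tail:
 *
 *   *msl --> B: { next = NULL, locked = 1 }   (tail; B spins on B.locked)
 *   A:       { next = &B,   locked = 1 }      (holder; A.locked is unused)
 *
 * rte_mcslock_unlock(msl, &A) sees A.next != NULL and simply stores 0 to
 * B.locked, handing the lock to B in FIFO order.
 */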

/**
 * Release the MCS lock.
 *
 * @param msl
 *   A pointer to the pointer to an MCS lock.
 * @param me
 *   A pointer to the node passed to rte_mcslock_lock() by this CPU/thread.
 */
static inline void
rte_mcslock_unlock(rte_mcslock_t **msl, rte_mcslock_t *me)
{
        /* Check if there are more nodes in the queue. */
        if (likely(__atomic_load_n(&me->next, __ATOMIC_RELAXED) == NULL)) {
                /* No, this is the last member in the queue. Use a copy of
                 * 'me', since a failed compare-exchange overwrites the
                 * expected value.
                 */
                rte_mcslock_t *save_me = me;

                /* Release the lock by setting it to NULL */
                if (likely(__atomic_compare_exchange_n(msl, &save_me, NULL, 0,
                                __ATOMIC_RELEASE, __ATOMIC_RELAXED)))
                        return;

                /* The compare-exchange failed: another CPU appended itself
                 * to the queue in the meantime. Speculative execution could
                 * let the wait loop below read me->next before the
                 * compare-exchange, which has the potential to cause a
                 * deadlock. Need a load barrier.
                 */
                __atomic_thread_fence(__ATOMIC_ACQUIRE);
                /* More nodes added to the queue by other CPUs.
                 * Wait until the next pointer is set.
                 */
                uintptr_t *next;
                next = (uintptr_t *)&me->next;
                RTE_WAIT_UNTIL_MASKED(next, UINTPTR_MAX, !=, 0,
                        __ATOMIC_RELAXED);
        }

        /* Pass lock to next waiter. */
        __atomic_store_n(&me->next->locked, 0, __ATOMIC_RELEASE);
}

/**
 * Try to take the lock.
 *
 * @param msl
 *   A pointer to the pointer to an MCS lock.
 * @param me
 *   A pointer to a new node of the MCS lock.
 * @return
 *   1 if the lock is successfully taken; 0 otherwise.
 */
static inline int
rte_mcslock_trylock(rte_mcslock_t **msl, rte_mcslock_t *me)
{
        /* Init me node */
        __atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED);

        /* Try to lock */
        rte_mcslock_t *expected = NULL;

        /* The lock can be taken only when the queue is empty. Hence,
         * the compare-exchange operation requires acquire semantics.
         * The store to me->next above should complete before the node
         * is visible to other CPUs/threads. Hence, the compare-exchange
         * operation requires release semantics as well.
         */
        return __atomic_compare_exchange_n(msl, &expected, me, 0,
                        __ATOMIC_ACQ_REL, __ATOMIC_RELAXED);
}
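
/*
 * Trylock usage sketch (illustrative, not part of the original header;
 * p_ml is the hypothetical lock from the sketch above): on success the
 * node anchors the acquired lock, so it must stay valid until the
 * matching rte_mcslock_unlock().
 *
 *   rte_mcslock_t ml_me;
 *
 *   if (rte_mcslock_trylock(&p_ml, &ml_me)) {
 *           // ... critical section ...
 *           rte_mcslock_unlock(&p_ml, &ml_me);
 *   } else {
 *           // lock busy: back off or do other work
 *   }
 */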

/**
 * Test if the lock is taken.
 *
 * @param msl
 *   The MCS lock (the current value of the lock pointer).
 * @return
 *   1 if the lock is currently taken; 0 otherwise.
 */
static inline int
rte_mcslock_is_locked(rte_mcslock_t *msl)
{
        /* A non-NULL queue tail means some CPU/thread holds the lock. */
        return (__atomic_load_n(&msl, __ATOMIC_RELAXED) != NULL);
}
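
/*
 * Illustrative check (not part of the original header): the result is a
 * racy snapshot, so it is mainly useful for assertions and diagnostics,
 * e.g. RTE_ASSERT(rte_mcslock_is_locked(p_ml)) in a function that must
 * only be called with the lock held.
 */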

#endif /* _RTE_MCSLOCK_H_ */