dpdk/lib/eal/include/generic/rte_rwlock.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _RTE_RWLOCK_H_
#define _RTE_RWLOCK_H_

/**
 * @file
 *
 * RTE Read-Write Locks
 *
 * This file defines an API for read-write locks. A read-write lock
 * allows multiple readers to access the protected data in parallel,
 * but only a single writer at a time. All readers are blocked until
 * the writer has finished writing.
 */
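
/*
 * Minimal usage sketch (illustrative only, not part of the original header):
 * a statically initialized lock protecting a shared counter that many lcores
 * read in parallel while a single control thread updates it. The stats_lock,
 * stats_counter, stats_read() and stats_update() names are hypothetical.
 *
 *   static rte_rwlock_t stats_lock = RTE_RWLOCK_INITIALIZER;
 *   static uint64_t stats_counter;
 *
 *   static uint64_t
 *   stats_read(void)
 *   {
 *           uint64_t v;
 *
 *           rte_rwlock_read_lock(&stats_lock);
 *           v = stats_counter;
 *           rte_rwlock_read_unlock(&stats_lock);
 *           return v;
 *   }
 *
 *   static void
 *   stats_update(uint64_t delta)
 *   {
 *           rte_rwlock_write_lock(&stats_lock);
 *           stats_counter += delta;
 *           rte_rwlock_write_unlock(&stats_lock);
 *   }
 */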

#ifdef __cplusplus
extern "C" {
#endif

#include <errno.h> /* for EBUSY returned by the trylock functions */

#include <rte_common.h>
#include <rte_atomic.h>
#include <rte_pause.h>

/**
 * The rte_rwlock_t type.
 *
 * cnt is 0 when the lock is unlocked, -1 when a write lock is held,
 * and > 0 (the number of readers) when read locks are held.
 */
typedef struct {
        volatile int32_t cnt; /**< -1 when W lock held, > 0 when R locks held. */
} rte_rwlock_t;

/**
 * A static rwlock initializer.
 */
#define RTE_RWLOCK_INITIALIZER { 0 }

/**
 * Initialize the rwlock to an unlocked state.
 *
 * @param rwl
 *   A pointer to the rwlock structure.
 */
static inline void
rte_rwlock_init(rte_rwlock_t *rwl)
{
        rwl->cnt = 0;
}

/**
 * Take a read lock. Loop until the lock is held.
 *
 * @param rwl
 *   A pointer to a rwlock structure.
 */
static inline void
rte_rwlock_read_lock(rte_rwlock_t *rwl)
{
        int32_t x;
        int success = 0;

        while (success == 0) {
                x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
                /* write lock is held */
                if (x < 0) {
                        rte_pause();
                        continue;
                }
                success = __atomic_compare_exchange_n(&rwl->cnt, &x, x + 1, 1,
                                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
        }
}

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Try to take a read lock.
 *
 * @param rwl
 *   A pointer to a rwlock structure.
 * @return
 *   - zero if the lock is successfully taken
 *   - -EBUSY if the lock could not be acquired for reading because a
 *     writer holds the lock
 */
__rte_experimental
static inline int
rte_rwlock_read_trylock(rte_rwlock_t *rwl)
{
        int32_t x;
        int success = 0;

        while (success == 0) {
                x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
                /* write lock is held */
                if (x < 0)
                        return -EBUSY;
                success = __atomic_compare_exchange_n(&rwl->cnt, &x, x + 1, 1,
                                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
        }

        return 0;
}
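
/*
 * Illustrative sketch (not part of the original header): a reader that gives
 * up instead of spinning when a writer currently holds the lock, reusing the
 * hypothetical stats_lock/stats_counter from the sketch near the top of this
 * file.
 *
 *   static int
 *   stats_try_read(uint64_t *out)
 *   {
 *           if (rte_rwlock_read_trylock(&stats_lock) != 0)
 *                   return -EBUSY;
 *           *out = stats_counter;
 *           rte_rwlock_read_unlock(&stats_lock);
 *           return 0;
 *   }
 */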

/**
 * Release a read lock.
 *
 * @param rwl
 *   A pointer to the rwlock structure.
 */
static inline void
rte_rwlock_read_unlock(rte_rwlock_t *rwl)
{
        __atomic_fetch_sub(&rwl->cnt, 1, __ATOMIC_RELEASE);
}

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Try to take a write lock.
 *
 * @param rwl
 *   A pointer to a rwlock structure.
 * @return
 *   - zero if the lock is successfully taken
 *   - -EBUSY if the lock could not be acquired for writing because
 *     it was already locked for reading or writing
 */
__rte_experimental
static inline int
rte_rwlock_write_trylock(rte_rwlock_t *rwl)
{
        int32_t x;

        x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
        if (x != 0 || __atomic_compare_exchange_n(&rwl->cnt, &x, -1, 1,
                              __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) == 0)
                return -EBUSY;

        return 0;
}
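
/*
 * Illustrative sketch (not part of the original header): an opportunistic
 * writer that simply skips the update when the lock is contended, again
 * using the hypothetical stats_lock/stats_counter from above.
 *
 *   static int
 *   stats_try_update(uint64_t delta)
 *   {
 *           if (rte_rwlock_write_trylock(&stats_lock) != 0)
 *                   return -EBUSY;
 *           stats_counter += delta;
 *           rte_rwlock_write_unlock(&stats_lock);
 *           return 0;
 *   }
 */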

/**
 * Take a write lock. Loop until the lock is held.
 *
 * @param rwl
 *   A pointer to a rwlock structure.
 */
static inline void
rte_rwlock_write_lock(rte_rwlock_t *rwl)
{
        int32_t x;
        int success = 0;

        while (success == 0) {
                x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
                /* a lock is held */
                if (x != 0) {
                        rte_pause();
                        continue;
                }
                success = __atomic_compare_exchange_n(&rwl->cnt, &x, -1, 1,
                                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
        }
}

/**
 * Release a write lock.
 *
 * @param rwl
 *   A pointer to a rwlock structure.
 */
static inline void
rte_rwlock_write_unlock(rte_rwlock_t *rwl)
{
        __atomic_store_n(&rwl->cnt, 0, __ATOMIC_RELEASE);
}

/**
 * Try to execute the critical section in a hardware memory transaction,
 * and take a read lock if the transaction fails or is not available.
 *
 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
 * transaction always aborts the transaction since the CPU is not able to
 * roll back should the transaction fail. Therefore, hardware transactional
 * locks are not advised to be used around rte_eth_rx_burst() and
 * rte_eth_tx_burst() calls.
 *
 * @param rwl
 *   A pointer to a rwlock structure.
 */
static inline void
rte_rwlock_read_lock_tm(rte_rwlock_t *rwl);

/**
 * Commit the hardware memory transaction, or release the read lock if the
 * lock was used as a fall-back.
 *
 * @param rwl
 *   A pointer to the rwlock structure.
 */
static inline void
rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl);

/**
 * Try to execute the critical section in a hardware memory transaction,
 * and take a write lock if the transaction fails or is not available.
 *
 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
 * transaction always aborts the transaction since the CPU is not able to
 * roll back should the transaction fail. Therefore, hardware transactional
 * locks are not advised to be used around rte_eth_rx_burst() and
 * rte_eth_tx_burst() calls.
 *
 * @param rwl
 *   A pointer to a rwlock structure.
 */
static inline void
rte_rwlock_write_lock_tm(rte_rwlock_t *rwl);

/**
 * Commit the hardware memory transaction, or release the write lock if the
 * lock was used as a fall-back.
 *
 * @param rwl
 *   A pointer to a rwlock structure.
 */
static inline void
rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl);
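
/*
 * Illustrative sketch (not part of the original header): the _tm variants
 * are paired exactly like the plain lock/unlock calls; attempting the
 * critical section as a hardware memory transaction is an internal
 * optimization with the ordinary lock as fall-back. The cfg_lock,
 * cfg_version and cfg_version_get() names are hypothetical.
 *
 *   static rte_rwlock_t cfg_lock = RTE_RWLOCK_INITIALIZER;
 *   static uint32_t cfg_version;
 *
 *   static uint32_t
 *   cfg_version_get(void)
 *   {
 *           uint32_t v;
 *
 *           rte_rwlock_read_lock_tm(&cfg_lock);
 *           v = cfg_version;
 *           rte_rwlock_read_unlock_tm(&cfg_lock);
 *           return v;
 *   }
 */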

#ifdef __cplusplus
}
#endif

#endif /* _RTE_RWLOCK_H_ */