/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Ericsson AB
 */

#ifndef _RTE_SEQLOCK_H_
#define _RTE_SEQLOCK_H_

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @file
 * RTE Seqlock
 *
 * A sequence lock (seqlock) is a synchronization primitive allowing
 * multiple, parallel, readers to efficiently and safely (i.e., in a
 * data-race free manner) access lock-protected data. The RTE seqlock
 * permits multiple writers as well. A spinlock is used for
 * writer-writer synchronization.
 *
 * A reader never blocks a writer. Very high frequency writes may
 * prevent readers from making progress.
 *
 * A seqlock is not preemption-safe on the writer side. If a writer is
 * preempted, it may block readers until the writer thread is allowed
 * to continue. Heavy computations should be kept out of the
 * writer-side critical section, to avoid delaying readers.
 *
 * Seqlocks are useful for data which are read by many cores, at a
 * high frequency, and relatively infrequently written to.
 *
 * One way to think about seqlocks is that they provide means to
 * perform atomic operations on objects larger than what the native
 * machine instructions allow for.
 *
 * To avoid resource reclamation issues, the data protected by a
 * seqlock should typically be kept self-contained (e.g., no pointers
 * to mutable, dynamically allocated data).
 *
 * Example usage:
 * @code{.c}
 * #define MAX_Y_LEN 16
 * // Application-defined example data structure, protected by a seqlock.
 * struct config {
 *         rte_seqlock_t lock;
 *         int param_x;
 *         char param_y[MAX_Y_LEN];
 * };
 *
 * // Accessor function for reading config fields.
 * void
 * config_read(const struct config *config, int *param_x, char *param_y)
 * {
 *         uint32_t sn;
 *
 *         do {
 *                 sn = rte_seqlock_read_begin(&config->lock);
 *
 *                 // Loads may be atomic or non-atomic, as in this example.
 *                 *param_x = config->param_x;
 *                 strcpy(param_y, config->param_y);
 *                 // An alternative to an immediate retry is to abort and
 *                 // try again at some later time, assuming progress is
 *                 // possible without the data.
 *         } while (rte_seqlock_read_retry(&config->lock, sn));
 * }
 *
 * // Accessor function for writing config fields.
 * void
 * config_update(struct config *config, int param_x, const char *param_y)
 * {
 *         rte_seqlock_write_lock(&config->lock);
 *         // Stores may be atomic or non-atomic, as in this example.
 *         config->param_x = param_x;
 *         strcpy(config->param_y, param_y);
 *         rte_seqlock_write_unlock(&config->lock);
 * }
 * @endcode
 *
 * In case there is only a single writer, or writer-writer
 * serialization is provided by other means, the use of sequence lock
 * (i.e., rte_seqlock_t) can be replaced with the use of the "raw"
 * rte_seqcount_t type instead.
 *
 * @see
 * https://en.wikipedia.org/wiki/Seqlock.
 */

#include <stdbool.h>
#include <stdint.h>

#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_compat.h>
#include <rte_seqcount.h>
#include <rte_spinlock.h>

/**
 * The RTE seqlock type.
 */
typedef struct {
	rte_seqcount_t count; /**< Sequence count for the protected data. */
	rte_spinlock_t lock; /**< Spinlock used to serialize writers. */
} rte_seqlock_t;

/**
 * A static seqlock initializer.
 */
#define RTE_SEQLOCK_INITIALIZER \
	{ \
		.count = RTE_SEQCOUNT_INITIALIZER, \
		.lock = RTE_SPINLOCK_INITIALIZER \
	}

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Initialize the seqlock.
 *
 * This function initializes the seqlock, and leaves the writer-side
 * spinlock unlocked.
 *
 * @param seqlock
 *   A pointer to the seqlock.
 */
__rte_experimental
static inline void
rte_seqlock_init(rte_seqlock_t *seqlock)
{
	rte_seqcount_init(&seqlock->count);
	rte_spinlock_init(&seqlock->lock);
}

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Begin a read-side critical section.
 *
 * See rte_seqcount_read_retry() for details.
 *
 * @param seqlock
 *   A pointer to the seqlock.
 * @return
 *   The seqlock sequence number for this critical section, to
 *   later be passed to rte_seqlock_read_retry().
 *
 * @see rte_seqlock_read_retry()
 * @see rte_seqcount_read_retry()
 */
__rte_experimental
static inline uint32_t
rte_seqlock_read_begin(const rte_seqlock_t *seqlock)
{
	return rte_seqcount_read_begin(&seqlock->count);
}

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * End a read-side critical section.
 *
 * See rte_seqcount_read_retry() for details.
 *
 * @param seqlock
 *   A pointer to the seqlock.
 * @param begin_sn
 *   The seqlock sequence number returned by rte_seqlock_read_begin().
 * @return
 *   true or false, if the just-read seqlock-protected data was
 *   inconsistent or consistent, respectively, at the time it was
 *   read.
 *
 * @see rte_seqlock_read_begin()
 */
__rte_experimental
static inline bool
rte_seqlock_read_retry(const rte_seqlock_t *seqlock, uint32_t begin_sn)
{
	return rte_seqcount_read_retry(&seqlock->count, begin_sn);
}

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Begin a write-side critical section.
 *
 * A call to this function acquires the write lock associated with @p
 * seqlock, and marks the beginning of a write-side critical section.
 *
 * After having called this function, the caller may go on to modify
 * (both read and write) the protected data, in an atomic or
 * non-atomic manner.
 *
 * After the necessary updates have been performed, the application
 * calls rte_seqlock_write_unlock().
 *
 * This function is not preemption-safe in the sense that preemption
 * of the calling thread may block reader progress until the writer
 * thread is rescheduled.
 *
 * Unlike rte_seqlock_read_begin(), each call made to
 * rte_seqlock_write_lock() must be matched with an unlock call.
 *
 * @param seqlock
 *   A pointer to the seqlock.
 *
 * @see rte_seqlock_write_unlock()
 */
__rte_experimental
static inline void
rte_seqlock_write_lock(rte_seqlock_t *seqlock)
{
	/* To synchronize with other writers. */
	rte_spinlock_lock(&seqlock->lock);

	rte_seqcount_write_begin(&seqlock->count);
}

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * End a write-side critical section.
 *
 * A call to this function marks the end of the write-side critical
 * section, for @p seqlock. After this call has been made, the protected
 * data may no longer be modified.
 *
 * @param seqlock
 *   A pointer to the seqlock.
 *
 * @see rte_seqlock_write_lock()
 */
__rte_experimental
static inline void
rte_seqlock_write_unlock(rte_seqlock_t *seqlock)
{
	rte_seqcount_write_end(&seqlock->count);

	rte_spinlock_unlock(&seqlock->lock);
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_SEQLOCK_H_ */