/* qemu/include/qemu/thread.h */
   1#ifndef QEMU_THREAD_H
   2#define QEMU_THREAD_H
   3
   4#include "qemu/processor.h"
   5#include "qemu/atomic.h"
   6
/* Forward declarations.  QemuLockCnt is defined later in this header;
 * the other structs come from the OS-specific thread-posix.h or
 * thread-win32.h included below.
 */
typedef struct QemuCond QemuCond;
typedef struct QemuSemaphore QemuSemaphore;
typedef struct QemuEvent QemuEvent;
typedef struct QemuLockCnt QemuLockCnt;
typedef struct QemuThread QemuThread;
  12
  13#ifdef _WIN32
  14#include "qemu/thread-win32.h"
  15#else
  16#include "qemu/thread-posix.h"
  17#endif
  18
  19/* include QSP header once QemuMutex, QemuCond etc. are defined */
  20#include "qemu/qsp.h"
  21
/* Modes for qemu_thread_create(): a joinable thread must be reaped with
 * qemu_thread_join(); a detached thread cannot be joined.
 */
#define QEMU_THREAD_JOINABLE 0
#define QEMU_THREAD_DETACHED 1

void qemu_mutex_init(QemuMutex *mutex);
void qemu_mutex_destroy(QemuMutex *mutex);
/* The _impl functions take the call site (file/line) so that lock
 * operations can be attributed to their caller (see qemu/qsp.h).
 * Use the qemu_mutex_lock()/qemu_mutex_trylock() macros below rather
 * than calling these directly.
 */
int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line);
void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line);
void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line);

/* Function pointer types for the replaceable lock/wait hooks that the
 * macros below dispatch through.
 */
typedef void (*QemuMutexLockFunc)(QemuMutex *m, const char *f, int l);
typedef int (*QemuMutexTrylockFunc)(QemuMutex *m, const char *f, int l);
typedef void (*QemuRecMutexLockFunc)(QemuRecMutex *m, const char *f, int l);
typedef int (*QemuRecMutexTrylockFunc)(QemuRecMutex *m, const char *f, int l);
typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f,
                                 int l);
typedef bool (*QemuCondTimedWaitFunc)(QemuCond *c, QemuMutex *m, int ms,
                                      const char *f, int l);

/* Currently installed hook implementations.  The macros below read
 * these with atomic_read(), so they can be replaced at run time
 * (presumably by the QSP lock profiler -- see qemu/qsp.h).
 */
extern QemuMutexLockFunc qemu_bql_mutex_lock_func;
extern QemuMutexLockFunc qemu_mutex_lock_func;
extern QemuMutexTrylockFunc qemu_mutex_trylock_func;
extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func;
extern QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func;
extern QemuCondWaitFunc qemu_cond_wait_func;
extern QemuCondTimedWaitFunc qemu_cond_timedwait_func;
  47
/* Convenience macros to bypass the profiler: these call the _impl
 * functions directly instead of going through the replaceable hook
 * function pointers.
 */
#define qemu_mutex_lock__raw(m)                         \
        qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock__raw(m)                      \
        qemu_mutex_trylock_impl(m, __FILE__, __LINE__)
  53
#ifdef __COVERITY__
/*
 * Coverity is severely confused by the indirect function calls,
 * hide them.
 */
#define qemu_mutex_lock(m)                                              \
            qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock(m)                                           \
            qemu_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_lock(m)                                          \
            qemu_rec_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_trylock(m)                                       \
            qemu_rec_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_cond_wait(c, m)                                            \
            qemu_cond_wait_impl(c, m, __FILE__, __LINE__)
#define qemu_cond_timedwait(c, m, ms)                                   \
            qemu_cond_timedwait_impl(c, m, ms, __FILE__, __LINE__)
#else
/*
 * Normal build: each macro snapshots the currently installed hook with
 * atomic_read() and calls it, passing the call site (__FILE__/__LINE__)
 * along.  These are GCC/clang statement expressions, so the value of
 * the macro is the value of the hook call -- which is how
 * qemu_mutex_trylock() and qemu_cond_timedwait() return a result.
 */
#define qemu_mutex_lock(m) ({                                           \
            QemuMutexLockFunc _f = atomic_read(&qemu_mutex_lock_func);  \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_mutex_trylock(m) ({                                        \
            QemuMutexTrylockFunc _f = atomic_read(&qemu_mutex_trylock_func); \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_rec_mutex_lock(m) ({                                       \
            QemuRecMutexLockFunc _f = atomic_read(&qemu_rec_mutex_lock_func); \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_rec_mutex_trylock(m) ({                            \
            QemuRecMutexTrylockFunc _f;                         \
            _f = atomic_read(&qemu_rec_mutex_trylock_func);     \
            _f(m, __FILE__, __LINE__);                          \
        })

#define qemu_cond_wait(c, m) ({                                         \
            QemuCondWaitFunc _f = atomic_read(&qemu_cond_wait_func);    \
            _f(c, m, __FILE__, __LINE__);                               \
        })

#define qemu_cond_timedwait(c, m, ms) ({                                       \
            QemuCondTimedWaitFunc _f = atomic_read(&qemu_cond_timedwait_func); \
            _f(c, m, ms, __FILE__, __LINE__);                                  \
        })
#endif
 103
/* There is no replaceable hook for unlock; it always goes straight to
 * the _impl function.
 */
#define qemu_mutex_unlock(mutex) \
        qemu_mutex_unlock_impl(mutex, __FILE__, __LINE__)
 106
/*
 * Function version of qemu_mutex_lock, usable where a real function is
 * required (e.g. taking its address).  Parenthesizing the name prevents
 * expansion of the function-like macro above; the call in the body
 * still expands normally.
 */
static inline void (qemu_mutex_lock)(QemuMutex *mutex)
{
    qemu_mutex_lock(mutex);
}
 111
/*
 * Function version of the qemu_mutex_trylock macro above; the
 * parenthesized name suppresses macro expansion of the declarator.
 */
static inline int (qemu_mutex_trylock)(QemuMutex *mutex)
{
    return qemu_mutex_trylock(mutex);
}
 116
/*
 * Function version of the qemu_mutex_unlock macro above; the
 * parenthesized name suppresses macro expansion of the declarator.
 */
static inline void (qemu_mutex_unlock)(QemuMutex *mutex)
{
    qemu_mutex_unlock(mutex);
}
 121
/*
 * Function version of the qemu_rec_mutex_lock macro above; the
 * parenthesized name suppresses macro expansion of the declarator.
 */
static inline void (qemu_rec_mutex_lock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_lock(mutex);
}
 126
/*
 * Function version of the qemu_rec_mutex_trylock macro above; the
 * parenthesized name suppresses macro expansion of the declarator.
 */
static inline int (qemu_rec_mutex_trylock)(QemuRecMutex *mutex)
{
    return qemu_rec_mutex_trylock(mutex);
}
 131
/* Prototypes for other functions are in thread-posix.h/thread-win32.h.  */
void qemu_rec_mutex_init(QemuRecMutex *mutex);

void qemu_cond_init(QemuCond *cond);
void qemu_cond_destroy(QemuCond *cond);

/*
 * IMPORTANT: the implementation only guarantees that qemu_cond_signal
 * and qemu_cond_broadcast behave correctly when the caller holds the
 * same mutex that the waiters passed to the corresponding
 * qemu_cond_wait calls!
 */
void qemu_cond_signal(QemuCond *cond);
void qemu_cond_broadcast(QemuCond *cond);
/* Called via the qemu_cond_wait()/qemu_cond_timedwait() macros, which
 * supply the call site and dispatch through the profiler hooks.
 */
void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex,
                         const char *file, const int line);
bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
                              const char *file, const int line);
 149
/*
 * Function version of the qemu_cond_wait macro above; the
 * parenthesized name suppresses macro expansion of the declarator.
 */
static inline void (qemu_cond_wait)(QemuCond *cond, QemuMutex *mutex)
{
    qemu_cond_wait(cond, mutex);
}
 154
/* Returns true if timeout has not expired, and false otherwise */
static inline bool (qemu_cond_timedwait)(QemuCond *cond, QemuMutex *mutex,
                                         int ms)
{
    /* Function version of the qemu_cond_timedwait macro above. */
    return qemu_cond_timedwait(cond, mutex, ms);
}
 161
/* Counting semaphore, initialized to @init. */
void qemu_sem_init(QemuSemaphore *sem, int init);
void qemu_sem_post(QemuSemaphore *sem);
void qemu_sem_wait(QemuSemaphore *sem);
/* @ms: timeout in milliseconds.  NOTE(review): the return convention
 * (0 on success vs. nonzero on timeout) lives in the OS-specific
 * implementation -- confirm there before relying on it.
 */
int qemu_sem_timedwait(QemuSemaphore *sem, int ms);
void qemu_sem_destroy(QemuSemaphore *sem);
 167
/* Event object; @init presumably selects the initial (set/reset)
 * state -- see thread-posix.h/thread-win32.h for the implementation.
 */
void qemu_event_init(QemuEvent *ev, bool init);
void qemu_event_set(QemuEvent *ev);
void qemu_event_reset(QemuEvent *ev);
void qemu_event_wait(QemuEvent *ev);
void qemu_event_destroy(QemuEvent *ev);
 173
/*
 * Create a new thread running @start_routine(@arg).  @mode is
 * QEMU_THREAD_JOINABLE or QEMU_THREAD_DETACHED (see above).  @name is
 * the thread name, applied when naming is enabled via
 * qemu_thread_naming().
 */
void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode);
void *qemu_thread_join(QemuThread *thread);
void qemu_thread_get_self(QemuThread *thread);
bool qemu_thread_is_self(QemuThread *thread);
/* Terminate the calling thread; does not return. */
void qemu_thread_exit(void *retval) QEMU_NORETURN;
void qemu_thread_naming(bool enable);
 182
/* Forward declaration so this header need not pull in the notifier API. */
struct Notifier;
/**
 * qemu_thread_atexit_add:
 * @notifier: Notifier to add
 *
 * Add the specified notifier to a list which will be run via
 * notifier_list_notify() when this thread exits (either by calling
 * qemu_thread_exit() or by returning from its start_routine).
 * The usual usage is that the caller passes a Notifier which is
 * a per-thread variable; it can then use the callback to free
 * other per-thread data.
 *
 * If the thread exits as part of the entire process exiting,
 * it is unspecified whether notifiers are called or not.
 */
void qemu_thread_atexit_add(struct Notifier *notifier);
/**
 * qemu_thread_atexit_remove:
 * @notifier: Notifier to remove
 *
 * Remove the specified notifier from the thread-exit notification
 * list. It is not valid to try to remove a notifier which is not
 * on the list.
 */
void qemu_thread_atexit_remove(struct Notifier *notifier);
 208
 209#ifdef CONFIG_TSAN
 210#include <sanitizer/tsan_interface.h>
 211#endif
 212
/*
 * Simple test-and-set spinlock: value is 0 when free and nonzero when
 * held.  It is manipulated with the __sync_lock builtins and
 * atomic_read by the qemu_spin_* functions below.
 */
struct QemuSpin {
    int value;
};
 216
/* Initialize a spinlock to the unlocked state. */
static inline void qemu_spin_init(QemuSpin *spin)
{
    /* __sync_lock_release stores 0 (with release semantics), which is
     * the unlocked state. */
    __sync_lock_release(&spin->value);
#ifdef CONFIG_TSAN
    /* Tell TSAN this object is a mutex so it can track lock ordering. */
    __tsan_mutex_create(spin, __tsan_mutex_not_static);
#endif
}
 224
/*
 * Destroy a spinlock.  A no-op without TSAN; const parameter because
 * the only purpose here is the TSAN annotation.
 */
static inline void qemu_spin_destroy(const QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    /* Cast away const: the TSAN interface takes a plain void *. */
    __tsan_mutex_destroy((void *)spin, __tsan_mutex_not_static);
#endif
}
 232
/*
 * Acquire the spinlock, busy-waiting until it becomes free
 * (test-and-test-and-set).
 */
static inline void qemu_spin_lock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_lock(spin, 0);
#endif
    /* __sync_lock_test_and_set is an acquire barrier and returns the
     * previous value: nonzero means someone else holds the lock. */
    while (unlikely(__sync_lock_test_and_set(&spin->value, true))) {
        /* Spin with plain reads until the lock looks free, then retry
         * the atomic swap; this avoids hammering the cache line with
         * writes while the lock is held. */
        while (atomic_read(&spin->value)) {
            cpu_relax();
        }
    }
#ifdef CONFIG_TSAN
    __tsan_mutex_post_lock(spin, 0, 0);
#endif
}
 247
 248static inline bool qemu_spin_trylock(QemuSpin *spin)
 249{
 250#ifdef CONFIG_TSAN
 251    __tsan_mutex_pre_lock(spin, __tsan_mutex_try_lock);
 252#endif
 253    bool busy = __sync_lock_test_and_set(&spin->value, true);
 254#ifdef CONFIG_TSAN
 255    unsigned flags = __tsan_mutex_try_lock;
 256    flags |= busy ? __tsan_mutex_try_lock_failed : 0;
 257    __tsan_mutex_post_lock(spin, flags, 0);
 258#endif
 259    return busy;
 260}
 261
 262static inline bool qemu_spin_locked(QemuSpin *spin)
 263{
 264    return atomic_read(&spin->value);
 265}
 266
/*
 * Release the spinlock.  __sync_lock_release stores 0 with release
 * semantics, so writes made inside the critical section are visible
 * to the next acquirer.
 */
static inline void qemu_spin_unlock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_unlock(spin, 0);
#endif
    __sync_lock_release(&spin->value);
#ifdef CONFIG_TSAN
    __tsan_mutex_post_unlock(spin, 0);
#endif
}
 277
/*
 * A lock paired with a visitor counter; see the qemu_lockcnt_* API
 * below.  On Linux no separate mutex field is needed (presumably the
 * implementation synchronizes on the count word itself -- see the
 * lockcnt implementation); elsewhere a QemuMutex backs the lock
 * operations.
 */
struct QemuLockCnt {
#ifndef CONFIG_LINUX
    QemuMutex mutex;
#endif
    unsigned count;
};
 284
 285/**
 * qemu_lockcnt_init: initialize a QemuLockCnt
 287 * @lockcnt: the lockcnt to initialize
 288 *
 289 * Initialize lockcnt's counter to zero and prepare its mutex
 290 * for usage.
 291 */
 292void qemu_lockcnt_init(QemuLockCnt *lockcnt);
 293
 294/**
 * qemu_lockcnt_destroy: destroy a QemuLockCnt
 * @lockcnt: the lockcnt to destroy
 297 *
 298 * Destroy lockcnt's mutex.
 299 */
 300void qemu_lockcnt_destroy(QemuLockCnt *lockcnt);
 301
 302/**
 303 * qemu_lockcnt_inc: increment a QemuLockCnt's counter
 304 * @lockcnt: the lockcnt to operate on
 305 *
 306 * If the lockcnt's count is zero, wait for critical sections
 307 * to finish and increment lockcnt's count to 1.  If the count
 308 * is not zero, just increment it.
 309 *
 310 * Because this function can wait on the mutex, it must not be
 311 * called while the lockcnt's mutex is held by the current thread.
 312 * For the same reason, qemu_lockcnt_inc can also contribute to
 313 * AB-BA deadlocks.  This is a sample deadlock scenario:
 314 *
 315 *            thread 1                      thread 2
 316 *            -------------------------------------------------------
 317 *            qemu_lockcnt_lock(&lc1);
 318 *                                          qemu_lockcnt_lock(&lc2);
 319 *            qemu_lockcnt_inc(&lc2);
 320 *                                          qemu_lockcnt_inc(&lc1);
 321 */
 322void qemu_lockcnt_inc(QemuLockCnt *lockcnt);
 323
 324/**
 325 * qemu_lockcnt_dec: decrement a QemuLockCnt's counter
 326 * @lockcnt: the lockcnt to operate on
 327 */
 328void qemu_lockcnt_dec(QemuLockCnt *lockcnt);
 329
 330/**
 331 * qemu_lockcnt_dec_and_lock: decrement a QemuLockCnt's counter and
 332 * possibly lock it.
 333 * @lockcnt: the lockcnt to operate on
 334 *
 335 * Decrement lockcnt's count.  If the new count is zero, lock
 336 * the mutex and return true.  Otherwise, return false.
 337 */
 338bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt);
 339
 340/**
 341 * qemu_lockcnt_dec_if_lock: possibly decrement a QemuLockCnt's counter and
 342 * lock it.
 343 * @lockcnt: the lockcnt to operate on
 344 *
 345 * If the count is 1, decrement the count to zero, lock
 346 * the mutex and return true.  Otherwise, return false.
 347 */
 348bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt);
 349
 350/**
 351 * qemu_lockcnt_lock: lock a QemuLockCnt's mutex.
 352 * @lockcnt: the lockcnt to operate on
 353 *
 354 * Remember that concurrent visits are not blocked unless the count is
 355 * also zero.  You can use qemu_lockcnt_count to check for this inside a
 356 * critical section.
 357 */
 358void qemu_lockcnt_lock(QemuLockCnt *lockcnt);
 359
 360/**
 361 * qemu_lockcnt_unlock: release a QemuLockCnt's mutex.
 362 * @lockcnt: the lockcnt to operate on.
 363 */
 364void qemu_lockcnt_unlock(QemuLockCnt *lockcnt);
 365
 366/**
 367 * qemu_lockcnt_inc_and_unlock: combined unlock/increment on a QemuLockCnt.
 368 * @lockcnt: the lockcnt to operate on.
 369 *
 370 * This is the same as
 371 *
 372 *     qemu_lockcnt_unlock(lockcnt);
 373 *     qemu_lockcnt_inc(lockcnt);
 374 *
 375 * but more efficient.
 376 */
 377void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt);
 378
 379/**
 * qemu_lockcnt_count: query a QemuLockCnt's count.
 381 * @lockcnt: the lockcnt to query.
 382 *
 383 * Note that the count can change at any time.  Still, while the
 384 * lockcnt is locked, one can usefully check whether the count
 385 * is non-zero.
 386 */
 387unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt);
 388
 389#endif
 390