qemu/include/qemu/thread.h
#ifndef QEMU_THREAD_H
#define QEMU_THREAD_H

#include "qemu/processor.h"
#include "qemu/atomic.h"

typedef struct QemuCond QemuCond;
typedef struct QemuSemaphore QemuSemaphore;
typedef struct QemuEvent QemuEvent;
typedef struct QemuLockCnt QemuLockCnt;
typedef struct QemuThread QemuThread;

#ifdef _WIN32
#include "qemu/thread-win32.h"
#else
#include "qemu/thread-posix.h"
#endif

/* include QSP header once QemuMutex, QemuCond etc. are defined */
#include "qemu/qsp.h"

#define QEMU_THREAD_JOINABLE 0
#define QEMU_THREAD_DETACHED 1

void qemu_mutex_init(QemuMutex *mutex);
void qemu_mutex_destroy(QemuMutex *mutex);
int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line);
void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line);
void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line);

void qemu_rec_mutex_init(QemuRecMutex *mutex);
void qemu_rec_mutex_destroy(QemuRecMutex *mutex);
void qemu_rec_mutex_lock_impl(QemuRecMutex *mutex, const char *file, int line);
int qemu_rec_mutex_trylock_impl(QemuRecMutex *mutex, const char *file, int line);
void qemu_rec_mutex_unlock_impl(QemuRecMutex *mutex, const char *file, int line);

typedef void (*QemuMutexLockFunc)(QemuMutex *m, const char *f, int l);
typedef int (*QemuMutexTrylockFunc)(QemuMutex *m, const char *f, int l);
typedef void (*QemuRecMutexLockFunc)(QemuRecMutex *m, const char *f, int l);
typedef int (*QemuRecMutexTrylockFunc)(QemuRecMutex *m, const char *f, int l);
typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f,
                                 int l);
typedef bool (*QemuCondTimedWaitFunc)(QemuCond *c, QemuMutex *m, int ms,
                                      const char *f, int l);

extern QemuMutexLockFunc qemu_bql_mutex_lock_func;
extern QemuMutexLockFunc qemu_mutex_lock_func;
extern QemuMutexTrylockFunc qemu_mutex_trylock_func;
extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func;
extern QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func;
extern QemuCondWaitFunc qemu_cond_wait_func;
extern QemuCondTimedWaitFunc qemu_cond_timedwait_func;

/* convenience macros to bypass the profiler */
#define qemu_mutex_lock__raw(m)                         \
        qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock__raw(m)                      \
        qemu_mutex_trylock_impl(m, __FILE__, __LINE__)

#ifdef __COVERITY__
/*
 * Coverity is severely confused by the indirect function calls,
 * hide them.
 */
#define qemu_mutex_lock(m)                                              \
            qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock(m)                                           \
            qemu_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_lock(m)                                          \
            qemu_rec_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_trylock(m)                                       \
            qemu_rec_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_cond_wait(c, m)                                            \
            qemu_cond_wait_impl(c, m, __FILE__, __LINE__)
#define qemu_cond_timedwait(c, m, ms)                                   \
            qemu_cond_timedwait_impl(c, m, ms, __FILE__, __LINE__)
#else
#define qemu_mutex_lock(m) ({                                           \
            QemuMutexLockFunc _f = qatomic_read(&qemu_mutex_lock_func); \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_mutex_trylock(m) ({                                              \
            QemuMutexTrylockFunc _f = qatomic_read(&qemu_mutex_trylock_func); \
            _f(m, __FILE__, __LINE__);                                        \
        })

#define qemu_rec_mutex_lock(m) ({                                             \
            QemuRecMutexLockFunc _f = qatomic_read(&qemu_rec_mutex_lock_func);\
            _f(m, __FILE__, __LINE__);                                        \
        })

#define qemu_rec_mutex_trylock(m) ({                            \
            QemuRecMutexTrylockFunc _f;                         \
            _f = qatomic_read(&qemu_rec_mutex_trylock_func);    \
            _f(m, __FILE__, __LINE__);                          \
        })

#define qemu_cond_wait(c, m) ({                                         \
            QemuCondWaitFunc _f = qatomic_read(&qemu_cond_wait_func);   \
            _f(c, m, __FILE__, __LINE__);                               \
        })

#define qemu_cond_timedwait(c, m, ms) ({                                       \
            QemuCondTimedWaitFunc _f = qatomic_read(&qemu_cond_timedwait_func);\
            _f(c, m, ms, __FILE__, __LINE__);                                  \
        })
#endif

#define qemu_mutex_unlock(mutex) \
        qemu_mutex_unlock_impl(mutex, __FILE__, __LINE__)

#define qemu_rec_mutex_unlock(mutex) \
        qemu_rec_mutex_unlock_impl(mutex, __FILE__, __LINE__)

static inline void (qemu_mutex_lock)(QemuMutex *mutex)
{
    qemu_mutex_lock(mutex);
}

static inline int (qemu_mutex_trylock)(QemuMutex *mutex)
{
    return qemu_mutex_trylock(mutex);
}

static inline void (qemu_mutex_unlock)(QemuMutex *mutex)
{
    qemu_mutex_unlock(mutex);
}

static inline void (qemu_rec_mutex_lock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_lock(mutex);
}

static inline int (qemu_rec_mutex_trylock)(QemuRecMutex *mutex)
{
    return qemu_rec_mutex_trylock(mutex);
}

static inline void (qemu_rec_mutex_unlock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_unlock(mutex);
}
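
/*
 * Example (illustrative sketch, not part of the API): protecting a shared
 * counter with a plain QemuMutex.  The names 'counter' and 'counter_lock'
 * are hypothetical and only exist in this comment.
 *
 *     static QemuMutex counter_lock;
 *     static int counter;
 *
 *     // once, before the mutex is first used
 *     qemu_mutex_init(&counter_lock);
 *
 *     // in any thread
 *     qemu_mutex_lock(&counter_lock);
 *     counter++;
 *     qemu_mutex_unlock(&counter_lock);
 *
 *     // qemu_mutex_trylock() returns 0 when the lock was acquired
 *     if (!qemu_mutex_trylock(&counter_lock)) {
 *         counter++;
 *         qemu_mutex_unlock(&counter_lock);
 *     }
 *
 *     // once, when the mutex is no longer needed
 *     qemu_mutex_destroy(&counter_lock);
 */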

void qemu_cond_init(QemuCond *cond);
void qemu_cond_destroy(QemuCond *cond);

/*
 * IMPORTANT: The implementation does not guarantee correct behaviour of
 * pthread_cond_signal and pthread_cond_broadcast unless they are called
 * while holding the same mutex that the corresponding pthread_cond_wait
 * calls use!
 */
void qemu_cond_signal(QemuCond *cond);
void qemu_cond_broadcast(QemuCond *cond);
void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex,
                         const char *file, const int line);
bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
                              const char *file, const int line);

static inline void (qemu_cond_wait)(QemuCond *cond, QemuMutex *mutex)
{
    qemu_cond_wait(cond, mutex);
}

/* Returns true if timeout has not expired, and false otherwise */
static inline bool (qemu_cond_timedwait)(QemuCond *cond, QemuMutex *mutex,
                                         int ms)
{
    return qemu_cond_timedwait(cond, mutex, ms);
}

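/*
 * Example (illustrative sketch): the usual wait-with-predicate pattern.
 * 'req_lock', 'req_cond' and 'req_pending' are hypothetical names.
 *
 *     // consumer
 *     qemu_mutex_lock(&req_lock);
 *     while (!req_pending) {
 *         qemu_cond_wait(&req_cond, &req_lock);
 *     }
 *     req_pending = false;
 *     qemu_mutex_unlock(&req_lock);
 *
 *     // producer
 *     qemu_mutex_lock(&req_lock);
 *     req_pending = true;
 *     qemu_cond_signal(&req_cond);   // mutex held, as required above
 *     qemu_mutex_unlock(&req_lock);
 *
 * qemu_cond_timedwait() follows the same pattern; it returns false once
 * the timeout (in milliseconds) expires, e.g.:
 *
 *     while (!req_pending) {
 *         if (!qemu_cond_timedwait(&req_cond, &req_lock, 10)) {
 *             break;   // timed out after 10 ms
 *         }
 *     }
 */
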
void qemu_sem_init(QemuSemaphore *sem, int init);
void qemu_sem_post(QemuSemaphore *sem);
void qemu_sem_wait(QemuSemaphore *sem);
int qemu_sem_timedwait(QemuSemaphore *sem, int ms);
void qemu_sem_destroy(QemuSemaphore *sem);

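/*
 * Example (illustrative sketch): a semaphore used as a completion signal.
 * 'done_sem' is a hypothetical name.
 *
 *     QemuSemaphore done_sem;
 *
 *     qemu_sem_init(&done_sem, 0);       // initial count of zero
 *
 *     // worker thread, when finished:
 *     qemu_sem_post(&done_sem);
 *
 *     // waiter, blocking:
 *     qemu_sem_wait(&done_sem);
 *
 *     // or waiter with a 100 ms timeout; returns 0 on success,
 *     // nonzero (negative) if the timeout expired:
 *     if (qemu_sem_timedwait(&done_sem, 100) < 0) {
 *         // timed out
 *     }
 *
 *     qemu_sem_destroy(&done_sem);
 */
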
void qemu_event_init(QemuEvent *ev, bool init);
void qemu_event_set(QemuEvent *ev);
void qemu_event_reset(QemuEvent *ev);
void qemu_event_wait(QemuEvent *ev);
void qemu_event_destroy(QemuEvent *ev);

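/*
 * Example (illustrative sketch): a QemuEvent is a boolean that threads can
 * wait on.  'init_done' is a hypothetical name.
 *
 *     QemuEvent init_done;
 *
 *     qemu_event_init(&init_done, false);   // start in the "reset" state
 *
 *     // initializer thread, once setup is complete:
 *     qemu_event_set(&init_done);
 *
 *     // any other thread; returns immediately once the event is set:
 *     qemu_event_wait(&init_done);
 *
 *     // make subsequent qemu_event_wait() calls block again:
 *     qemu_event_reset(&init_done);
 */
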
void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode);
void *qemu_thread_join(QemuThread *thread);
void qemu_thread_get_self(QemuThread *thread);
bool qemu_thread_is_self(QemuThread *thread);
void qemu_thread_exit(void *retval) QEMU_NORETURN;
void qemu_thread_naming(bool enable);

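/*
 * Example (illustrative sketch): spawning a joinable worker thread.
 * 'worker_fn', 'worker' and 'opaque' are hypothetical names.
 *
 *     static void *worker_fn(void *opaque)
 *     {
 *         // ... do work on opaque ...
 *         return NULL;
 *     }
 *
 *     QemuThread worker;
 *
 *     qemu_thread_create(&worker, "my-worker", worker_fn, opaque,
 *                        QEMU_THREAD_JOINABLE);
 *     // ...
 *     qemu_thread_join(&worker);     // only valid for joinable threads
 *
 * With QEMU_THREAD_DETACHED the thread cleans up after itself and must
 * not be joined.
 */
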
struct Notifier;
/**
 * qemu_thread_atexit_add:
 * @notifier: Notifier to add
 *
 * Add the specified notifier to a list which will be run via
 * notifier_list_notify() when this thread exits (either by calling
 * qemu_thread_exit() or by returning from its start_routine).
 * The usual usage is that the caller passes a Notifier which is
 * a per-thread variable; it can then use the callback to free
 * other per-thread data.
 *
 * If the thread exits as part of the entire process exiting,
 * it is unspecified whether notifiers are called or not.
 */
void qemu_thread_atexit_add(struct Notifier *notifier);
/**
 * qemu_thread_atexit_remove:
 * @notifier: Notifier to remove
 *
 * Remove the specified notifier from the thread-exit notification
 * list. It is not valid to try to remove a notifier which is not
 * on the list.
 */
void qemu_thread_atexit_remove(struct Notifier *notifier);

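/*
 * Example (illustrative sketch): freeing per-thread data on thread exit.
 * Assumes the Notifier type from "qemu/notify.h"; 'tls_notifier',
 * 'tls_buffer' and 'tls_cleanup' are hypothetical names.
 *
 *     static __thread Notifier tls_notifier;
 *     static __thread void *tls_buffer;
 *
 *     static void tls_cleanup(Notifier *n, void *unused)
 *     {
 *         g_free(tls_buffer);
 *     }
 *
 *     // in the thread that owns tls_buffer:
 *     tls_notifier.notify = tls_cleanup;
 *     qemu_thread_atexit_add(&tls_notifier);
 */
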
#ifdef CONFIG_TSAN
#include <sanitizer/tsan_interface.h>
#endif

struct QemuSpin {
    int value;
};

static inline void qemu_spin_init(QemuSpin *spin)
{
    __sync_lock_release(&spin->value);
#ifdef CONFIG_TSAN
    __tsan_mutex_create(spin, __tsan_mutex_not_static);
#endif
}

/* const parameter because the only purpose here is the TSAN annotation */
static inline void qemu_spin_destroy(const QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_destroy((void *)spin, __tsan_mutex_not_static);
#endif
}

static inline void qemu_spin_lock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_lock(spin, 0);
#endif
    while (unlikely(__sync_lock_test_and_set(&spin->value, true))) {
        while (qatomic_read(&spin->value)) {
            cpu_relax();
        }
    }
#ifdef CONFIG_TSAN
    __tsan_mutex_post_lock(spin, 0, 0);
#endif
}

static inline bool qemu_spin_trylock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_lock(spin, __tsan_mutex_try_lock);
#endif
    bool busy = __sync_lock_test_and_set(&spin->value, true);
#ifdef CONFIG_TSAN
    unsigned flags = __tsan_mutex_try_lock;
    flags |= busy ? __tsan_mutex_try_lock_failed : 0;
    __tsan_mutex_post_lock(spin, flags, 0);
#endif
    return busy;
}

static inline bool qemu_spin_locked(QemuSpin *spin)
{
    return qatomic_read(&spin->value);
}

static inline void qemu_spin_unlock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_unlock(spin, 0);
#endif
    __sync_lock_release(&spin->value);
#ifdef CONFIG_TSAN
    __tsan_mutex_post_unlock(spin, 0);
#endif
}

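/*
 * Example (illustrative sketch): a QemuSpin protecting a very short
 * critical section.  Spinning burns CPU, so only use it where the lock is
 * held for a few instructions and the holder cannot sleep.  'stats_lock'
 * and 'stats' are hypothetical names.
 *
 *     static QemuSpin stats_lock;
 *     static uint64_t stats;
 *
 *     qemu_spin_init(&stats_lock);
 *
 *     qemu_spin_lock(&stats_lock);
 *     stats++;
 *     qemu_spin_unlock(&stats_lock);
 *
 *     // qemu_spin_trylock() returns the previous "busy" state, i.e.
 *     // false when the lock was actually acquired:
 *     if (!qemu_spin_trylock(&stats_lock)) {
 *         stats++;
 *         qemu_spin_unlock(&stats_lock);
 *     }
 */
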
struct QemuLockCnt {
#ifndef CONFIG_LINUX
    QemuMutex mutex;
#endif
    unsigned count;
};

/**
 * qemu_lockcnt_init: initialize a QemuLockCnt
 * @lockcnt: the lockcnt to initialize
 *
 * Initialize lockcnt's counter to zero and prepare its mutex
 * for usage.
 */
void qemu_lockcnt_init(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_destroy: destroy a QemuLockCnt
 * @lockcnt: the lockcnt to destroy
 *
 * Destroy lockcnt's mutex.
 */
void qemu_lockcnt_destroy(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_inc: increment a QemuLockCnt's counter
 * @lockcnt: the lockcnt to operate on
 *
 * If the lockcnt's count is zero, wait for critical sections
 * to finish and increment lockcnt's count to 1.  If the count
 * is not zero, just increment it.
 *
 * Because this function can wait on the mutex, it must not be
 * called while the lockcnt's mutex is held by the current thread.
 * For the same reason, qemu_lockcnt_inc can also contribute to
 * AB-BA deadlocks.  This is a sample deadlock scenario:
 *
 *            thread 1                      thread 2
 *            -------------------------------------------------------
 *            qemu_lockcnt_lock(&lc1);
 *                                          qemu_lockcnt_lock(&lc2);
 *            qemu_lockcnt_inc(&lc2);
 *                                          qemu_lockcnt_inc(&lc1);
 */
void qemu_lockcnt_inc(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_dec: decrement a QemuLockCnt's counter
 * @lockcnt: the lockcnt to operate on
 */
void qemu_lockcnt_dec(QemuLockCnt *lockcnt);

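/*
 * Example (illustrative sketch): the reader side of a QemuLockCnt.  While
 * the count is elevated the protected data must not be freed; the matching
 * writer side is sketched at the end of this file.  'obj' and 'obj_lockcnt'
 * are hypothetical names.
 *
 *     qemu_lockcnt_inc(&obj_lockcnt);
 *     if (obj) {
 *         // ... read-only access to obj ...
 *     }
 *     qemu_lockcnt_dec(&obj_lockcnt);
 */
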
/**
 * qemu_lockcnt_dec_and_lock: decrement a QemuLockCnt's counter and
 * possibly lock it.
 * @lockcnt: the lockcnt to operate on
 *
 * Decrement lockcnt's count.  If the new count is zero, lock
 * the mutex and return true.  Otherwise, return false.
 */
bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_dec_if_lock: possibly decrement a QemuLockCnt's counter and
 * lock it.
 * @lockcnt: the lockcnt to operate on
 *
 * If the count is 1, decrement the count to zero, lock
 * the mutex and return true.  Otherwise, return false.
 */
bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_lock: lock a QemuLockCnt's mutex.
 * @lockcnt: the lockcnt to operate on
 *
 * Remember that concurrent visits are not blocked unless the count is
 * also zero.  You can use qemu_lockcnt_count to check for this inside a
 * critical section.
 */
void qemu_lockcnt_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_unlock: release a QemuLockCnt's mutex.
 * @lockcnt: the lockcnt to operate on.
 */
void qemu_lockcnt_unlock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_inc_and_unlock: combined unlock/increment on a QemuLockCnt.
 * @lockcnt: the lockcnt to operate on.
 *
 * This is the same as
 *
 *     qemu_lockcnt_unlock(lockcnt);
 *     qemu_lockcnt_inc(lockcnt);
 *
 * but more efficient.
 */
void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_count: query a QemuLockCnt's count.
 * @lockcnt: the lockcnt to query.
 *
 * Note that the count can change at any time.  Still, while the
 * lockcnt is locked, one can usefully check whether the count
 * is non-zero.
 */
unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt);

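/*
 * Example (illustrative sketch): the writer side matching the reader
 * pattern sketched after qemu_lockcnt_dec() above.  The data may only be
 * freed while the mutex is held and no readers remain.  'obj' and
 * 'obj_lockcnt' are hypothetical names.
 *
 *     qemu_lockcnt_lock(&obj_lockcnt);
 *     if (!qemu_lockcnt_count(&obj_lockcnt)) {
 *         g_free(obj);
 *         obj = NULL;
 *     }
 *     qemu_lockcnt_unlock(&obj_lockcnt);
 *
 * A reader that drops the last reference can do the cleanup itself with
 * qemu_lockcnt_dec_and_lock():
 *
 *     if (qemu_lockcnt_dec_and_lock(&obj_lockcnt)) {
 *         g_free(obj);
 *         obj = NULL;
 *         qemu_lockcnt_unlock(&obj_lockcnt);
 *     }
 */
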
#endif