qemu/util/qemu-thread-posix.c
/*
 * Wrappers around mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2009
 *
 * Author:
 *  Marcelo Tosatti <mtosatti@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#ifdef __linux__
#include <sys/syscall.h>
#include <linux/futex.h>
#endif
#include "qemu/thread.h"
#include "qemu/atomic.h"
#include "qemu/notify.h"

static bool name_threads;

void qemu_thread_naming(bool enable)
{
    name_threads = enable;

#ifndef CONFIG_THREAD_SETNAME_BYTHREAD
    /* This is a debugging option, not fatal */
    if (enable) {
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
    }
#endif
}

static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}

void qemu_mutex_init(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_init(&mutex->lock, NULL);
    if (err)
        error_exit(err, __func__);
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_destroy(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

void qemu_mutex_lock(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_lock(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

int qemu_mutex_trylock(QemuMutex *mutex)
{
    return pthread_mutex_trylock(&mutex->lock);
}

void qemu_mutex_unlock(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_unlock(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_init(QemuCond *cond)
{
    int err;

    err = pthread_cond_init(&cond->cond, NULL);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_destroy(QemuCond *cond)
{
    int err;

    err = pthread_cond_destroy(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_signal(QemuCond *cond)
{
    int err;

    err = pthread_cond_signal(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_broadcast(QemuCond *cond)
{
    int err;

    err = pthread_cond_broadcast(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    int err;

    err = pthread_cond_wait(&cond->cond, &mutex->lock);
    if (err)
        error_exit(err, __func__);
}

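/* QemuSemaphore: on Darwin and NetBSD the code below does not use sem_t at
 * all and instead emulates the semaphore with a mutex, a condition variable
 * and a counter (presumably because unnamed POSIX semaphores are not usable
 * on those hosts); everywhere else it wraps sem_t directly.
 */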
void qemu_sem_init(QemuSemaphore *sem, int init)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = pthread_mutex_init(&sem->lock, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_cond_init(&sem->cond, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    if (init < 0) {
        error_exit(EINVAL, __func__);
    }
    sem->count = init;
#else
    rc = sem_init(&sem->sem, 0, init);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    /* pthread_* functions return a positive error code, not -1/errno.  */
    rc = pthread_cond_destroy(&sem->cond);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_mutex_destroy(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_destroy(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_post(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    if (sem->count == UINT_MAX) {
        rc = EINVAL;
    } else {
        sem->count++;
        rc = pthread_cond_signal(&sem->cond);
    }
    pthread_mutex_unlock(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_post(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

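/* Convert a timeout relative to now, given in milliseconds, into the
 * absolute CLOCK_REALTIME deadline expected by sem_timedwait() and
 * pthread_cond_timedwait().
 */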
static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000;
    ts->tv_sec = tv.tv_sec + ms / 1000;
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000;
    }
}

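/* Wait for the semaphore for at most @ms milliseconds (ms <= 0 only polls).
 * Returns 0 if the semaphore was acquired, -1 on timeout; any other failure
 * aborts via error_exit().
 */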
int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait.  */
        do {
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == EAGAIN) {
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        do {
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}

void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_wait(&sem->cond, &sem->lock);
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    --sem->count;
    pthread_mutex_unlock(&sem->lock);
#else
    do {
        rc = sem_wait(&sem->sem);
    } while (rc == -1 && errno == EINTR);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

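/* QemuEvent: on Linux the event value is operated on directly with the
 * futex(2) system call; on other hosts the futex_wake()/futex_wait() helpers
 * below provide the same interface on top of a pthread mutex and condition
 * variable protecting ev->value.
 */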
#ifdef __linux__
#define futex(...)              syscall(__NR_futex, __VA_ARGS__)

static inline void futex_wake(QemuEvent *ev, int n)
{
    futex(ev, FUTEX_WAKE, n, NULL, NULL, 0);
}

static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    while (futex(ev, FUTEX_WAIT, (int) val, NULL, NULL, 0)) {
        switch (errno) {
        case EWOULDBLOCK:
            return;
        case EINTR:
            break; /* get out of switch and retry */
        default:
            abort();
        }
    }
}
#else
static inline void futex_wake(QemuEvent *ev, int n)
{
    pthread_mutex_lock(&ev->lock);
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
    pthread_mutex_unlock(&ev->lock);
}

static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    pthread_mutex_lock(&ev->lock);
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif

/* Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by futex_wake
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy.
 */

#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1

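/* Typical use (a sketch, not taken from this file): one thread blocks in
 * qemu_event_wait() until another thread calls qemu_event_set(); the event
 * then stays set, releasing current and future waiters, until somebody
 * calls qemu_event_reset().  For example:
 *
 *     QemuEvent done;
 *     qemu_event_init(&done, false);          // start out reset ("free")
 *     // worker thread:   ... do work ...;  qemu_event_set(&done);
 *     // waiting thread:  qemu_event_wait(&done);  qemu_event_reset(&done);
 */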
void qemu_event_init(QemuEvent *ev, bool init)
{
#ifndef __linux__
    pthread_mutex_init(&ev->lock, NULL);
    pthread_cond_init(&ev->cond, NULL);
#endif

    ev->value = (init ? EV_SET : EV_FREE);
}

void qemu_event_destroy(QemuEvent *ev)
{
#ifndef __linux__
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}

void qemu_event_set(QemuEvent *ev)
{
    if (atomic_mb_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up.  */
            futex_wake(ev, INT_MAX);
        }
    }
}

void qemu_event_reset(QemuEvent *ev)
{
    if (atomic_mb_read(&ev->value) == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing.  Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}

void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    value = atomic_mb_read(&ev->value);
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters.  No need to retry, because there cannot be
             * a concurrent busy->free transition.  After the CAS, the
             * event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        futex_wait(ev, EV_BUSY);
    }
}

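/* Per-thread exit notifiers: the notifier list head is stored directly in a
 * pthread TLS slot (through the pointer-sized NotifierThreadData union), and
 * the destructor registered with pthread_key_create() runs the list when the
 * thread exits.
 */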
static pthread_key_t exit_key;

union NotifierThreadData {
    void *ptr;
    NotifierList list;
};
QEMU_BUILD_BUG_ON(sizeof(union NotifierThreadData) != sizeof(void *));

void qemu_thread_atexit_add(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_list_add(&ntd.list, notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}

void qemu_thread_atexit_remove(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_remove(notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}

static void qemu_thread_atexit_run(void *arg)
{
    union NotifierThreadData ntd = { .ptr = arg };
    notifier_list_notify(&ntd.list, NULL);
}

static void __attribute__((constructor)) qemu_thread_atexit_init(void)
{
    pthread_key_create(&exit_key, qemu_thread_atexit_run);
}


/* Attempt to set the thread's name; note that this is for debug, so
 * we're not going to fail if we can't set it.
 */
static void qemu_thread_set_name(QemuThread *thread, const char *name)
{
#ifdef CONFIG_PTHREAD_SETNAME_NP
    pthread_setname_np(thread->thread, name);
#endif
}

void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode)
{
    sigset_t set, oldset;
    int err;
    pthread_attr_t attr;

    err = pthread_attr_init(&attr);
    if (err) {
        error_exit(err, __func__);
    }
    if (mode == QEMU_THREAD_DETACHED) {
        err = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        if (err) {
            error_exit(err, __func__);
        }
    }

    /* Leave signal handling to the iothread.  */
    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);
    err = pthread_create(&thread->thread, &attr, start_routine, arg);
    if (err)
        error_exit(err, __func__);

    if (name_threads) {
        qemu_thread_set_name(thread, name);
    }

    pthread_sigmask(SIG_SETMASK, &oldset, NULL);

    pthread_attr_destroy(&attr);
}

void qemu_thread_get_self(QemuThread *thread)
{
    thread->thread = pthread_self();
}

bool qemu_thread_is_self(QemuThread *thread)
{
    return pthread_equal(pthread_self(), thread->thread);
}

void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}

void *qemu_thread_join(QemuThread *thread)
{
    int err;
    void *ret;

    err = pthread_join(thread->thread, &ret);
    if (err) {
        error_exit(err, __func__);
    }
    return ret;
}