qemu/util/qemu-thread-posix.c
/*
 * Wrappers around mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2009
 *
 * Author:
 *  Marcelo Tosatti <mtosatti@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/atomic.h"
#include "qemu/notify.h"
#include "trace.h"

static bool name_threads;

void qemu_thread_naming(bool enable)
{
    name_threads = enable;

#ifndef CONFIG_THREAD_SETNAME_BYTHREAD
    /* This is a debugging option, not fatal */
    if (enable) {
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
    }
#endif
}

static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}

void qemu_mutex_init(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_init(&mutex->lock, NULL);
    if (err)
        error_exit(err, __func__);
    mutex->initialized = true;
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    int err;

    assert(mutex->initialized);
    mutex->initialized = false;
    err = pthread_mutex_destroy(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

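/*
 * The _impl variants take a file/line pair describing the call site so that
 * the trace points below can report where the lock was acquired or released;
 * the public qemu_mutex_lock()/qemu_mutex_unlock() wrappers (declared in
 * "qemu/thread.h") supply those arguments.
 */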
void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(mutex->initialized);
    trace_qemu_mutex_lock(mutex, file, line);

    err = pthread_mutex_lock(&mutex->lock);
    if (err)
        error_exit(err, __func__);

    trace_qemu_mutex_locked(mutex, file, line);
}

int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(mutex->initialized);
    err = pthread_mutex_trylock(&mutex->lock);
    if (err == 0) {
        trace_qemu_mutex_locked(mutex, file, line);
        return 0;
    }
    if (err != EBUSY) {
        error_exit(err, __func__);
    }
    return -EBUSY;
}

void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(mutex->initialized);
    err = pthread_mutex_unlock(&mutex->lock);
    if (err)
        error_exit(err, __func__);

    trace_qemu_mutex_unlock(mutex, file, line);
}

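/*
 * Recursive mutex: same pthread mutex storage as QemuMutex, but initialized
 * with the PTHREAD_MUTEX_RECURSIVE attribute so the owning thread can
 * re-acquire it without deadlocking.
 */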
void qemu_rec_mutex_init(QemuRecMutex *mutex)
{
    int err;
    pthread_mutexattr_t attr;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    err = pthread_mutex_init(&mutex->lock, &attr);
    pthread_mutexattr_destroy(&attr);
    if (err) {
        error_exit(err, __func__);
    }
    mutex->initialized = true;
}

void qemu_cond_init(QemuCond *cond)
{
    int err;

    err = pthread_cond_init(&cond->cond, NULL);
    if (err)
        error_exit(err, __func__);
    cond->initialized = true;
}

void qemu_cond_destroy(QemuCond *cond)
{
    int err;

    assert(cond->initialized);
    cond->initialized = false;
    err = pthread_cond_destroy(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_signal(QemuCond *cond)
{
    int err;

    assert(cond->initialized);
    err = pthread_cond_signal(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_broadcast(QemuCond *cond)
{
    int err;

    assert(cond->initialized);
    err = pthread_cond_broadcast(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(cond->initialized);
    trace_qemu_mutex_unlock(mutex, file, line);
    err = pthread_cond_wait(&cond->cond, &mutex->lock);
    trace_qemu_mutex_locked(mutex, file, line);
    if (err)
        error_exit(err, __func__);
}

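/*
 * Semaphores: when the host lacks sem_timedwait() (CONFIG_SEM_TIMEDWAIT not
 * defined), a counting semaphore is emulated with a mutex, a condition
 * variable and an unsigned counter; otherwise the POSIX sem_t API is used
 * directly.
 */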
void qemu_sem_init(QemuSemaphore *sem, int init)
{
    int rc;

#ifndef CONFIG_SEM_TIMEDWAIT
    rc = pthread_mutex_init(&sem->lock, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_cond_init(&sem->cond, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    if (init < 0) {
        error_exit(EINVAL, __func__);
    }
    sem->count = init;
#else
    rc = sem_init(&sem->sem, 0, init);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
    sem->initialized = true;
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
    sem->initialized = false;
#ifndef CONFIG_SEM_TIMEDWAIT
    rc = pthread_cond_destroy(&sem->cond);
    if (rc < 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_mutex_destroy(&sem->lock);
    if (rc < 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_destroy(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_post(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    pthread_mutex_lock(&sem->lock);
    if (sem->count == UINT_MAX) {
        rc = EINVAL;
    } else {
        sem->count++;
        rc = pthread_cond_signal(&sem->cond);
    }
    pthread_mutex_unlock(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_post(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

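/*
 * Convert a relative timeout in milliseconds into an absolute CLOCK_REALTIME
 * deadline, as required by pthread_cond_timedwait() and sem_timedwait().
 * For example, ms = 2500 adds 2 to tv_sec and 500,000,000 to tv_nsec, with a
 * final carry if tv_nsec overflows one second.
 */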
static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000;
    ts->tv_sec = tv.tv_sec + ms / 1000;
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000;
    }
}

int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait.  */
        do {
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == EAGAIN) {
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        do {
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}

void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_wait(&sem->cond, &sem->lock);
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    --sem->count;
    pthread_mutex_unlock(&sem->lock);
#else
    do {
        rc = sem_wait(&sem->sem);
    } while (rc == -1 && errno == EINTR);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

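/*
 * QemuEvent is built on a futex-like wait/wake pair.  On Linux the real
 * futex(2) helpers from "qemu/futex.h" are used; on other hosts the same
 * interface is emulated below with the event's own mutex and condition
 * variable.
 */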
#ifdef __linux__
#include "qemu/futex.h"
#else
static inline void qemu_futex_wake(QemuEvent *ev, int n)
{
    assert(ev->initialized);
    pthread_mutex_lock(&ev->lock);
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
    pthread_mutex_unlock(&ev->lock);
}

static inline void qemu_futex_wait(QemuEvent *ev, unsigned val)
{
    assert(ev->initialized);
    pthread_mutex_lock(&ev->lock);
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif

/* Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by qemu_futex_wake
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy.
 */

#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1

void qemu_event_init(QemuEvent *ev, bool init)
{
#ifndef __linux__
    pthread_mutex_init(&ev->lock, NULL);
    pthread_cond_init(&ev->cond, NULL);
#endif

    ev->value = (init ? EV_SET : EV_FREE);
    ev->initialized = true;
}

void qemu_event_destroy(QemuEvent *ev)
{
    assert(ev->initialized);
    ev->initialized = false;
#ifndef __linux__
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}

void qemu_event_set(QemuEvent *ev)
{
    /* qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */
    assert(ev->initialized);
    smp_mb();
    if (atomic_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up.  */
            qemu_futex_wake(ev, INT_MAX);
        }
    }
}

void qemu_event_reset(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing.  Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}

void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters.  No need to retry, because there cannot be
             * a concurrent busy->free transition.  After the CAS, the
             * event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        qemu_futex_wait(ev, EV_BUSY);
    }
}

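/*
 * Illustrative sketch (not part of the original file): typical use of the
 * event primitive above, with a hypothetical "work_done" event shared
 * between a worker and a waiter.
 *
 *     QemuEvent work_done;
 *     qemu_event_init(&work_done, false);
 *
 *     // waiter: block until the worker signals, then rearm the event
 *     qemu_event_wait(&work_done);
 *     qemu_event_reset(&work_done);
 *
 *     // worker: publish results (with the release semantics noted in
 *     // qemu_event_set), then wake all waiters
 *     qemu_event_set(&work_done);
 */
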
static pthread_key_t exit_key;

union NotifierThreadData {
    void *ptr;
    NotifierList list;
};
QEMU_BUILD_BUG_ON(sizeof(union NotifierThreadData) != sizeof(void *));

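/*
 * Per-thread exit notifiers: the NotifierList head for the current thread is
 * stored directly in the void * slot of a pthread TLS key (hence the union
 * and size check above), and the key's destructor runs the list when the
 * thread exits.
 */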
void qemu_thread_atexit_add(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_list_add(&ntd.list, notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}

void qemu_thread_atexit_remove(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_remove(notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}

static void qemu_thread_atexit_run(void *arg)
{
    union NotifierThreadData ntd = { .ptr = arg };
    notifier_list_notify(&ntd.list, NULL);
}

static void __attribute__((constructor)) qemu_thread_atexit_init(void)
{
    pthread_key_create(&exit_key, qemu_thread_atexit_run);
}


#ifdef CONFIG_PTHREAD_SETNAME_NP
typedef struct {
    void *(*start_routine)(void *);
    void *arg;
    char *name;
} QemuThreadArgs;

static void *qemu_thread_start(void *args)
{
    QemuThreadArgs *qemu_thread_args = args;
    void *(*start_routine)(void *) = qemu_thread_args->start_routine;
    void *arg = qemu_thread_args->arg;

    /* Attempt to set the thread's name; note that this is for debug, so
     * we're not going to fail if we can't set it.
     */
    pthread_setname_np(pthread_self(), qemu_thread_args->name);
    g_free(qemu_thread_args->name);
    g_free(qemu_thread_args);
    return start_routine(arg);
}
#endif


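/*
 * All signals are blocked in the new thread so that they are delivered to
 * the iothread instead; the creating thread's signal mask is restored once
 * pthread_create() has inherited the blocked set.  When thread naming is
 * enabled and pthread_setname_np() is available, the start routine is
 * wrapped by qemu_thread_start() above so the name can be applied from
 * inside the new thread.
 */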
void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void*),
                        void *arg, int mode)
{
    sigset_t set, oldset;
    int err;
    pthread_attr_t attr;

    err = pthread_attr_init(&attr);
    if (err) {
        error_exit(err, __func__);
    }

    if (mode == QEMU_THREAD_DETACHED) {
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    }

    /* Leave signal handling to the iothread.  */
    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);

#ifdef CONFIG_PTHREAD_SETNAME_NP
    if (name_threads) {
        QemuThreadArgs *qemu_thread_args;
        qemu_thread_args = g_new0(QemuThreadArgs, 1);
        qemu_thread_args->name = g_strdup(name);
        qemu_thread_args->start_routine = start_routine;
        qemu_thread_args->arg = arg;

        err = pthread_create(&thread->thread, &attr,
                             qemu_thread_start, qemu_thread_args);
    } else
#endif
    {
        err = pthread_create(&thread->thread, &attr,
                             start_routine, arg);
    }

    if (err)
        error_exit(err, __func__);

    pthread_sigmask(SIG_SETMASK, &oldset, NULL);

    pthread_attr_destroy(&attr);
}

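/*
 * Illustrative sketch (not part of the original file): creating a named,
 * joinable thread with these wrappers.  "worker_fn" and "opaque" are
 * hypothetical.
 *
 *     QemuThread t;
 *     qemu_thread_create(&t, "my-worker", worker_fn, opaque,
 *                        QEMU_THREAD_JOINABLE);
 *     ...
 *     void *ret = qemu_thread_join(&t);
 */
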
void qemu_thread_get_self(QemuThread *thread)
{
    thread->thread = pthread_self();
}

bool qemu_thread_is_self(QemuThread *thread)
{
    return pthread_equal(pthread_self(), thread->thread);
}

void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}

void *qemu_thread_join(QemuThread *thread)
{
    int err;
    void *ret;

    err = pthread_join(thread->thread, &ret);
    if (err) {
        error_exit(err, __func__);
    }
    return ret;
}