qemu/util/qemu-thread-posix.c
/*
 * Wrappers around mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2009
 *
 * Author:
 *  Marcelo Tosatti <mtosatti@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/atomic.h"
#include "qemu/notify.h"
#include "qemu-thread-common.h"
#include "qemu/tsan.h"

static bool name_threads;

void qemu_thread_naming(bool enable)
{
    name_threads = enable;

#ifndef CONFIG_THREAD_SETNAME_BYTHREAD
    /* This is a debugging option, not fatal */
    if (enable) {
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
    }
#endif
}

static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}

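/*
 * Convert a relative timeout in milliseconds into the absolute
 * CLOCK_REALTIME deadline expected by pthread_cond_timedwait() and
 * sem_timedwait().
 */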
static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000;
    ts->tv_sec = tv.tv_sec + ms / 1000;
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000;
    }
}

void qemu_mutex_init(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_init(&mutex->lock, NULL);
    if (err) {
        error_exit(err, __func__);
    }
    qemu_mutex_post_init(mutex);
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    int err;

    assert(mutex->initialized);
    mutex->initialized = false;
    err = pthread_mutex_destroy(&mutex->lock);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(mutex->initialized);
    qemu_mutex_pre_lock(mutex, file, line);
    err = pthread_mutex_lock(&mutex->lock);
    if (err) {
        error_exit(err, __func__);
    }
    qemu_mutex_post_lock(mutex, file, line);
}

int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(mutex->initialized);
    err = pthread_mutex_trylock(&mutex->lock);
    if (err == 0) {
        qemu_mutex_post_lock(mutex, file, line);
        return 0;
    }
    if (err != EBUSY) {
        error_exit(err, __func__);
    }
    return -EBUSY;
}

void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(mutex->initialized);
    qemu_mutex_pre_unlock(mutex, file, line);
    err = pthread_mutex_unlock(&mutex->lock);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_rec_mutex_init(QemuRecMutex *mutex)
{
    int err;
    pthread_mutexattr_t attr;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    err = pthread_mutex_init(&mutex->lock, &attr);
    pthread_mutexattr_destroy(&attr);
    if (err) {
        error_exit(err, __func__);
    }
    mutex->initialized = true;
}

void qemu_cond_init(QemuCond *cond)
{
    int err;

    err = pthread_cond_init(&cond->cond, NULL);
    if (err) {
        error_exit(err, __func__);
    }
    cond->initialized = true;
}

void qemu_cond_destroy(QemuCond *cond)
{
    int err;

    assert(cond->initialized);
    cond->initialized = false;
    err = pthread_cond_destroy(&cond->cond);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_cond_signal(QemuCond *cond)
{
    int err;

    assert(cond->initialized);
    err = pthread_cond_signal(&cond->cond);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_cond_broadcast(QemuCond *cond)
{
    int err;

    assert(cond->initialized);
    err = pthread_cond_broadcast(&cond->cond);
    if (err) {
        error_exit(err, __func__);
    }
}

void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex, const char *file, const int line)
{
    int err;

    assert(cond->initialized);
    qemu_mutex_pre_unlock(mutex, file, line);
    err = pthread_cond_wait(&cond->cond, &mutex->lock);
    qemu_mutex_post_lock(mutex, file, line);
    if (err) {
        error_exit(err, __func__);
    }
}

bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
                              const char *file, const int line)
{
    int err;
    struct timespec ts;

    assert(cond->initialized);
    qemu_mutex_pre_unlock(mutex, file, line);
    compute_abs_deadline(&ts, ms);
    err = pthread_cond_timedwait(&cond->cond, &mutex->lock, &ts);
    qemu_mutex_post_lock(mutex, file, line);
    if (err && err != ETIMEDOUT) {
        error_exit(err, __func__);
    }
    return err != ETIMEDOUT;
}

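/*
 * Counting semaphores.  When the host lacks sem_timedwait() (macOS, for
 * instance, does not provide it, so CONFIG_SEM_TIMEDWAIT is unset there),
 * the semaphore is emulated below with a mutex, a condition variable and
 * a counter.
 */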
void qemu_sem_init(QemuSemaphore *sem, int init)
{
    int rc;

#ifndef CONFIG_SEM_TIMEDWAIT
    rc = pthread_mutex_init(&sem->lock, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_cond_init(&sem->cond, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    if (init < 0) {
        error_exit(EINVAL, __func__);
    }
    sem->count = init;
#else
    rc = sem_init(&sem->sem, 0, init);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
    sem->initialized = true;
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
    sem->initialized = false;
#ifndef CONFIG_SEM_TIMEDWAIT
    rc = pthread_cond_destroy(&sem->cond);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_mutex_destroy(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_destroy(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_post(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    pthread_mutex_lock(&sem->lock);
    if (sem->count == UINT_MAX) {
        rc = EINVAL;
    } else {
        sem->count++;
        rc = pthread_cond_signal(&sem->cond);
    }
    pthread_mutex_unlock(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_post(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait.  */
        do {
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == EAGAIN) {
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        do {
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}

void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_wait(&sem->cond, &sem->lock);
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    --sem->count;
    pthread_mutex_unlock(&sem->lock);
#else
    do {
        rc = sem_wait(&sem->sem);
    } while (rc == -1 && errno == EINTR);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

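/*
 * QemuEvent is built on a futex-style wait/wake pair.  On Linux,
 * qemu/futex.h maps these directly onto the futex(2) system call; on
 * other hosts they are emulated below with a mutex and a condition
 * variable protecting ev->value.
 */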
#ifdef __linux__
#include "qemu/futex.h"
#else
static inline void qemu_futex_wake(QemuEvent *ev, int n)
{
    assert(ev->initialized);
    pthread_mutex_lock(&ev->lock);
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
    pthread_mutex_unlock(&ev->lock);
}

static inline void qemu_futex_wait(QemuEvent *ev, unsigned val)
{
    assert(ev->initialized);
    pthread_mutex_lock(&ev->lock);
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif

/* Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by qemu_futex_wake
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy.
 */
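
/*
 * Illustrative usage sketch (not a caller in this file; data_ready() stands
 * in for whatever condition the caller actually checks): a waiter resets the
 * event, re-checks its condition and only then waits, while the producer
 * publishes its data before setting the event:
 *
 *     // waiter                          // producer
 *     qemu_event_reset(&ev);             data = ...;
 *     if (!data_ready()) {               qemu_event_set(&ev);
 *         qemu_event_wait(&ev);
 *     }
 *
 * qemu_event_set() has release semantics and qemu_event_wait() has acquire
 * semantics (see the barriers below), so the waiter observes everything the
 * producer wrote before setting the event.
 */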

#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1

void qemu_event_init(QemuEvent *ev, bool init)
{
#ifndef __linux__
    pthread_mutex_init(&ev->lock, NULL);
    pthread_cond_init(&ev->cond, NULL);
#endif

    ev->value = (init ? EV_SET : EV_FREE);
    ev->initialized = true;
}

void qemu_event_destroy(QemuEvent *ev)
{
    assert(ev->initialized);
    ev->initialized = false;
#ifndef __linux__
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}

void qemu_event_set(QemuEvent *ev)
{
    /* qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */
    assert(ev->initialized);
    smp_mb();
    if (qatomic_read(&ev->value) != EV_SET) {
        if (qatomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up.  */
            qemu_futex_wake(ev, INT_MAX);
        }
    }
}

void qemu_event_reset(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = qatomic_read(&ev->value);
    smp_mb_acquire();
    if (value == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing.  Otherwise change EV_SET->EV_FREE.
         */
        qatomic_or(&ev->value, EV_FREE);
    }
}

void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = qatomic_read(&ev->value);
    smp_mb_acquire();
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters.  No need to retry, because there cannot be
             * a concurrent busy->free transition.  After the CAS, the
             * event will be either set or busy.
             */
            if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        qemu_futex_wait(ev, EV_BUSY);
    }
}

static __thread NotifierList thread_exit;

/*
 * Note that in this implementation you can register a thread-exit
 * notifier for the main thread, but it will never be called.
 * This is OK because main thread exit can only happen when the
 * entire process is exiting, and the API allows notifiers to not
 * be called on process exit.
 */
void qemu_thread_atexit_add(Notifier *notifier)
{
    notifier_list_add(&thread_exit, notifier);
}

void qemu_thread_atexit_remove(Notifier *notifier)
{
    notifier_remove(notifier);
}
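
/*
 * Illustrative sketch (the names below are made up, not from this file):
 * a thread that needs per-thread cleanup embeds a Notifier and registers
 * it once after starting up:
 *
 *     static void my_cleanup(Notifier *n, void *unused)
 *     {
 *         ... release this thread's resources ...
 *     }
 *     static __thread Notifier my_exit_notifier;
 *
 *     my_exit_notifier.notify = my_cleanup;
 *     qemu_thread_atexit_add(&my_exit_notifier);
 *
 * The notifier then fires from qemu_thread_atexit_notify() below when the
 * thread exits or returns from its start routine.
 */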

static void qemu_thread_atexit_notify(void *arg)
{
    /*
     * Called when a non-main thread exits (via qemu_thread_exit()
     * or by returning from its start routine).
     */
    notifier_list_notify(&thread_exit, NULL);
}

typedef struct {
    void *(*start_routine)(void *);
    void *arg;
    char *name;
} QemuThreadArgs;

static void *qemu_thread_start(void *args)
{
    QemuThreadArgs *qemu_thread_args = args;
    void *(*start_routine)(void *) = qemu_thread_args->start_routine;
    void *arg = qemu_thread_args->arg;
    void *r;

#ifdef CONFIG_THREAD_SETNAME_BYTHREAD
    /* Attempt to set the thread's name; this is only a debugging aid,
     * so we do not fail if the name cannot be set.
     */
    if (name_threads && qemu_thread_args->name) {
# if defined(CONFIG_PTHREAD_SETNAME_NP_W_TID)
        pthread_setname_np(pthread_self(), qemu_thread_args->name);
# elif defined(CONFIG_PTHREAD_SETNAME_NP_WO_TID)
        pthread_setname_np(qemu_thread_args->name);
# endif
    }
#endif
    QEMU_TSAN_ANNOTATE_THREAD_NAME(qemu_thread_args->name);
    g_free(qemu_thread_args->name);
    g_free(qemu_thread_args);
    pthread_cleanup_push(qemu_thread_atexit_notify, NULL);
    r = start_routine(arg);
    pthread_cleanup_pop(1);
    return r;
}

void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void*),
                        void *arg, int mode)
{
    sigset_t set, oldset;
    int err;
    pthread_attr_t attr;
    QemuThreadArgs *qemu_thread_args;

    err = pthread_attr_init(&attr);
    if (err) {
        error_exit(err, __func__);
    }

    if (mode == QEMU_THREAD_DETACHED) {
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    }

    /* Leave signal handling to the iothread.  */
    sigfillset(&set);
    /* Blocking these signals can result in undefined behaviour. */
    sigdelset(&set, SIGSEGV);
    sigdelset(&set, SIGFPE);
    sigdelset(&set, SIGILL);
    /* TODO avoid SIGBUS loss on macOS */
    pthread_sigmask(SIG_SETMASK, &set, &oldset);

    qemu_thread_args = g_new0(QemuThreadArgs, 1);
    qemu_thread_args->name = g_strdup(name);
    qemu_thread_args->start_routine = start_routine;
    qemu_thread_args->arg = arg;

    err = pthread_create(&thread->thread, &attr,
                         qemu_thread_start, qemu_thread_args);

    if (err) {
        error_exit(err, __func__);
    }

    pthread_sigmask(SIG_SETMASK, &oldset, NULL);

    pthread_attr_destroy(&attr);
}
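
/*
 * Illustrative sketch (worker_fn and opaque are placeholders, not from this
 * file): creating a joinable worker and collecting its result.  mode is
 * either QEMU_THREAD_JOINABLE or QEMU_THREAD_DETACHED; detached threads
 * must not be joined.
 *
 *     QemuThread thread;
 *
 *     qemu_thread_create(&thread, "worker", worker_fn, opaque,
 *                        QEMU_THREAD_JOINABLE);
 *     ...
 *     result = qemu_thread_join(&thread);
 */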

void qemu_thread_get_self(QemuThread *thread)
{
    thread->thread = pthread_self();
}

bool qemu_thread_is_self(QemuThread *thread)
{
    return pthread_equal(pthread_self(), thread->thread);
}

void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}

void *qemu_thread_join(QemuThread *thread)
{
    int err;
    void *ret;

    err = pthread_join(thread->thread, &ret);
    if (err) {
        error_exit(err, __func__);
    }
    return ret;
}