qemu/util/qemu-thread-win32.c
/*
 * Win32 implementation for mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Author:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x0600
#endif

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/thread.h"
#include "qemu/notify.h"
#include <process.h>

static bool name_threads;

void qemu_thread_naming(bool enable)
{
    /* But note we don't actually name them on Windows yet */
    name_threads = enable;

    fprintf(stderr, "qemu: thread naming not supported on this host\n");
}

static void error_exit(int err, const char *msg)
{
    char *pstr;

    FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER,
                  NULL, err, 0, (LPTSTR)&pstr, 2, NULL);
    fprintf(stderr, "qemu: %s: %s\n", msg, pstr);
    LocalFree(pstr);
    abort();
}

void qemu_mutex_init(QemuMutex *mutex)
{
    InitializeSRWLock(&mutex->lock);
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    /* SRW locks need no explicit destruction; reinitializing clears state.  */
    InitializeSRWLock(&mutex->lock);
}

void qemu_mutex_lock(QemuMutex *mutex)
{
    AcquireSRWLockExclusive(&mutex->lock);
}

int qemu_mutex_trylock(QemuMutex *mutex)
{
    int owned;

    owned = TryAcquireSRWLockExclusive(&mutex->lock);
    return !owned;
}

void qemu_mutex_unlock(QemuMutex *mutex)
{
    ReleaseSRWLockExclusive(&mutex->lock);
}

void qemu_rec_mutex_init(QemuRecMutex *mutex)
{
    InitializeCriticalSection(&mutex->lock);
}

void qemu_rec_mutex_destroy(QemuRecMutex *mutex)
{
    DeleteCriticalSection(&mutex->lock);
}

void qemu_rec_mutex_lock(QemuRecMutex *mutex)
{
    EnterCriticalSection(&mutex->lock);
}

int qemu_rec_mutex_trylock(QemuRecMutex *mutex)
{
    return !TryEnterCriticalSection(&mutex->lock);
}

void qemu_rec_mutex_unlock(QemuRecMutex *mutex)
{
    LeaveCriticalSection(&mutex->lock);
}

void qemu_cond_init(QemuCond *cond)
{
    memset(cond, 0, sizeof(*cond));
    InitializeConditionVariable(&cond->var);
}

void qemu_cond_destroy(QemuCond *cond)
{
    /* Condition variables need no explicit destruction either.  */
    InitializeConditionVariable(&cond->var);
}

void qemu_cond_signal(QemuCond *cond)
{
    WakeConditionVariable(&cond->var);
}

void qemu_cond_broadcast(QemuCond *cond)
{
    WakeAllConditionVariable(&cond->var);
}

void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    SleepConditionVariableSRW(&cond->var, &mutex->lock, INFINITE, 0);
}

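/* Illustrative sketch (not part of this file): how callers usually pair
 * these primitives.  qemu_cond_wait() atomically releases the SRW lock
 * while sleeping and re-acquires it before returning, so the predicate
 * must be re-checked in a loop.  The names "ready_lock", "ready_cond",
 * "ready", wait_until_ready() and mark_ready() are made up for the
 * example only.
 *
 *     static QemuMutex ready_lock;
 *     static QemuCond ready_cond;
 *     static bool ready;
 *
 *     static void wait_until_ready(void)
 *     {
 *         qemu_mutex_lock(&ready_lock);
 *         while (!ready) {
 *             qemu_cond_wait(&ready_cond, &ready_lock);
 *         }
 *         qemu_mutex_unlock(&ready_lock);
 *     }
 *
 *     static void mark_ready(void)
 *     {
 *         qemu_mutex_lock(&ready_lock);
 *         ready = true;
 *         qemu_cond_signal(&ready_cond);
 *         qemu_mutex_unlock(&ready_lock);
 *     }
 */
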
void qemu_sem_init(QemuSemaphore *sem, int init)
{
    /* Counting semaphore; LONG_MAX makes the maximum count effectively
     * unlimited.
     */
    sem->sema = CreateSemaphore(NULL, init, LONG_MAX, NULL);
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    CloseHandle(sem->sema);
}

void qemu_sem_post(QemuSemaphore *sem)
{
    ReleaseSemaphore(sem->sema, 1, NULL);
}

int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc = WaitForSingleObject(sem->sema, ms);
    if (rc == WAIT_OBJECT_0) {
        return 0;
    }
    if (rc != WAIT_TIMEOUT) {
        error_exit(GetLastError(), __func__);
    }
    return -1;
}

void qemu_sem_wait(QemuSemaphore *sem)
{
    if (WaitForSingleObject(sem->sema, INFINITE) != WAIT_OBJECT_0) {
        error_exit(GetLastError(), __func__);
    }
}

/* Wrap a Win32 manual-reset event with a fast userspace path.  The idea
 * is to reset the Win32 event lazily, as part of a test-reset-test-wait
 * sequence.  Such a sequence is, indeed, how QemuEvents are used by
 * RCU and other subsystems!
 *
 * Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by SetEvent
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy (and is faster than cmpxchg).
 */

#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1

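/* Illustrative sketch (not part of this file): the test-reset-test-wait
 * sequence that the comment above refers to, roughly as RCU-style callers
 * use it.  The names "ev", "resource_ready", consumer() and producer()
 * are made up for the example only.
 *
 *     static QemuEvent ev;
 *     static bool resource_ready;
 *
 *     static void consumer(void)
 *     {
 *         while (!atomic_read(&resource_ready)) {      // test
 *             qemu_event_reset(&ev);                   // reset
 *             if (atomic_read(&resource_ready)) {      // test again
 *                 break;
 *             }
 *             qemu_event_wait(&ev);                    // wait
 *         }
 *     }
 *
 *     static void producer(void)
 *     {
 *         atomic_set(&resource_ready, true);
 *         qemu_event_set(&ev);
 *     }
 */
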
void qemu_event_init(QemuEvent *ev, bool init)
{
    /* Manual reset.  */
    ev->event = CreateEvent(NULL, TRUE, TRUE, NULL);
    ev->value = (init ? EV_SET : EV_FREE);
}

void qemu_event_destroy(QemuEvent *ev)
{
    CloseHandle(ev->event);
}

void qemu_event_set(QemuEvent *ev)
{
    /* qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */
    smp_mb();
    if (atomic_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up.  */
            SetEvent(ev->event);
        }
    }
}

void qemu_event_reset(QemuEvent *ev)
{
    unsigned value;

    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value == EV_SET) {
        /* If there was a concurrent reset (or even reset+wait),
         * do nothing.  Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}

void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /* qemu_event_set is not yet going to call SetEvent, but we are
             * going to do another check for EV_SET below when setting EV_BUSY.
             * At that point it is safe to call WaitForSingleObject.
             */
            ResetEvent(ev->event);

            /* Tell qemu_event_set that there are waiters.  No need to retry
             * because there cannot be a concurrent busy->free transition.
             * After the CAS, the event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                value = EV_SET;
            } else {
                value = EV_BUSY;
            }
        }
        if (value == EV_BUSY) {
            WaitForSingleObject(ev->event, INFINITE);
        }
    }
}

struct QemuThreadData {
    /* Passed to win32_start_routine.  */
    void             *(*start_routine)(void *);
    void             *arg;
    short             mode;
    NotifierList      exit;

    /* Only used for joinable threads. */
    bool              exited;
    void             *ret;
    CRITICAL_SECTION  cs;
};

static bool atexit_registered;
static NotifierList main_thread_exit;

static __thread QemuThreadData *qemu_thread_data;

static void run_main_thread_exit(void)
{
    notifier_list_notify(&main_thread_exit, NULL);
}

void qemu_thread_atexit_add(Notifier *notifier)
{
    if (!qemu_thread_data) {
        if (!atexit_registered) {
            atexit_registered = true;
            atexit(run_main_thread_exit);
        }
        notifier_list_add(&main_thread_exit, notifier);
    } else {
        notifier_list_add(&qemu_thread_data->exit, notifier);
    }
}

void qemu_thread_atexit_remove(Notifier *notifier)
{
    notifier_remove(notifier);
}

static unsigned __stdcall win32_start_routine(void *arg)
{
    QemuThreadData *data = (QemuThreadData *) arg;
    void *(*start_routine)(void *) = data->start_routine;
    void *thread_arg = data->arg;

    qemu_thread_data = data;
    qemu_thread_exit(start_routine(thread_arg));
    abort();
}

void qemu_thread_exit(void *arg)
{
    QemuThreadData *data = qemu_thread_data;

    notifier_list_notify(&data->exit, NULL);
    if (data->mode == QEMU_THREAD_JOINABLE) {
        data->ret = arg;
        EnterCriticalSection(&data->cs);
        data->exited = true;
        LeaveCriticalSection(&data->cs);
    } else {
        g_free(data);
    }
    _endthreadex(0);
}

void *qemu_thread_join(QemuThread *thread)
{
    QemuThreadData *data;
    void *ret;
    HANDLE handle;

    data = thread->data;
    if (data->mode == QEMU_THREAD_DETACHED) {
        return NULL;
    }

    /*
     * Because multiple copies of the QemuThread can exist via
     * qemu_thread_get_self, we need to store a value that cannot
     * leak there.  The simplest, non-racy way is to store the TID,
     * discard the handle that _beginthreadex gives back, and
     * get another copy of the handle here.
     */
    handle = qemu_thread_get_handle(thread);
    if (handle) {
        WaitForSingleObject(handle, INFINITE);
        CloseHandle(handle);
    }
    ret = data->ret;
    DeleteCriticalSection(&data->cs);
    g_free(data);
    return ret;
}

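/* Illustrative sketch (not part of this file): a typical joinable thread
 * lifecycle as seen by a caller.  Because QemuThread only stores the TID,
 * qemu_thread_join() re-obtains a handle on demand (see the comment above)
 * and frees the QemuThreadData when the thread has finished.  The worker()
 * function and its argument are made up for the example only.
 *
 *     static void *worker(void *opaque)
 *     {
 *         // ... do work ...
 *         return opaque;
 *     }
 *
 *     QemuThread t;
 *     qemu_thread_create(&t, "worker", worker, NULL, QEMU_THREAD_JOINABLE);
 *     // ...
 *     void *ret = qemu_thread_join(&t);
 */
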
void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode)
{
    HANDLE hThread;
    struct QemuThreadData *data;

    data = g_malloc(sizeof *data);
    data->start_routine = start_routine;
    data->arg = arg;
    data->mode = mode;
    data->exited = false;
    notifier_list_init(&data->exit);

    if (data->mode != QEMU_THREAD_DETACHED) {
        InitializeCriticalSection(&data->cs);
    }

    hThread = (HANDLE) _beginthreadex(NULL, 0, win32_start_routine,
                                      data, 0, &thread->tid);
    if (!hThread) {
        error_exit(GetLastError(), __func__);
    }
    CloseHandle(hThread);
    thread->data = data;
}

void qemu_thread_get_self(QemuThread *thread)
{
    thread->data = qemu_thread_data;
    thread->tid = GetCurrentThreadId();
}

HANDLE qemu_thread_get_handle(QemuThread *thread)
{
    QemuThreadData *data;
    HANDLE handle;

    data = thread->data;
    if (data->mode == QEMU_THREAD_DETACHED) {
        return NULL;
    }

    EnterCriticalSection(&data->cs);
    if (!data->exited) {
        handle = OpenThread(SYNCHRONIZE | THREAD_SUSPEND_RESUME |
                            THREAD_SET_CONTEXT, FALSE, thread->tid);
    } else {
        handle = NULL;
    }
    LeaveCriticalSection(&data->cs);
    return handle;
}

bool qemu_thread_is_self(QemuThread *thread)
{
    return GetCurrentThreadId() == thread->tid;
}