/* qemu/qemu-thread-win32.c */
/*
 * Win32 implementation for mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Author:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu-common.h"
#include "qemu-thread.h"
#include <process.h>
#include <assert.h>
#include <limits.h>
  18
  19static void error_exit(int err, const char *msg)
  20{
  21    char *pstr;
  22
  23    FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER,
  24                  NULL, err, 0, (LPTSTR)&pstr, 2, NULL);
  25    fprintf(stderr, "qemu: %s: %s\n", msg, pstr);
  26    LocalFree(pstr);
  27    abort();
  28}
  29
  30void qemu_mutex_init(QemuMutex *mutex)
  31{
  32    mutex->owner = 0;
  33    InitializeCriticalSection(&mutex->lock);
  34}
  35
  36void qemu_mutex_destroy(QemuMutex *mutex)
  37{
  38    assert(mutex->owner == 0);
  39    DeleteCriticalSection(&mutex->lock);
  40}
  41
  42void qemu_mutex_lock(QemuMutex *mutex)
  43{
  44    EnterCriticalSection(&mutex->lock);
  45
  46    /* Win32 CRITICAL_SECTIONs are recursive.  Assert that we're not
  47     * using them as such.
  48     */
  49    assert(mutex->owner == 0);
  50    mutex->owner = GetCurrentThreadId();
  51}
  52
  53int qemu_mutex_trylock(QemuMutex *mutex)
  54{
  55    int owned;
  56
  57    owned = TryEnterCriticalSection(&mutex->lock);
  58    if (owned) {
  59        assert(mutex->owner == 0);
  60        mutex->owner = GetCurrentThreadId();
  61    }
  62    return !owned;
  63}
  64
  65void qemu_mutex_unlock(QemuMutex *mutex)
  66{
  67    assert(mutex->owner == GetCurrentThreadId());
  68    mutex->owner = 0;
  69    LeaveCriticalSection(&mutex->lock);
  70}
  71
  72void qemu_cond_init(QemuCond *cond)
  73{
  74    memset(cond, 0, sizeof(*cond));
  75
  76    cond->sema = CreateSemaphore(NULL, 0, LONG_MAX, NULL);
  77    if (!cond->sema) {
  78        error_exit(GetLastError(), __func__);
  79    }
  80    cond->continue_event = CreateEvent(NULL,    /* security */
  81                                       FALSE,   /* auto-reset */
  82                                       FALSE,   /* not signaled */
  83                                       NULL);   /* name */
  84    if (!cond->continue_event) {
  85        error_exit(GetLastError(), __func__);
  86    }
  87}
  88
  89void qemu_cond_destroy(QemuCond *cond)
  90{
  91    BOOL result;
  92    result = CloseHandle(cond->continue_event);
  93    if (!result) {
  94        error_exit(GetLastError(), __func__);
  95    }
  96    cond->continue_event = 0;
  97    result = CloseHandle(cond->sema);
  98    if (!result) {
  99        error_exit(GetLastError(), __func__);
 100    }
 101    cond->sema = 0;
 102}
 103
/* Wake exactly one thread waiting on COND.  The caller must hold the
 * mutex that the waiters passed to qemu_cond_wait, and this function
 * blocks until the woken waiter has acknowledged via continue_event.
 */
void qemu_cond_signal(QemuCond *cond)
{
    DWORD result;

    /*
     * Signal only when there are waiters.  cond->waiters is
     * incremented by pthread_cond_wait under the external lock,
     * so we are safe about that.
     */
    if (cond->waiters == 0) {
        return;
    }

    /*
     * Waiting threads decrement it outside the external lock, but
     * only if another thread is executing pthread_cond_broadcast and
     * has the mutex.  So, it also cannot be decremented concurrently
     * with this particular access.
     */
    cond->target = cond->waiters - 1;
    /* Atomically release one semaphore slice (waking one waiter) and
     * wait for that waiter to set continue_event; see qemu_cond_wait.
     * NOTE(review): GetLastError() is only meaningful after WAIT_FAILED;
     * for WAIT_ABANDONED the reported message may be stale.
     */
    result = SignalObjectAndWait(cond->sema, cond->continue_event,
                                 INFINITE, FALSE);
    if (result == WAIT_ABANDONED || result == WAIT_FAILED) {
        error_exit(GetLastError(), __func__);
    }
}
 130
/* Wake all threads currently waiting on COND.  The caller must hold
 * the mutex that the waiters passed to qemu_cond_wait; this function
 * blocks until the last woken waiter sets continue_event.
 */
void qemu_cond_broadcast(QemuCond *cond)
{
    BOOLEAN result;
    /*
     * As in pthread_cond_signal, access to cond->waiters and
     * cond->target is locked via the external mutex.
     */
    if (cond->waiters == 0) {
        return;
    }

    /* target == 0 tells the waiter whose decrement of cond->waiters
     * reaches zero (the last one) to signal continue_event.
     */
    cond->target = 0;
    result = ReleaseSemaphore(cond->sema, cond->waiters, NULL);
    if (!result) {
        error_exit(GetLastError(), __func__);
    }

    /*
     * At this point all waiters continue. Each one takes its
     * slice of the semaphore. Now it's our turn to wait: Since
     * the external mutex is held, no thread can leave cond_wait,
     * yet. For this reason, we can be sure that no thread gets
     * a chance to eat *more* than one slice. OTOH, it means
     * that the last waiter must send us a wake-up.
     */
    WaitForSingleObject(cond->continue_event, INFINITE);
}
 158
/* Atomically release MUTEX and wait on COND; re-acquire MUTEX before
 * returning.  MUTEX must be held by the calling thread on entry.
 */
void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    /*
     * This access is protected under the mutex.
     */
    cond->waiters++;

    /*
     * Unlock external mutex and wait for signal.
     * NOTE: we've held mutex locked long enough to increment
     * waiters count above, so there's no problem with
     * leaving mutex unlocked before we wait on semaphore.
     */
    qemu_mutex_unlock(mutex);
    WaitForSingleObject(cond->sema, INFINITE);

    /* Now waiters must rendez-vous with the signaling thread and
     * let it continue.  For cond_broadcast this has heavy contention
     * and triggers thundering herd.  So goes life.
     *
     * Decrease waiters count.  The mutex is not taken, so we have
     * to do this atomically.
     *
     * All waiters contend for the mutex at the end of this function
     * until the signaling thread relinquishes it.  To ensure
     * each waiter consumes exactly one slice of the semaphore,
     * the signaling thread stops until it is told by the last
     * waiter that it can go on.
     */
    if (InterlockedDecrement(&cond->waiters) == cond->target) {
        SetEvent(cond->continue_event);
    }

    qemu_mutex_lock(mutex);
}
 194
/* Per-thread bookkeeping allocated by qemu_thread_create and stored in
 * TLS by win32_start_routine (freed immediately for detached threads).
 */
struct QemuThreadData {
    /* Passed to win32_start_routine.  */
    void             *(*start_routine)(void *); /* thread entry point */
    void             *arg;                      /* argument for start_routine */
    short             mode;                     /* joinable or detached */

    /* Only used for joinable threads. */
    bool              exited; /* set under cs once the thread finished */
    void             *ret;    /* value handed to qemu_thread_exit */
    CRITICAL_SECTION  cs;     /* serializes exited vs. qemu_thread_get_handle */
};
 206
/* TLS slot holding the calling thread's QemuThreadData (NULL for
 * detached threads); allocated lazily by qemu_thread_init().
 */
static int qemu_thread_tls_index = TLS_OUT_OF_INDEXES;
 208
/* Trampoline passed to _beginthreadex: publishes the per-thread data
 * in TLS and runs the user-supplied start routine.
 */
static unsigned __stdcall win32_start_routine(void *arg)
{
    QemuThreadData *data = (QemuThreadData *) arg;
    void *(*start_routine)(void *) = data->start_routine;
    void *thread_arg = data->arg;

    /* Detached threads are never joined, so their bookkeeping block is
     * freed right away; the TLS slot then holds NULL (copies of
     * start_routine/arg were taken above, before the free).
     */
    if (data->mode == QEMU_THREAD_DETACHED) {
        g_free(data);
        data = NULL;
    }
    TlsSetValue(qemu_thread_tls_index, data);
    qemu_thread_exit(start_routine(thread_arg));
    /* qemu_thread_exit ends the thread via _endthreadex; not reached.  */
    abort();
}
 223
/* Terminate the calling thread, making ARG available to
 * qemu_thread_join for joinable threads.  Detached threads (and
 * threads not created through qemu_thread_create, whose TLS slot is
 * NULL) simply end.
 */
void qemu_thread_exit(void *arg)
{
    QemuThreadData *data = TlsGetValue(qemu_thread_tls_index);
    if (data) {
        assert(data->mode != QEMU_THREAD_DETACHED);
        data->ret = arg;
        /* Setting exited under cs stops qemu_thread_get_handle from
         * handing out a handle for a thread that is about to vanish.
         */
        EnterCriticalSection(&data->cs);
        data->exited = true;
        LeaveCriticalSection(&data->cs);
    }
    _endthreadex(0);
}
 236
/* Wait for THREAD to finish and return the value it passed to
 * qemu_thread_exit.  Only valid for joinable threads, and at most
 * once per thread: the bookkeeping data is freed here.
 */
void *qemu_thread_join(QemuThread *thread)
{
    QemuThreadData *data;
    void *ret;
    HANDLE handle;

    data = thread->data;
    if (!data) {
        return NULL;
    }
    /*
     * Because multiple copies of the QemuThread can exist via
     * qemu_thread_get_self, we need to store a value that cannot
     * leak there.  The simplest, non racy way is to store the TID,
     * discard the handle that _beginthreadex gives back, and
     * get another copy of the handle here.
     */
    handle = qemu_thread_get_handle(thread);
    if (handle) {
        WaitForSingleObject(handle, INFINITE);
        CloseHandle(handle);
    }
    /* If no handle was obtained the thread has already exited and
     * data->ret is final (set in qemu_thread_exit before 'exited').
     */
    ret = data->ret;
    assert(data->mode != QEMU_THREAD_DETACHED);
    DeleteCriticalSection(&data->cs);
    g_free(data);
    return ret;
}
 265
 266static inline void qemu_thread_init(void)
 267{
 268    if (qemu_thread_tls_index == TLS_OUT_OF_INDEXES) {
 269        qemu_thread_tls_index = TlsAlloc();
 270        if (qemu_thread_tls_index == TLS_OUT_OF_INDEXES) {
 271            error_exit(ERROR_NO_SYSTEM_RESOURCES, __func__);
 272        }
 273    }
 274}
 275
 276
 277void qemu_thread_create(QemuThread *thread,
 278                       void *(*start_routine)(void *),
 279                       void *arg, int mode)
 280{
 281    HANDLE hThread;
 282
 283    struct QemuThreadData *data;
 284    qemu_thread_init();
 285    data = g_malloc(sizeof *data);
 286    data->start_routine = start_routine;
 287    data->arg = arg;
 288    data->mode = mode;
 289    data->exited = false;
 290
 291    if (data->mode != QEMU_THREAD_DETACHED) {
 292        InitializeCriticalSection(&data->cs);
 293    }
 294
 295    hThread = (HANDLE) _beginthreadex(NULL, 0, win32_start_routine,
 296                                      data, 0, &thread->tid);
 297    if (!hThread) {
 298        error_exit(GetLastError(), __func__);
 299    }
 300    CloseHandle(hThread);
 301    thread->data = (mode == QEMU_THREAD_DETACHED) ? NULL : data;
 302}
 303
 304void qemu_thread_get_self(QemuThread *thread)
 305{
 306    qemu_thread_init();
 307    thread->data = TlsGetValue(qemu_thread_tls_index);
 308    thread->tid = GetCurrentThreadId();
 309}
 310
 311HANDLE qemu_thread_get_handle(QemuThread *thread)
 312{
 313    QemuThreadData *data;
 314    HANDLE handle;
 315
 316    data = thread->data;
 317    if (!data) {
 318        return NULL;
 319    }
 320
 321    assert(data->mode != QEMU_THREAD_DETACHED);
 322    EnterCriticalSection(&data->cs);
 323    if (!data->exited) {
 324        handle = OpenThread(SYNCHRONIZE | THREAD_SUSPEND_RESUME, FALSE,
 325                            thread->tid);
 326    } else {
 327        handle = NULL;
 328    }
 329    LeaveCriticalSection(&data->cs);
 330    return handle;
 331}
 332
 333bool qemu_thread_is_self(QemuThread *thread)
 334{
 335    return GetCurrentThreadId() == thread->tid;
 336}
 337