qemu/aio-win32.c
/*
 * QEMU aio implementation
 *
 * Copyright IBM Corp., 2008
 * Copyright Red Hat Inc., 2012
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paolo Bonzini     <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"

struct AioHandler {
    EventNotifier *e;
    IOHandler *io_read;
    IOHandler *io_write;
    EventNotifierHandler *io_notify;
    GPollFD pfd;
    int deleted;
    void *opaque;
    bool is_external;
    QLIST_ENTRY(AioHandler) node;
};
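
/* Each AioHandler tracks one of two kinds of event source: a SOCKET
 * (io_read/io_write; readiness is detected with select() in aio_prepare()
 * and wakeups arrive through the WSAEventSelect association made in
 * aio_set_fd_handler()), or an EventNotifier (io_notify; its HANDLE is
 * waited on directly with WaitForMultipleObjects() in aio_poll()).
 */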

void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        void *opaque)
{
    /* fd is a SOCKET in our case */
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        HANDLE event;

        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
        }

        node->pfd.events = 0;
        if (node->io_read) {
            node->pfd.events |= G_IO_IN;
        }
        if (node->io_write) {
            node->pfd.events |= G_IO_OUT;
        }

        node->e = &ctx->notifier;

        /* Update handler with latest information */
        node->opaque = opaque;
        node->io_read = io_read;
        node->io_write = io_write;
        node->is_external = is_external;

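        /* Tie the socket to the context's notifier handle so that socket
         * activity signals the handle that aio_poll() waits on.  As a side
         * effect, WSAEventSelect also puts the socket into non-blocking
         * mode.
         */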
        event = event_notifier_get_handle(&ctx->notifier);
        WSAEventSelect(node->pfd.fd, event,
                       FD_READ | FD_ACCEPT | FD_CLOSE |
                       FD_CONNECT | FD_WRITE | FD_OOB);
    }

    aio_notify(ctx);
}
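
/* Example (hypothetical caller, not part of this file): registering a read
 * handler for a listening socket.  "accept_cb", "Server", and "server" are
 * illustrative names, and the third argument (false) is the is_external
 * flag.
 *
 *     static void accept_cb(void *opaque)
 *     {
 *         Server *server = opaque;
 *         ...accept the connection and register it in turn...
 *     }
 *
 *     aio_set_fd_handler(ctx, server->fd, false, accept_cb, NULL, server);
 *
 * Passing NULL for both io_read and io_write removes the handler again.
 */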

void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *e,
                            bool is_external,
                            EventNotifierHandler *io_notify)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->e == e && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the notifier handler? */
    if (!io_notify) {
        if (node) {
            g_source_remove_poll(&ctx->source, &node->pfd);

            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->e = e;
            node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
            node->pfd.events = G_IO_IN;
            node->is_external = is_external;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_notify = io_notify;
    }

    aio_notify(ctx);
}
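
/* Example (hypothetical caller, not part of this file): wiring an
 * EventNotifier to a completion callback.  "done_cb" and "notifier" are
 * illustrative names, and false is the is_external flag.
 *
 *     static void done_cb(EventNotifier *e)
 *     {
 *         event_notifier_test_and_clear(e);
 *         ...process completed requests...
 *     }
 *
 *     event_notifier_init(&notifier, false);
 *     aio_set_event_notifier(ctx, &notifier, false, done_cb);
 *
 * Passing NULL for io_notify removes the handler again.
 */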

bool aio_prepare(AioContext *ctx)
{
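    /* Do a single non-blocking select() pass over all registered sockets
     * and record readiness in pfd.revents.  The zero-initialized static
     * struct timeval makes select() return immediately, so this call only
     * snapshots already-pending state; aio_pending() and
     * aio_dispatch_handlers() then consume the recorded revents.
     */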
    static struct timeval tv0;
    AioHandler *node;
    bool have_select_revents = false;
    fd_set rfds, wfds;

    /* fill fd sets */
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->io_read) {
            FD_SET ((SOCKET)node->pfd.fd, &rfds);
        }
        if (node->io_write) {
            FD_SET ((SOCKET)node->pfd.fd, &wfds);
        }
    }

    if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
            node->pfd.revents = 0;
            if (FD_ISSET(node->pfd.fd, &rfds)) {
                node->pfd.revents |= G_IO_IN;
                have_select_revents = true;
            }

            if (FD_ISSET(node->pfd.fd, &wfds)) {
                node->pfd.revents |= G_IO_OUT;
                have_select_revents = true;
            }
        }
    }

    return have_select_revents;
}

bool aio_pending(AioContext *ctx)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.revents && node->io_notify) {
            return true;
        }

        if ((node->pfd.revents & G_IO_IN) && node->io_read) {
            return true;
        }
        if ((node->pfd.revents & G_IO_OUT) && node->io_write) {
            return true;
        }
    }

    return false;
}

static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
{
    AioHandler *node;
    bool progress = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    node = QLIST_FIRST(&ctx->aio_handlers);
    while (node) {
        AioHandler *tmp;
        int revents = node->pfd.revents;

        ctx->walking_handlers++;

        if (!node->deleted &&
            (revents || event_notifier_get_handle(node->e) == event) &&
            node->io_notify) {
            node->pfd.revents = 0;
            node->io_notify(node->e);

            /* aio_notify() does not count as progress */
            if (node->e != &ctx->notifier) {
                progress = true;
            }
        }

        if (!node->deleted &&
            (node->io_read || node->io_write)) {
            node->pfd.revents = 0;
            if ((revents & G_IO_IN) && node->io_read) {
                node->io_read(node->opaque);
                progress = true;
            }
            if ((revents & G_IO_OUT) && node->io_write) {
                node->io_write(node->opaque);
                progress = true;
            }

            /* if the next select() will return an event, we have progressed */
            if (event == event_notifier_get_handle(&ctx->notifier)) {
                WSANETWORKEVENTS ev;
                WSAEnumNetworkEvents(node->pfd.fd, event, &ev);
                if (ev.lNetworkEvents) {
                    progress = true;
                }
            }
        }

        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        }
    }

    return progress;
}

bool aio_dispatch(AioContext *ctx)
{
    bool progress;

    progress = aio_bh_poll(ctx);
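    /* INVALID_HANDLE_VALUE never matches a notifier handle, so this pass
     * only runs handlers whose revents were filled in beforehand, e.g. by
     * aio_prepare() or by the glib main loop's poll.
     */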
    progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
    progress |= timerlistgroup_run_timers(&ctx->tlg);
    return progress;
}

bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    bool progress, have_select_revents, first;
    int count;
    int timeout;

    aio_context_acquire(ctx);
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
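    /* Increments of two: bit 0 of notify_me is reserved for the GSource
     * wrapper (see the notify_me documentation in include/block/aio.h).
     */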
    if (blocking) {
        atomic_add(&ctx->notify_me, 2);
    }

    have_select_revents = aio_prepare(ctx);

    ctx->walking_handlers++;

    /* fill the array of notifier handles to wait on */
    count = 0;
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_notify
            && aio_node_check(ctx, node->is_external)) {
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    ctx->walking_handlers--;
    first = true;

    /* ctx->notifier is always registered.  */
    assert(count > 0);

    /* Multiple iterations, all of them non-blocking except the first,
     * may be necessary to process all pending events.  After the first
     * WaitForMultipleObjects call ctx->notify_me will be decremented.
     */
    do {
        HANDLE event;
        int ret;

        timeout = blocking && !have_select_revents
            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
        if (timeout) {
            aio_context_release(ctx);
        }
        ret = WaitForMultipleObjects(count, events, FALSE, timeout);
        if (blocking) {
            assert(first);
            atomic_sub(&ctx->notify_me, 2);
        }
        if (timeout) {
            aio_context_acquire(ctx);
        }

        if (first) {
            aio_notify_accept(ctx);
            progress |= aio_bh_poll(ctx);
            first = false;
        }

        /* if an event was signaled, dispatch its handler */
        event = NULL;
        if ((DWORD) (ret - WAIT_OBJECT_0) < count) {
            event = events[ret - WAIT_OBJECT_0];
            events[ret - WAIT_OBJECT_0] = events[--count];
        } else if (!have_select_revents) {
            break;
        }

        have_select_revents = false;
        blocking = false;

        progress |= aio_dispatch_handlers(ctx, event);
    } while (count > 0);

    progress |= timerlistgroup_run_timers(&ctx->tlg);

    aio_context_release(ctx);
    return progress;
}
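
/* Example (hypothetical caller, not part of this file): a synchronous wait
 * for a condition that some handler eventually satisfies.  Each blocking
 * aio_poll() call sleeps until there is activity and then dispatches it:
 *
 *     while (!done) {
 *         aio_poll(ctx, true);
 *     }
 */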

void aio_context_setup(AioContext *ctx, Error **errp)
{
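    /* Nothing to set up for the Windows implementation; this hook exists
     * for platform-specific initialization (the POSIX version uses it to
     * create the epoll file descriptor).
     */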
}