qemu/aio-win32.c
/*
 * QEMU aio implementation
 *
 * Copyright IBM Corp., 2008
 * Copyright Red Hat Inc., 2012
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paolo Bonzini     <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"

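/*
 * Windows flavour of the AioContext event loop: event notifiers are native
 * HANDLEs waited on with WaitForMultipleObjects(), while sockets are polled
 * with a zero-timeout select() in aio_prepare() and bound to the context's
 * notifier handle via WSAEventSelect().
 */
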
struct AioHandler {
    EventNotifier *e;                 /* handle-based event source */
    IOHandler *io_read;               /* socket read callback */
    IOHandler *io_write;              /* socket write callback */
    EventNotifierHandler *io_notify;  /* event notifier callback */
    GPollFD pfd;                      /* fd/handle plus (r)events for polling */
    int deleted;                      /* removal deferred while list is walked */
    void *opaque;                     /* argument passed to io_read/io_write */
    QLIST_ENTRY(AioHandler) node;
};

/* Register or update the read/write callbacks for a socket in @ctx;
 * passing NULL for both callbacks unregisters the handler.
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        void *opaque)
{
    /* fd is a SOCKET in our case */
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        HANDLE event;

        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
        }

        node->e = &ctx->notifier;

        /* Update handler with latest information */
        node->opaque = opaque;
        node->io_read = io_read;
        node->io_write = io_write;

        /* Compute the events mask from the new callbacks, not the old ones,
         * so that a freshly allocated node gets a correct mask.
         */
        node->pfd.events = 0;
        if (node->io_read) {
            node->pfd.events |= G_IO_IN;
        }
        if (node->io_write) {
            node->pfd.events |= G_IO_OUT;
        }

        event = event_notifier_get_handle(&ctx->notifier);
        WSAEventSelect(node->pfd.fd, event,
                       FD_READ | FD_ACCEPT | FD_CLOSE |
                       FD_CONNECT | FD_WRITE | FD_OOB);
    }

    aio_notify(ctx);
}

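/*
 * Usage sketch (illustrative, not part of this file): a caller owning a
 * connected SOCKET registers a read callback and later unregisters it.
 * "EchoState" and "echo_read" are hypothetical names.
 *
 *     typedef struct EchoState {
 *         SOCKET sock;
 *     } EchoState;
 *
 *     static void echo_read(void *opaque)
 *     {
 *         EchoState *s = opaque;
 *         char buf[512];
 *         int n = recv(s->sock, buf, sizeof(buf), 0);
 *         if (n > 0) {
 *             send(s->sock, buf, n, 0);
 *         }
 *     }
 *
 *     aio_set_fd_handler(ctx, s->sock, echo_read, NULL, s);
 *     ...
 *     aio_set_fd_handler(ctx, s->sock, NULL, NULL, NULL);  // unregister
 */
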
/* Register or update the callback for an event notifier in @ctx;
 * a NULL io_notify unregisters the notifier.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *e,
                            EventNotifierHandler *io_notify)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->e == e && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the notifier handler? */
    if (!io_notify) {
        if (node) {
            g_source_remove_poll(&ctx->source, &node->pfd);

            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->e = e;
            node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
            node->pfd.events = G_IO_IN;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_notify = io_notify;
    }

    aio_notify(ctx);
}

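/*
 * Usage sketch (illustrative, not part of this file): wire up an
 * EventNotifier so another thread can wake this context.  "wake_cb" is a
 * hypothetical name.
 *
 *     static EventNotifier notifier;
 *
 *     static void wake_cb(EventNotifier *e)
 *     {
 *         event_notifier_test_and_clear(e);
 *         // react to the wakeup here
 *     }
 *
 *     event_notifier_init(&notifier, 0);
 *     aio_set_event_notifier(ctx, &notifier, wake_cb);
 *     // elsewhere, possibly from another thread:
 *     event_notifier_set(&notifier);
 */
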
/* Poll the registered sockets with a non-blocking select() and latch the
 * results into each handler's pfd.revents.  Returns true if any socket
 * is ready.
 */
bool aio_prepare(AioContext *ctx)
{
    static struct timeval tv0;  /* zero-initialized: select() polls and returns */
    AioHandler *node;
    bool have_select_revents = false;
    fd_set rfds, wfds;

    /* fill fd sets */
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->io_read) {
            FD_SET((SOCKET)node->pfd.fd, &rfds);
        }
        if (node->io_write) {
            FD_SET((SOCKET)node->pfd.fd, &wfds);
        }
    }

    if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
            node->pfd.revents = 0;
            if (FD_ISSET(node->pfd.fd, &rfds)) {
                node->pfd.revents |= G_IO_IN;
                have_select_revents = true;
            }

            if (FD_ISSET(node->pfd.fd, &wfds)) {
                node->pfd.revents |= G_IO_OUT;
                have_select_revents = true;
            }
        }
    }

    return have_select_revents;
}

/* Return true if some handler has recorded revents that still need
 * dispatching.
 */
bool aio_pending(AioContext *ctx)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.revents && node->io_notify) {
            return true;
        }

        if ((node->pfd.revents & G_IO_IN) && node->io_read) {
            return true;
        }
        if ((node->pfd.revents & G_IO_OUT) && node->io_write) {
            return true;
        }
    }

    return false;
}

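/*
 * aio_prepare(), aio_pending() and aio_dispatch() (below) mirror the
 * prepare/check/dispatch phases of QEMU's GSource integration.  A minimal
 * hand-rolled iteration, assuming a valid ctx, would look like:
 *
 *     aio_prepare(ctx);          // poll sockets, fill in revents
 *     if (aio_pending(ctx)) {
 *         aio_dispatch(ctx);     // run ready callbacks, BHs and timers
 *     }
 */
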
/* Run the callbacks of every handler whose revents are set or whose event
 * notifier handle matches @event.  Returns true if real progress was made.
 */
static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
{
    AioHandler *node;
    bool progress = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    node = QLIST_FIRST(&ctx->aio_handlers);
    while (node) {
        AioHandler *tmp;
        int revents = node->pfd.revents;

        ctx->walking_handlers++;

        if (!node->deleted &&
            (revents || event_notifier_get_handle(node->e) == event) &&
            node->io_notify) {
            node->pfd.revents = 0;
            node->io_notify(node->e);

            /* aio_notify() does not count as progress */
            if (node->e != &ctx->notifier) {
                progress = true;
            }
        }

        if (!node->deleted &&
            (node->io_read || node->io_write)) {
            node->pfd.revents = 0;
            if ((revents & G_IO_IN) && node->io_read) {
                node->io_read(node->opaque);
                progress = true;
            }
            if ((revents & G_IO_OUT) && node->io_write) {
                node->io_write(node->opaque);
                progress = true;
            }

            /* if the next select() will return an event, we have progressed */
            if (event == event_notifier_get_handle(&ctx->notifier)) {
                WSANETWORKEVENTS ev;
                WSAEnumNetworkEvents(node->pfd.fd, event, &ev);
                if (ev.lNetworkEvents) {
                    progress = true;
                }
            }
        }

        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        }
    }

    return progress;
}

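/*
 * The walking_handlers counter implements deferred deletion: freeing a node
 * mid-walk would invalidate the iterator, so aio_set_fd_handler() and
 * aio_set_event_notifier() only mark nodes while the count is non-zero, and
 * the walker above frees marked nodes once it drops the count back to zero.
 */
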
/* Dispatch bottom halves, ready handlers and expired timers once. */
bool aio_dispatch(AioContext *ctx)
{
    bool progress;

    progress = aio_bh_poll(ctx);
    progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
    progress |= timerlistgroup_run_timers(&ctx->tlg);
    return progress;
}

/* Run one iteration of the event loop.  If @blocking is true, wait until
 * at least one event source is ready; otherwise just poll once.
 */
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    bool was_dispatching, progress, have_select_revents, first;
    int count;
    int timeout;

    have_select_revents = aio_prepare(ctx);
    if (have_select_revents) {
        blocking = false;
    }

    was_dispatching = ctx->dispatching;
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns.
     *
     * If we're in a nested event loop, ctx->dispatching might be true.
     * In that case we can restore it just before returning, but we
     * have to clear it now.
     */
    aio_set_dispatching(ctx, !blocking);

    ctx->walking_handlers++;

    /* fill the events array with the notifier handles to wait on */
    count = 0;
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_notify) {
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    ctx->walking_handlers--;
    first = true;

    /* wait until next event */
    while (count > 0) {
        HANDLE event;
        int ret;

        timeout = blocking
            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
        ret = WaitForMultipleObjects(count, events, FALSE, timeout);
        aio_set_dispatching(ctx, true);

        if (first && aio_bh_poll(ctx)) {
            progress = true;
        }
        first = false;

        /* if we have any signaled events, dispatch them */
        event = NULL;
        if ((DWORD) (ret - WAIT_OBJECT_0) < count) {
            event = events[ret - WAIT_OBJECT_0];
            events[ret - WAIT_OBJECT_0] = events[--count];
        } else if (!have_select_revents) {
            break;
        }

        have_select_revents = false;
        blocking = false;

        progress |= aio_dispatch_handlers(ctx, event);
    }

    progress |= timerlistgroup_run_timers(&ctx->tlg);

    aio_set_dispatching(ctx, was_dispatching);
    return progress;
}

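/*
 * Usage sketch (illustrative, not part of this file): drive the context
 * until a condition set by a callback becomes true, in the style of the
 * "wait for completion" loops elsewhere in QEMU.  "done" is a hypothetical
 * flag flipped by one of the registered handlers.
 *
 *     bool done = false;
 *     ...register handlers that eventually set done...
 *     while (!done) {
 *         aio_poll(ctx, true);   // block until at least one event fires
 *     }
 */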