qemu/aio.c
/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "block.h"
#include "sys-queue.h"
#include "qemu_socket.h"

typedef struct AioHandler AioHandler;

/* The list of registered AIO handlers */
static LIST_HEAD(, AioHandler) aio_handlers;

/* This is a simple lock used to protect the aio_handlers list.  Specifically,
 * it's used to ensure that no callbacks are removed while we're walking and
 * dispatching callbacks.
 */
static int walking_handlers;

struct AioHandler
{
    int fd;
    IOHandler *io_read;
    IOHandler *io_write;
    AioFlushHandler *io_flush;
    int deleted;
    void *opaque;
    LIST_ENTRY(AioHandler) node;
};
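
/* Return the handler registered for @fd, or NULL if there is none.
 * Entries that have been marked as deleted are skipped.
 */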
static AioHandler *find_aio_handler(int fd)
{
    AioHandler *node;

    LIST_FOREACH(node, &aio_handlers, node) {
        if (node->fd == fd && !node->deleted)
            return node;
    }

    return NULL;
}
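
/* Register, update or remove the AIO handler for @fd.  Passing NULL for
 * both io_read and io_write unregisters the fd: the node is freed right
 * away, or only marked as deleted if the handler list is currently being
 * walked.  The fd is also registered with (or removed from) the main
 * loop via qemu_set_fd_handler2().
 */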
int qemu_aio_set_fd_handler(int fd,
                            IOHandler *io_read,
                            IOHandler *io_write,
                            AioFlushHandler *io_flush,
                            void *opaque)
{
    AioHandler *node;

    node = find_aio_handler(fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            /* If the lock is held, just mark the node as deleted */
            if (walking_handlers)
                node->deleted = 1;
            else {
                /* Otherwise, delete it for real.  We can't just mark it
                 * as deleted, because deleted nodes are only cleaned up
                 * while the handler list is being walked, and nobody is
                 * walking it now.
                 */
                LIST_REMOVE(node, node);
                qemu_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = qemu_mallocz(sizeof(AioHandler));
            node->fd = fd;
            LIST_INSERT_HEAD(&aio_handlers, node, node);
        }
        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->io_flush = io_flush;
        node->opaque = opaque;
    }

    qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque);

    return 0;
}
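
/* Wait for all outstanding AIO requests to complete.  Repeatedly lets
 * qemu_aio_wait() dispatch completions and polls bottom halves until
 * every registered io_flush callback reports that nothing is pending.
 */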
void qemu_aio_flush(void)
{
    AioHandler *node;
    int ret;

    do {
        ret = 0;

        /*
         * If there is pending emulated AIO, start it now so that the
         * io_flush callbacks below will be able to report it.
         */
        qemu_aio_wait();

        LIST_FOREACH(node, &aio_handlers, node) {
            ret |= node->io_flush(node->opaque);
        }
    } while (qemu_bh_poll() || ret > 0);
}
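
/* Run any pending bottom halves; if there are none, block in select()
 * until at least one AIO completion callback has been dispatched.
 * Returns without blocking if there is nothing to wait for.
 */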
void qemu_aio_wait(void)
{
    int ret;

    if (qemu_bh_poll())
        return;

    do {
        AioHandler *node;
        fd_set rdfds, wrfds;
        int max_fd = -1;

        walking_handlers = 1;

        FD_ZERO(&rdfds);
        FD_ZERO(&wrfds);

        /* fill fd sets */
        LIST_FOREACH(node, &aio_handlers, node) {
            /* Skip fds whose io_flush callback reports no requests in
             * flight.  If we waited on idle fds as well, qemu_aio_wait()
             * could block indefinitely when there is no AIO pending.
             */
            if (node->io_flush && node->io_flush(node->opaque) == 0)
                continue;

            if (!node->deleted && node->io_read) {
                FD_SET(node->fd, &rdfds);
                max_fd = MAX(max_fd, node->fd + 1);
            }
            if (!node->deleted && node->io_write) {
                FD_SET(node->fd, &wrfds);
                max_fd = MAX(max_fd, node->fd + 1);
            }
        }

        walking_handlers = 0;

        /* No AIO operations?  Get us out of here */
        if (max_fd == -1)
            break;
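
        /* max_fd is already the highest fd plus one, as select() expects
         * for its first argument.
         */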
        /* wait until next event */
        ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);
        if (ret == -1 && errno == EINTR)
            continue;

        /* if any fds are ready, dispatch their events */
        if (ret > 0) {
            walking_handlers = 1;

            /* we have to walk very carefully in case
             * qemu_aio_set_fd_handler is called while we're walking */
            node = LIST_FIRST(&aio_handlers);
            while (node) {
                AioHandler *tmp;

                if (!node->deleted &&
                    FD_ISSET(node->fd, &rdfds) &&
                    node->io_read) {
                    node->io_read(node->opaque);
                }
                if (!node->deleted &&
                    FD_ISSET(node->fd, &wrfds) &&
                    node->io_write) {
                    node->io_write(node->opaque);
                }
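
                /* Remember the current node and advance first: the node
                 * may be removed and freed just below if it was marked
                 * as deleted.
                 */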
                tmp = node;
                node = LIST_NEXT(node, node);

                if (tmp->deleted) {
                    LIST_REMOVE(tmp, node);
                    qemu_free(tmp);
                }
            }

            walking_handlers = 0;
        }
    } while (ret == 0);
}