qemu/dma-helpers.c
/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "block_int.h"

void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
}

void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    g_free(qsg->sg);
}

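/*
 * Typical usage (illustrative sketch only, not part of this file): a device
 * model builds a scatter-gather list from guest physical addresses found in
 * its DMA descriptors, then hands it to dma_bdrv_read()/dma_bdrv_write()
 * below.  The descriptor names, element count and completion callback are
 * hypothetical.
 *
 *     QEMUSGList qsg;
 *     qemu_sglist_init(&qsg, 2);
 *     qemu_sglist_add(&qsg, desc0_addr, desc0_len);
 *     qemu_sglist_add(&qsg, desc1_addr, desc1_len);
 *     aiocb = dma_bdrv_read(bs, &qsg, sector, dma_done_cb, dev);
 *     ...
 *     qemu_sglist_destroy(&qsg);    // e.g. in dma_done_cb()
 */
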
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;      /* in-flight request returned by io_func */
    QEMUSGList *sg;
    uint64_t sector_num;
    bool to_dev;                /* true when data flows from guest RAM to the device (a write) */
    bool in_cancel;             /* set while dma_aio_cancel() is running */
    int sg_cur_index;           /* scatter-gather element currently being mapped */
    dma_addr_t sg_cur_byte;     /* offset into that element */
    QEMUIOVector iov;
    QEMUBH *bh;                 /* bottom half used to restart after a map failure */
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);

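/*
 * cpu_physical_memory_map() can fail transiently, e.g. when the bounce
 * buffer used for MMIO regions is already in use.  In that case
 * dma_bdrv_cb() registers a map client; once mapping space frees up,
 * continue_after_map_failure() runs and defers the retry to a bottom half,
 * which re-enters dma_bdrv_cb() from a clean context.
 */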
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

/* Unmap everything mapped in the previous pass and reset the iovec.  */
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
                                  dbs->iov.iov[i].iov_len, !dbs->to_dev,
                                  dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

static void dma_complete(DMAAIOCB *dbs, int ret)
{
    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel.  */
        qemu_aio_release(dbs);
    }
}

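/*
 * Completion callback for the request submitted by dma_bdrv_io() (and by
 * itself): advance past the bytes just transferred, map as many remaining
 * scatter-gather elements as possible into dbs->iov, then either submit the
 * next chunk of I/O, wait for mapping space, or complete the transfer.
 */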
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->to_dev);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        /* Nothing could be mapped; retry once mapping space is available.  */
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    if (!dbs->acb) {
        dma_complete(dbs, -EIO);
    }
}

static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}

static AIOPool dma_aio_pool = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel             = dma_aio_cancel,
};

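/*
 * Start a scatter-gather block I/O request.  io_func is typically
 * bdrv_aio_readv or bdrv_aio_writev (see the wrappers at the end of this
 * file); cb fires once the whole list has been transferred or an error
 * occurred.
 */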
BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, bool to_dev)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->to_dev = to_dev;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}

BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque, false);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque, true);
}