qemu/dma-helpers.c
/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "block_int.h"

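/*
 * Initialize a scatter-gather list with room for alloc_hint entries.
 * The list grows on demand in qemu_sglist_add(), so the hint only
 * avoids early reallocations.
 */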
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
{
    qsg->sg = qemu_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
}

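/* Append one guest-physical extent, doubling the backing array when full. */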
void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
                     target_phys_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = qemu_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

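/* Free the entry array; the QEMUSGList struct itself is owned by the caller. */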
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    qemu_free(qsg->sg);
}

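/*
 * State for one in-flight scatter-gather request.  The request is driven
 * by dma_bdrv_cb(), which maps as much of the remaining list as it can,
 * submits one bdrv_aio_readv/writev for the mapped chunk, and resumes
 * from (sg_cur_index, sg_cur_byte) when that chunk completes.
 */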
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    int is_write;
    int sg_cur_index;
    target_phys_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);

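/*
 * cpu_physical_memory_map() can fail transiently, e.g. while the bounce
 * buffer is busy.  In that case dma_bdrv_cb() registers
 * continue_after_map_failure() as a map client; once mapping resources
 * free up, a bottom half restarts the transfer where it left off.
 */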
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(opaque, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

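/*
 * Undo the cpu_physical_memory_map() calls for the current chunk.  A disk
 * read (is_write == 0) mapped guest memory for writing, so unmapping marks
 * the whole mapped length as modified.
 */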
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
                                  dbs->iov.iov[i].iov_len);
    }
}

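/*
 * Completion callback and main state machine.  Called once to start the
 * request (with ret == 0) and again after each submitted chunk completes.
 * It unmaps the previous chunk, then maps as many remaining scatter-gather
 * entries as possible into dbs->iov and submits the next chunk.
 */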
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);
    qemu_iovec_reset(&dbs->iov);

    /* All entries consumed, or the previous chunk failed: finish the
     * whole request and report the final status to the caller. */
    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dbs->common.cb(dbs->common.opaque, ret);
        qemu_iovec_destroy(&dbs->iov);
        qemu_aio_release(dbs);
        return;
    }

    /* Map as much of the remaining list as possible into one iovec. */
    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    /* Nothing could be mapped; retry once mapping resources are free. */
    if (dbs->iov.size == 0) {
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    /* Submission failed; dma_bdrv_io() detects this via the NULL dbs->acb. */
    if (!dbs->acb) {
        dma_bdrv_unmap(dbs);
        qemu_iovec_destroy(&dbs->iov);
        return;
    }
}

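/* Cancel support: forward the cancel to the chunk currently in flight. */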
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    if (dbs->acb) {
        bdrv_aio_cancel(dbs->acb);
    }
}

static AIOPool dma_aio_pool = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel             = dma_aio_cancel,
};

BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, int is_write)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->is_write = is_write;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    if (!dbs->acb) {
        qemu_aio_release(dbs);
        return NULL;
    }
    return &dbs->common;
}

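/* Convenience wrappers selecting the transfer direction. */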
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque, 0);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque, 1);
}
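/*
 * Usage sketch (hypothetical caller, not part of this file): a device model
 * that has built a QEMUSGList from its descriptor table could start a read
 * with something like:
 *
 *     qemu_sglist_init(&s->sg, nb_descriptors);
 *     qemu_sglist_add(&s->sg, desc_base, desc_len);   // once per descriptor
 *     s->aiocb = dma_bdrv_read(s->bs, &s->sg, sector, my_dma_done, s);
 *
 * where my_dma_done(opaque, ret) runs once with the final status and would
 * then call qemu_sglist_destroy().  The names s, desc_*, nb_descriptors and
 * my_dma_done are illustrative only.
 */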