qemu/dma-helpers.c
/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "sysemu/dma.h"
#include "trace.h"
#include "qemu/range.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"

/* #define DEBUG_IOMMU */

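/* Fill @len bytes of guest memory at @addr with the constant byte @c.
 * The fill goes through a small on-stack bounce buffer, so @len may be
 * arbitrarily large.  Returns nonzero if any part of the write failed. */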
int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    dma_barrier(as, DMA_DIRECTION_FROM_DEVICE);

#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;
    bool error = false;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        error |= address_space_rw(as, addr, fillbuf, l, true);
        len -= l;
        addr += l;
    }

    return error;
}

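/* Initialize @qsg with room for @alloc_hint entries.  A reference is
 * taken on the owning device so it cannot be finalized while DMA to or
 * from it is in flight. */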
void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->as = as;
    qsg->dev = dev;
    object_ref(OBJECT(dev));
}

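/* Append the guest range [@base, @base + @len) to the list, growing the
 * backing array geometrically when it is full. */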
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

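/* Free the entry array, drop the device reference taken in
 * qemu_sglist_init(), and clear the structure. */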
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    object_unref(OBJECT(qsg->dev));
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}

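/* A minimal usage sketch for the list helpers (hypothetical device code;
 * "dev", "as", "desc_base" and "desc_len" are illustrative, not part of
 * this file):
 *
 *     QEMUSGList qsg;
 *
 *     qemu_sglist_init(&qsg, dev, 2, as);
 *     qemu_sglist_add(&qsg, desc_base, desc_len);
 *     ...issue I/O with dma_bdrv_read()/dma_bdrv_write()...
 *     qemu_sglist_destroy(&qsg);
 */

/* Per-request state for scatter/gather block I/O.  The request maps as
 * many scatter/gather entries as possible into @iov, submits that slice
 * through @io_func, and iterates from dma_bdrv_cb() until the whole list
 * has been transferred.  sg_cur_index/sg_cur_byte track the resume
 * position within the list. */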
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    DMADirection dir;
    bool in_cancel;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);

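/* If dma_memory_map() cannot map anything (all bounce buffers are busy),
 * the request registers itself as a map client and resumes from a bottom
 * half once a buffer is released. */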
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

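/* Unmap every guest buffer currently mapped into dbs->iov, declaring the
 * full mapped length as accessed, and empty the vector. */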
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->as, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

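/* Finish the request: unmap any mapped memory, run the caller's
 * completion callback, and release the AIOCB unless dma_aio_cancel()
 * still holds a reference to it. */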
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel.  */
        qemu_aio_release(dbs);
    }
}

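/* Main state machine of a request.  Invoked with the status of the
 * previous slice: unmaps it, maps as much of the remaining scatter/gather
 * list as dma_memory_map() allows, trims the vector to whole sectors, and
 * submits the next slice via io_func.  When nothing at all could be
 * mapped, the request parks itself until a bounce buffer frees up. */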
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }
    dma_bdrv_unmap(dbs);

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    if (dbs->iov.size & ~BDRV_SECTOR_MASK) {
        qemu_iovec_discard_back(&dbs->iov, dbs->iov.size & ~BDRV_SECTOR_MASK);
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    assert(dbs->acb);
}

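/* Cancel a request: synchronously cancel the in-flight slice, then
 * complete with the caller's callback cleared.  in_cancel keeps
 * dma_complete() from releasing the AIOCB while bdrv_aio_cancel() may
 * still be completing requests. */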
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}

static const AIOCBInfo dma_aiocb_info = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel             = dma_aio_cancel,
};

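/* Start a scatter/gather request on @bs.  All state lives in the
 * DMAAIOCB; the transfer is kicked off by calling dma_bdrv_cb() as if an
 * empty first slice had just completed successfully. */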
BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aiocb_info, bs, cb, opaque);

    trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->in_cancel = false;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}

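/* Convenience wrappers for the two transfer directions.  Directions are
 * from the device's point of view: a read pulls data off the device into
 * guest memory (DMA_DIRECTION_FROM_DEVICE), a write pushes guest memory
 * out to the device. */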
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
}

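/* A minimal call sketch (hypothetical device code; "s" and its fields
 * are illustrative, not part of this file):
 *
 *     s->acb = dma_bdrv_read(s->bs, &s->sg, s->sector, dma_done_cb, s);
 *
 * dma_done_cb() then runs once with the final status when the whole
 * scatter/gather list has been transferred or an error occurred; it is
 * suppressed on cancel. */

/* Copy between a linear buffer and a scatter/gather list, for devices
 * that cannot map guest memory directly.  Returns the number of bytes
 * of @sg that were left untransferred. */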
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->as, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

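/* Directions again follow the device's point of view: dma_buf_read()
 * copies @ptr into the guest memory described by @sg, dma_buf_write()
 * copies that guest memory out into @ptr. */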
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}

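/* Begin block-layer accounting for a transfer whose size is the total
 * size of the scatter/gather list. */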
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    bdrv_acct_start(bs, cookie, sg->size, type);
}