qemu/dma-helpers.c
/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
#include "trace.h"
#include "qemu/range.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"

/* #define DEBUG_IOMMU */

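/*
 * Fill @len bytes of guest memory starting at @addr with the constant
 * byte @c, pushing the data out through a small on-stack bounce buffer
 * FILLBUF_SIZE bytes at a time.  Returns non-zero if any write failed.
 */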
int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    dma_barrier(as, DMA_DIRECTION_FROM_DEVICE);

#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;
    bool error = false;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        error |= address_space_rw(as, addr, fillbuf, l, true);
        len -= l;
        addr += l;
    }

    return error;
}

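/*
 * Initialize a scatter/gather list.  @alloc_hint preallocates room for
 * that many entries; the array grows on demand in qemu_sglist_add().
 * A reference is taken on @dev so the device cannot go away while the
 * list is in use.
 */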
void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->as = as;
    qsg->dev = dev;
    object_ref(OBJECT(dev));
}

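/*
 * Append one (base, len) entry, doubling the backing array when it is
 * full.
 */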
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

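/*
 * Free the entry array, drop the device reference taken in
 * qemu_sglist_init(), and clear the structure.
 */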
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    object_unref(OBJECT(qsg->dev));
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}

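/*
 * State for one in-flight scatter/gather block transfer.  The common
 * BlockAIOCB must come first so that the pointer returned by
 * blk_aio_get() can be used as a DMAAIOCB directly.
 */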
typedef struct {
    BlockAIOCB common;
    BlockBackend *blk;
    BlockAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    DMADirection dir;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_blk_cb(void *opaque, int ret);

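/*
 * If dma_memory_map() could not map anything (e.g. all bounce buffers
 * were busy), the transfer is parked until the map client callback
 * below fires; reschedule_dma() then re-enters dma_blk_cb() from a
 * bottom half.
 */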
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_blk_cb(dbs, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

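/* Unmap every buffer mapped for the current slice of the transfer. */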
static void dma_blk_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->as, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

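/*
 * Finish the whole request: unmap any outstanding buffers, invoke the
 * caller's completion callback with @ret, and drop the AIOCB reference.
 */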
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_blk_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    qemu_aio_unref(dbs);
}

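/*
 * Core of the state machine.  Each invocation maps as much of the
 * remaining scatter/gather list as possible into an I/O vector and
 * submits it via io_func(); the block layer calls back here when that
 * slice completes, until the list is exhausted or an error occurs.
 */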
static void dma_blk_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_blk_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }
    dma_blk_unmap(dbs);

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        /* Nothing could be mapped: wait for a bounce buffer to free up. */
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    if (dbs->iov.size & ~BDRV_SECTOR_MASK) {
        /* Trim the vector down to a whole number of sectors. */
        qemu_iovec_discard_back(&dbs->iov, dbs->iov.size & ~BDRV_SECTOR_MASK);
    }

    dbs->acb = dbs->io_func(dbs->blk, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_blk_cb, dbs);
    assert(dbs->acb);
}

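/*
 * Cancel the block I/O currently in flight, if any; dma_blk_cb() is
 * then invoked with the cancellation result and completes the request.
 */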
static void dma_aio_cancel(BlockAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        blk_aio_cancel_async(dbs->acb);
    }
}

static const AIOCBInfo dma_aiocb_info = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel_async       = dma_aio_cancel,
};

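/*
 * Start a scatter/gather transfer of @sg on @blk, beginning at
 * @sector_num.  @io_func issues the actual block I/O for each mapped
 * slice (e.g. blk_aio_readv or blk_aio_writev).  As a rough sketch, a
 * device model might start a disk read with something like:
 *
 *     s->aiocb = dma_blk_io(s->blk, &s->sg, sector_num,
 *                           blk_aio_readv, dma_done_cb, s,
 *                           DMA_DIRECTION_FROM_DEVICE);
 *
 * where s, sector_num and dma_done_cb are hypothetical device state, a
 * starting sector and a completion handler.
 */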
BlockAIOCB *dma_blk_io(
    BlockBackend *blk, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = blk_aio_get(&dma_aiocb_info, blk, cb, opaque);

    trace_dma_blk_io(dbs, blk, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->blk = blk;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_blk_cb(dbs, 0);
    return &dbs->common;
}

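/*
 * Convenience wrappers that pair the block-layer read/write helpers
 * with the matching DMA direction: a disk read fills guest memory
 * (FROM_DEVICE), a disk write drains it (TO_DEVICE).
 */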
BlockAIOCB *dma_blk_read(BlockBackend *blk,
                         QEMUSGList *sg, uint64_t sector,
                         void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk, sg, sector, blk_aio_readv, cb, opaque,
                      DMA_DIRECTION_FROM_DEVICE);
}

BlockAIOCB *dma_blk_write(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t sector,
                          void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk, sg, sector, blk_aio_writev, cb, opaque,
                      DMA_DIRECTION_TO_DEVICE);
}

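/*
 * Synchronously copy up to @len bytes between the linear buffer @ptr
 * and the scatter/gather list @sg.  Returns the number of bytes of
 * @sg that were not transferred.
 */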
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->as, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

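/*
 * Directional front ends for dma_buf_rw(), named from the device's
 * point of view: a "read" command moves data from the device buffer
 * @ptr into guest memory, a "write" command moves guest memory into
 * @ptr.  Both return the residual byte count.
 */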
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}

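/*
 * Begin accounting for a transfer covering the whole scatter/gather
 * list; the byte count is taken from @sg->size.
 */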
void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    block_acct_start(blk_get_stats(blk), cookie, sg->size, type);
}