qemu/dma-helpers.c
/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "sysemu/dma.h"
#include "trace.h"
#include "qemu/range.h"
#include "qemu/thread.h"

/* #define DEBUG_IOMMU */

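/* Fill a physical memory range with a constant byte, copying it out of a
 * fixed-size bounce buffer one FILLBUF_SIZE chunk at a time. */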
static void do_dma_memory_set(AddressSpace *as,
                              dma_addr_t addr, uint8_t c, dma_addr_t len)
{
#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        address_space_rw(as, addr, fillbuf, l, true);
        len -= l;
        addr += l;
    }
}

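/* Public entry point: issue a DMA barrier, then set the range either
 * through the IOMMU translation path or directly on the address space. */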
int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    dma_barrier(dma, DMA_DIRECTION_FROM_DEVICE);

    if (dma_has_iommu(dma)) {
        return iommu_dma_memory_set(dma, addr, c, len);
    }
    do_dma_memory_set(dma->as, addr, c, len);

    return 0;
}

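/* Scatter-gather list helpers: callers size the list with an allocation
 * hint, append entries (growing the array as needed), and free it once
 * the request is finished. */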
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->dma = dma;
}

void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}

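/* State for an in-flight scatter-gather block I/O request; it tracks the
 * current position in the QEMUSGList and the iovec of mapped host buffers. */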
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    DMADirection dir;
    bool in_cancel;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);

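/* If no scatter-gather entry could be mapped, the request waits until
 * mappings are released; the map-client callback below then reschedules
 * dma_bdrv_cb() from a bottom half. */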
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->dma, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

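/* Tear down a request: unmap any remaining buffers, invoke the caller's
 * completion callback and release the AIOCB unless a cancel still holds
 * a reference to it. */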
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel.  */
        qemu_aio_release(dbs);
    }
}

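/* Main state machine: called after each partial transfer completes, it
 * unmaps the previous iovec, maps as many scatter-gather entries as
 * possible and submits the next chunk of I/O in 512-byte sectors. */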
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->dma, cur_addr, &cur_len, dbs->dir);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    assert(dbs->acb);
}

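/* Cancel a request: cancel any outstanding block layer AIO first; the
 * in_cancel flag keeps dma_complete() from releasing the AIOCB while it
 * is still referenced here. */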
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}

static const AIOCBInfo dma_aiocb_info = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel             = dma_aio_cancel,
};

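/* Start a scatter-gather transfer between a block device and guest memory;
 * io_func is the block layer routine (bdrv_aio_readv or bdrv_aio_writev in
 * the wrappers below) invoked for each mapped chunk. */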
BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aiocb_info, bs, cb, opaque);

    trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}

BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
}

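/* Copy between a linear host buffer and a scatter-gather list, one entry
 * at a time; returns the number of bytes of the list left untouched. */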
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->dma, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}

void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    bdrv_acct_start(bs, cookie, sg->size, type);
}

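/* Check whether an IOMMU-translated range is fully accessible for the
 * given direction without actually touching memory. */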
bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
                            DMADirection dir)
{
    hwaddr paddr, plen;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_check context=%p addr=0x" DMA_ADDR_FMT
            " len=0x" DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
#endif

    while (len) {
        if (dma->translate(dma, addr, &paddr, &plen, dir) != 0) {
            return false;
        }

        /* The translation might be valid for larger regions. */
        if (plen > len) {
            plen = len;
        }

        len -= plen;
        addr += plen;
    }

    return true;
}

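/* Read or write an IOMMU-translated range by translating it piecewise and
 * accessing each physical fragment in turn. */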
int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                        void *buf, dma_addr_t len, DMADirection dir)
{
    hwaddr paddr, plen;
    int err;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_rw context=%p addr=0x" DMA_ADDR_FMT " len=0x"
            DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
#endif

    while (len) {
        err = dma->translate(dma, addr, &paddr, &plen, dir);
        if (err) {
            /*
             * In case of failure on reads from the guest, we clean the
             * destination buffer so that a device that doesn't test
             * for errors will not expose qemu internal memory.
             */
            memset(buf, 0, len);
            return -1;
        }

        /* The translation might be valid for larger regions. */
        if (plen > len) {
            plen = len;
        }

        address_space_rw(dma->as, paddr, buf, plen,
                         dir == DMA_DIRECTION_FROM_DEVICE);

        len -= plen;
        addr += plen;
        buf += plen;
    }

    return 0;
}

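/* memset() equivalent for an IOMMU-translated range, again translating and
 * filling one physical fragment at a time. */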
int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
                         dma_addr_t len)
{
    hwaddr paddr, plen;
    int err;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_set context=%p addr=0x" DMA_ADDR_FMT
            " len=0x" DMA_ADDR_FMT "\n", dma, addr, len);
#endif

    while (len) {
        err = dma->translate(dma, addr, &paddr, &plen,
                             DMA_DIRECTION_FROM_DEVICE);
        if (err) {
            return err;
        }

        /* The translation might be valid for larger regions. */
        if (plen > len) {
            plen = len;
        }

        do_dma_memory_set(dma->as, paddr, c, plen);

        len -= plen;
        addr += plen;
    }

    return 0;
}

void dma_context_init(DMAContext *dma, AddressSpace *as,
                      DMATranslateFunc translate,
                      DMAMapFunc map, DMAUnmapFunc unmap)
{
#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_context_init(%p, %p, %p, %p)\n",
            dma, translate, map, unmap);
#endif
    dma->as = as;
    dma->translate = translate;
    dma->map = map;
    dma->unmap = unmap;
}

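/* Map an IOMMU-translated range into host memory.  A custom map hook takes
 * precedence; otherwise only the first contiguous physical fragment is
 * mapped and *len is clamped accordingly. */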
void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
                           DMADirection dir)
{
    int err;
    hwaddr paddr, plen;
    void *buf;

    if (dma->map) {
        return dma->map(dma, addr, len, dir);
    }

    plen = *len;
    err = dma->translate(dma, addr, &paddr, &plen, dir);
    if (err) {
        return NULL;
    }

    /*
     * If this is true, the virtual region is contiguous,
     * but the translated physical region isn't. We just
     * clamp *len, much like address_space_map() does.
     */
    if (plen < *len) {
        *len = plen;
    }

    buf = address_space_map(dma->as, paddr, &plen,
                            dir == DMA_DIRECTION_FROM_DEVICE);
    *len = plen;

    return buf;
}

void iommu_dma_memory_unmap(DMAContext *dma, void *buffer, dma_addr_t len,
                            DMADirection dir, dma_addr_t access_len)
{
    if (dma->unmap) {
        dma->unmap(dma, buffer, len, dir, access_len);
        return;
    }

    address_space_unmap(dma->as, buffer, len, dir == DMA_DIRECTION_FROM_DEVICE,
                        access_len);
}