qemu/include/sysemu/dma.h
/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#ifndef DMA_H
#define DMA_H

#include <stdio.h>
#include "exec/memory.h"
#include "hw/hw.h"
#include "block/block.h"
#include "sysemu/kvm.h"

typedef struct DMAContext DMAContext;
typedef struct ScatterGatherEntry ScatterGatherEntry;

typedef enum {
    DMA_DIRECTION_TO_DEVICE = 0,
    DMA_DIRECTION_FROM_DEVICE = 1,
} DMADirection;

struct QEMUSGList {
    ScatterGatherEntry *sg;
    int nsg;
    int nalloc;
    size_t size;
    DMAContext *dma;
};

#ifndef CONFIG_USER_ONLY

/*
 * When an IOMMU is present, bus addresses become distinct from
 * CPU/memory physical addresses and may be a different size.  Because
 * the IOVA size depends more on the bus than on the platform, we more
 * or less have to treat these as 64-bit always to cover all (or at
 * least most) cases.
 */
typedef uint64_t dma_addr_t;

#define DMA_ADDR_BITS 64
#define DMA_ADDR_FMT "%" PRIx64

typedef int DMATranslateFunc(DMAContext *dma,
                             dma_addr_t addr,
                             hwaddr *paddr,
                             hwaddr *len,
                             DMADirection dir);
typedef void* DMAMapFunc(DMAContext *dma,
                         dma_addr_t addr,
                         dma_addr_t *len,
                         DMADirection dir);
typedef void DMAUnmapFunc(DMAContext *dma,
                          void *buffer,
                          dma_addr_t len,
                          DMADirection dir,
                          dma_addr_t access_len);

struct DMAContext {
    AddressSpace *as;
    DMATranslateFunc *translate;
    DMAMapFunc *map;
    DMAUnmapFunc *unmap;
};

/* A global DMA context corresponding to the address_space_memory
 * AddressSpace, for sysbus devices which do DMA.
 */
extern DMAContext dma_context_memory;

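/*
 * Illustrative sketch (not part of the upstream header): a sysbus
 * device that sits directly on the system bus, with no IOMMU in front
 * of it, simply passes &dma_context_memory to the helpers declared
 * below, e.g.:
 *
 *     dma_memory_read(&dma_context_memory, addr, buf, len);
 *
 * Bus-specific code (e.g. PCI) is expected to hand devices the
 * DMAContext appropriate for their bus instead.
 */
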
static inline void dma_barrier(DMAContext *dma, DMADirection dir)
{
    /*
     * This is called before DMA read and write operations
     * unless the _relaxed form is used and is responsible
     * for providing some sane ordering of accesses vs
     * concurrently running VCPUs.
     *
     * Users of map(), unmap() or lower level st/ld_*
     * operations are responsible for providing their own
     * ordering via barriers.
     *
     * This primitive implementation does a simple smp_mb()
     * before each operation which provides pretty much full
     * ordering.
     *
     * A smarter implementation can be devised if needed to
     * use lighter barriers based on the direction of the
     * transfer, the DMA context, etc...
     */
    if (kvm_enabled()) {
        smp_mb();
    }
}

static inline bool dma_has_iommu(DMAContext *dma)
{
    return dma && dma->translate;
}

/* Checks that the given range of addresses is valid for DMA.  This is
 * useful for certain cases, but usually you should just use
 * dma_memory_{read,write}() and check for errors */
bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
                            DMADirection dir);
static inline bool dma_memory_valid(DMAContext *dma,
                                    dma_addr_t addr, dma_addr_t len,
                                    DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        return true;
    } else {
        return iommu_dma_memory_valid(dma, addr, len, dir);
    }
}
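
/*
 * Illustrative sketch (assumption, not upstream code): a device can
 * pre-flight a large transfer with dma_memory_valid() and raise its
 * error condition instead of issuing a partial access.  "s", the
 * fields and the helper are hypothetical.
 *
 *     if (!dma_memory_valid(s->dma, desc_addr, desc_len,
 *                           DMA_DIRECTION_TO_DEVICE)) {
 *         mydev_raise_dma_error(s);       // hypothetical helper
 *         return;
 *     }
 */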

int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                        void *buf, dma_addr_t len, DMADirection dir);
static inline int dma_memory_rw_relaxed(DMAContext *dma, dma_addr_t addr,
                                        void *buf, dma_addr_t len,
                                        DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        /* Fast-path for no IOMMU */
        address_space_rw(dma->as, addr, buf, len,
                         dir == DMA_DIRECTION_FROM_DEVICE);
        return 0;
    } else {
        return iommu_dma_memory_rw(dma, addr, buf, len, dir);
    }
}

static inline int dma_memory_read_relaxed(DMAContext *dma, dma_addr_t addr,
                                          void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

static inline int dma_memory_write_relaxed(DMAContext *dma, dma_addr_t addr,
                                           const void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(dma, addr, (void *)buf, len,
                                 DMA_DIRECTION_FROM_DEVICE);
}

static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                                void *buf, dma_addr_t len,
                                DMADirection dir)
{
    dma_barrier(dma, dir);

    return dma_memory_rw_relaxed(dma, addr, buf, len, dir);
}

static inline int dma_memory_read(DMAContext *dma, dma_addr_t addr,
                                  void *buf, dma_addr_t len)
{
    return dma_memory_rw(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

static inline int dma_memory_write(DMAContext *dma, dma_addr_t addr,
                                   const void *buf, dma_addr_t len)
{
    return dma_memory_rw(dma, addr, (void *)buf, len,
                         DMA_DIRECTION_FROM_DEVICE);
}
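
/*
 * Illustrative sketch (not upstream code): the usual pattern is to
 * fetch a guest-resident descriptor with dma_memory_read() and write
 * back a completion status with dma_memory_write().  "MyDevDesc",
 * "s" and its fields are hypothetical.
 *
 *     MyDevDesc desc;
 *     uint32_t status;
 *
 *     if (dma_memory_read(s->dma, s->desc_addr, &desc, sizeof(desc))) {
 *         return;                         // rejected by the IOMMU
 *     }
 *     ... process le32_to_cpu(desc.len) bytes ...
 *     status = cpu_to_le32(MYDEV_STATUS_DONE);
 *     dma_memory_write(s->dma, s->status_addr, &status, sizeof(status));
 *
 * dma_memory_read() moves data TO the device (the device reads guest
 * memory); dma_memory_write() moves data FROM the device.
 */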

int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
                         dma_addr_t len);

int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len);

void *iommu_dma_memory_map(DMAContext *dma,
                           dma_addr_t addr, dma_addr_t *len,
                           DMADirection dir);
static inline void *dma_memory_map(DMAContext *dma,
                                   dma_addr_t addr, dma_addr_t *len,
                                   DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        hwaddr xlen = *len;
        void *p;

        p = address_space_map(dma->as, addr, &xlen,
                              dir == DMA_DIRECTION_FROM_DEVICE);
        *len = xlen;
        return p;
    } else {
        return iommu_dma_memory_map(dma, addr, len, dir);
    }
}

void iommu_dma_memory_unmap(DMAContext *dma,
                            void *buffer, dma_addr_t len,
                            DMADirection dir, dma_addr_t access_len);
static inline void dma_memory_unmap(DMAContext *dma,
                                    void *buffer, dma_addr_t len,
                                    DMADirection dir, dma_addr_t access_len)
{
    if (!dma_has_iommu(dma)) {
        address_space_unmap(dma->as, buffer, (hwaddr)len,
                            dir == DMA_DIRECTION_FROM_DEVICE, access_len);
    } else {
        iommu_dma_memory_unmap(dma, buffer, len, dir, access_len);
    }
}
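
/*
 * Illustrative sketch (not upstream code): zero-copy access maps the
 * guest buffer, uses it in place and unmaps it, falling back to
 * dma_memory_write() when the region cannot be mapped contiguously.
 * Names other than the dma_memory_* helpers are hypothetical.
 *
 *     dma_addr_t plen = s->buf_len;
 *     void *p = dma_memory_map(s->dma, s->buf_addr, &plen,
 *                              DMA_DIRECTION_FROM_DEVICE);
 *
 *     if (!p || plen < s->buf_len) {
 *         if (p) {
 *             dma_memory_unmap(s->dma, p, plen,
 *                              DMA_DIRECTION_FROM_DEVICE, 0);
 *         }
 *         dma_memory_write(s->dma, s->buf_addr, s->bounce, s->buf_len);
 *     } else {
 *         memcpy(p, s->bounce, s->buf_len);   // device fills guest memory
 *         dma_memory_unmap(s->dma, p, plen,
 *                          DMA_DIRECTION_FROM_DEVICE, s->buf_len);
 *     }
 *
 * As the dma_barrier() comment above notes, users of map()/unmap()
 * must provide their own ordering guarantees.
 */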

#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
    static inline uint##_bits##_t ld##_lname##_##_end##_dma(DMAContext *dma, \
                                                            dma_addr_t addr) \
    {                                                                   \
        uint##_bits##_t val;                                            \
        dma_memory_read(dma, addr, &val, (_bits) / 8);                  \
        return _end##_bits##_to_cpu(val);                               \
    }                                                                   \
    static inline void st##_sname##_##_end##_dma(DMAContext *dma,       \
                                                 dma_addr_t addr,       \
                                                 uint##_bits##_t val)   \
    {                                                                   \
        val = cpu_to_##_end##_bits(val);                                \
        dma_memory_write(dma, addr, &val, (_bits) / 8);                 \
    }

static inline uint8_t ldub_dma(DMAContext *dma, dma_addr_t addr)
{
    uint8_t val;

    dma_memory_read(dma, addr, &val, 1);
    return val;
}

static inline void stb_dma(DMAContext *dma, dma_addr_t addr, uint8_t val)
{
    dma_memory_write(dma, addr, &val, 1);
}

DEFINE_LDST_DMA(uw, w, 16, le);
DEFINE_LDST_DMA(l, l, 32, le);
DEFINE_LDST_DMA(q, q, 64, le);
DEFINE_LDST_DMA(uw, w, 16, be);
DEFINE_LDST_DMA(l, l, 32, be);
DEFINE_LDST_DMA(q, q, 64, be);

#undef DEFINE_LDST_DMA
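
/*
 * The DEFINE_LDST_DMA() expansions above provide fixed-width,
 * endian-converting accessors: lduw_le_dma/stw_le_dma,
 * ldl_le_dma/stl_le_dma, ldq_le_dma/stq_le_dma and their _be_
 * counterparts.  Illustrative use (ring layout is hypothetical):
 *
 *     uint32_t head = ldl_le_dma(s->dma, s->ring_base + RING_HEAD);
 *     stl_le_dma(s->dma, s->ring_base + RING_TAIL, tail);
 */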

void dma_context_init(DMAContext *dma, AddressSpace *as, DMATranslateFunc translate,
                      DMAMapFunc map, DMAUnmapFunc unmap);

struct ScatterGatherEntry {
    dma_addr_t base;
    dma_addr_t len;
};

void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma);
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
void qemu_sglist_destroy(QEMUSGList *qsg);
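
/*
 * Illustrative sketch (not upstream code): building a scatter-gather
 * list from guest-supplied descriptors before handing it to the
 * dma_bdrv_*()/dma_buf_*() helpers below.  "prd", "s" and their
 * fields are hypothetical.
 *
 *     int i;
 *
 *     qemu_sglist_init(&s->sg, s->nb_prds, s->dma);
 *     for (i = 0; i < s->nb_prds; i++) {
 *         qemu_sglist_add(&s->sg, le64_to_cpu(prd[i].addr),
 *                         le32_to_cpu(prd[i].len));
 *     }
 *     ... submit s->sg ...
 *     qemu_sglist_destroy(&s->sg);
 */
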
#endif

typedef BlockDriverAIOCB *DMAIOFunc(BlockDriverState *bs, int64_t sector_num,
                                 QEMUIOVector *iov, int nb_sectors,
                                 BlockDriverCompletionFunc *cb, void *opaque);

BlockDriverAIOCB *dma_bdrv_io(BlockDriverState *bs,
                              QEMUSGList *sg, uint64_t sector_num,
                              DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
                              void *opaque, DMADirection dir);
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                BlockDriverCompletionFunc *cb, void *opaque);
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 BlockDriverCompletionFunc *cb, void *opaque);
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg);
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);

void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type);
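
/*
 * Illustrative sketch (not upstream code): issuing a guest read
 * request through the scatter-gather AIO helpers, with block I/O
 * accounting.  The callback, device fields and the accounting enum
 * value are assumptions for this example.
 *
 *     dma_acct_start(s->bs, &s->acct, &s->sg, BDRV_ACCT_READ);
 *     s->aiocb = dma_bdrv_read(s->bs, &s->sg, sector_num,
 *                              mydev_dma_cb, s);
 *
 * mydev_dma_cb() is invoked once the whole QEMUSGList has been
 * transferred, with ret == 0 on success.
 */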

#endif