/* qemu/include/sysemu/dma.h */
   1/*
   2 * DMA helper functions
   3 *
   4 * Copyright (c) 2009 Red Hat
   5 *
   6 * This work is licensed under the terms of the GNU General Public License
   7 * (GNU GPL), version 2 or later.
   8 */
   9
  10#ifndef DMA_H
  11#define DMA_H
  12
  13#include "exec/memory.h"
  14#include "exec/address-spaces.h"
  15#include "hw/hw.h"
  16#include "block/block.h"
  17#include "block/accounting.h"
  18#include "sysemu/kvm.h"
  19
typedef struct ScatterGatherEntry ScatterGatherEntry;

/*
 * Direction of a DMA transfer, from the device's point of view:
 * TO_DEVICE   = data flows memory -> device (the device reads RAM),
 * FROM_DEVICE = data flows device -> memory (the device writes RAM).
 */
typedef enum {
    DMA_DIRECTION_TO_DEVICE = 0,
    DMA_DIRECTION_FROM_DEVICE = 1,
} DMADirection;
  26
/*
 * A scatter-gather list: a sequence of (bus address, length) chunks
 * describing one DMA transfer over discontiguous guest memory.
 */
struct QEMUSGList {
    ScatterGatherEntry *sg;  /* array of chunks */
    int nsg;                 /* number of valid entries in sg[] */
    int nalloc;              /* allocated capacity of sg[] */
    size_t size;             /* total transfer length in bytes */
    DeviceState *dev;        /* device performing the DMA */
    AddressSpace *as;        /* address space the chunk addresses refer to */
};
  35
#ifndef CONFIG_USER_ONLY

/*
 * When an IOMMU is present, bus addresses become distinct from
 * CPU/memory physical addresses and may be a different size.  Because
 * the IOVA size depends more on the bus than on the platform, we more
 * or less have to treat these as 64-bit always to cover all (or at
 * least most) cases.
 */
typedef uint64_t dma_addr_t;

#define DMA_ADDR_BITS 64            /* width of dma_addr_t, in bits */
#define DMA_ADDR_FMT "%" PRIx64     /* printf format for a dma_addr_t */
  50static inline void dma_barrier(AddressSpace *as, DMADirection dir)
  51{
  52    /*
  53     * This is called before DMA read and write operations
  54     * unless the _relaxed form is used and is responsible
  55     * for providing some sane ordering of accesses vs
  56     * concurrently running VCPUs.
  57     *
  58     * Users of map(), unmap() or lower level st/ld_*
  59     * operations are responsible for providing their own
  60     * ordering via barriers.
  61     *
  62     * This primitive implementation does a simple smp_mb()
  63     * before each operation which provides pretty much full
  64     * ordering.
  65     *
  66     * A smarter implementation can be devised if needed to
  67     * use lighter barriers based on the direction of the
  68     * transfer, the DMA context, etc...
  69     */
  70    if (kvm_enabled()) {
  71        smp_mb();
  72    }
  73}
  74
  75/* Checks that the given range of addresses is valid for DMA.  This is
  76 * useful for certain cases, but usually you should just use
  77 * dma_memory_{read,write}() and check for errors */
  78static inline bool dma_memory_valid(AddressSpace *as,
  79                                    dma_addr_t addr, dma_addr_t len,
  80                                    DMADirection dir)
  81{
  82    return address_space_access_valid(as, addr, len,
  83                                      dir == DMA_DIRECTION_FROM_DEVICE,
  84                                      MEMTXATTRS_UNSPECIFIED);
  85}
  86
  87static inline int dma_memory_rw_relaxed(AddressSpace *as, dma_addr_t addr,
  88                                        void *buf, dma_addr_t len,
  89                                        DMADirection dir)
  90{
  91    return (bool)address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
  92                                  buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
  93}
  94
  95static inline int dma_memory_rw_relaxed_attr(AddressSpace *as, dma_addr_t addr,
  96                                             void *buf, dma_addr_t len,
  97                                             DMADirection dir,
  98                                             MemTxAttrs attr)
  99{
 100    return (bool)address_space_rw(as, addr, attr,
 101                                  buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
 102}
 103
 104static inline int dma_memory_read_relaxed(AddressSpace *as, dma_addr_t addr,
 105                                          void *buf, dma_addr_t len)
 106{
 107    return dma_memory_rw_relaxed(as, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
 108}
 109
 110static inline int dma_memory_write_relaxed(AddressSpace *as, dma_addr_t addr,
 111                                           const void *buf, dma_addr_t len)
 112{
 113    return dma_memory_rw_relaxed(as, addr, (void *)buf, len,
 114                                 DMA_DIRECTION_FROM_DEVICE);
 115}
 116
 117static inline int dma_memory_rw_attr(AddressSpace *as, dma_addr_t addr,
 118                                     void *buf, dma_addr_t len,
 119                                     DMADirection dir,
 120                                     MemTxAttrs attr)
 121{
 122    dma_barrier(as, dir);
 123
 124    return dma_memory_rw_relaxed_attr(as, addr, buf, len, dir, attr);
 125}
 126
 127static inline int dma_memory_rw(AddressSpace *as, dma_addr_t addr,
 128                                void *buf, dma_addr_t len,
 129                                DMADirection dir)
 130{
 131    dma_barrier(as, dir);
 132
 133    return dma_memory_rw_relaxed(as, addr, buf, len, dir);
 134}
 135
 136static inline int dma_memory_read(AddressSpace *as, dma_addr_t addr,
 137                                  void *buf, dma_addr_t len)
 138{
 139    return dma_memory_rw(as, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
 140}
 141
 142static inline int dma_memory_write(AddressSpace *as, dma_addr_t addr,
 143                                   const void *buf, dma_addr_t len)
 144{
 145    return dma_memory_rw(as, addr, (void *)buf, len,
 146                         DMA_DIRECTION_FROM_DEVICE);
 147}
 148
/* Set @len bytes at bus address @addr to the byte value @c.
 * NOTE(review): defined out of line; error convention presumably matches
 * dma_memory_rw() (nonzero on failure) — confirm against dma-helpers.c. */
int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len);
 150
 151static inline void *dma_memory_map(AddressSpace *as,
 152                                   dma_addr_t addr, dma_addr_t *len,
 153                                   DMADirection dir)
 154{
 155    hwaddr xlen = *len;
 156    void *p;
 157
 158    p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE);
 159    *len = xlen;
 160    return p;
 161}
 162
 163static inline void dma_memory_unmap(AddressSpace *as,
 164                                    void *buffer, dma_addr_t len,
 165                                    DMADirection dir, dma_addr_t access_len)
 166{
 167    address_space_unmap(as, buffer, (hwaddr)len,
 168                        dir == DMA_DIRECTION_FROM_DEVICE, access_len);
 169}
 170
/*
 * Generate a pair of DMA accessors for a _bits-wide integer:
 *   ld<_lname>_<_end>_dma(as, addr) -> value   (load + endian convert)
 *   st<_sname>_<_end>_dma(as, addr, val)       (endian convert + store)
 * _end selects the guest byte order ("le" or "be"); values are
 * converted to/from host order via _end##_bits##_to_cpu and
 * cpu_to_##_end##_bits.
 */
#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
    static inline uint##_bits##_t ld##_lname##_##_end##_dma(AddressSpace *as, \
                                                            dma_addr_t addr) \
    {                                                                   \
        uint##_bits##_t val;                                            \
        dma_memory_read(as, addr, &val, (_bits) / 8);                   \
        return _end##_bits##_to_cpu(val);                               \
    }                                                                   \
    static inline void st##_sname##_##_end##_dma(AddressSpace *as,      \
                                                 dma_addr_t addr,       \
                                                 uint##_bits##_t val)   \
    {                                                                   \
        val = cpu_to_##_end##_bits(val);                                \
        dma_memory_write(as, addr, &val, (_bits) / 8);                  \
    }
 186
 187static inline uint8_t ldub_dma(AddressSpace *as, dma_addr_t addr)
 188{
 189    uint8_t val;
 190
 191    dma_memory_read(as, addr, &val, 1);
 192    return val;
 193}
 194
 195static inline void stb_dma(AddressSpace *as, dma_addr_t addr, uint8_t val)
 196{
 197    dma_memory_write(as, addr, &val, 1);
 198}
 199
/* Instantiate 16/32/64-bit little- and big-endian DMA load/store helpers
 * (lduw/stw, ldl/stl, ldq/stq, each with _le_ and _be_ variants). */
DEFINE_LDST_DMA(uw, w, 16, le);
DEFINE_LDST_DMA(l, l, 32, le);
DEFINE_LDST_DMA(q, q, 64, le);
DEFINE_LDST_DMA(uw, w, 16, be);
DEFINE_LDST_DMA(l, l, 32, be);
DEFINE_LDST_DMA(q, q, 64, be);

#undef DEFINE_LDST_DMA
 208
/* One contiguous chunk of a scatter-gather transfer. */
struct ScatterGatherEntry {
    dma_addr_t base;    /* bus address of the chunk */
    dma_addr_t len;     /* length of the chunk in bytes */
};
 213
/* Initialize @qsg for device @dev over address space @as, pre-sizing
 * for @alloc_hint entries. */
void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as);
/* Append the chunk [@base, @base + @len) to @qsg, growing as needed. */
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
/* Release the resources held by @qsg (defined in dma-helpers.c). */
void qemu_sglist_destroy(QEMUSGList *qsg);
#endif
 219
/* Signature of the low-level block I/O routine driven by dma_blk_io()
 * (e.g. a blk_aio_readv/writev-style function). */
typedef BlockAIOCB *DMAIOFunc(BlockBackend *blk, int64_t sector_num,
                              QEMUIOVector *iov, int nb_sectors,
                              BlockCompletionFunc *cb, void *opaque);

/* Start asynchronous block I/O between @blk and the guest memory
 * described by @sg, using @io_func for each piece; @cb(opaque, ret)
 * is invoked on completion. */
BlockAIOCB *dma_blk_io(BlockBackend *blk,
                       QEMUSGList *sg, uint64_t sector_num,
                       DMAIOFunc *io_func, BlockCompletionFunc *cb,
                       void *opaque, DMADirection dir);
/* Convenience wrappers around dma_blk_io() for reads and writes. */
BlockAIOCB *dma_blk_read(BlockBackend *blk,
                         QEMUSGList *sg, uint64_t sector,
                         BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *dma_blk_write(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t sector,
                          BlockCompletionFunc *cb, void *opaque);
/* Copy between a host buffer and the guest memory described by @sg.
 * NOTE(review): presumably returns the number of bytes transferred —
 * confirm against dma-helpers.c. */
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg);
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);

/* Begin block-accounting for the transfer described by @sg. */
void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type);
 240#endif
 241