qemu/include/sysemu/dma.h
/*
 * DMA helper functions
 *
 * Copyright (c) 2009, 2020 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#ifndef DMA_H
#define DMA_H

#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "block/block.h"
#include "block/accounting.h"

typedef struct ScatterGatherEntry ScatterGatherEntry;

typedef enum {
    DMA_DIRECTION_TO_DEVICE = 0,
    DMA_DIRECTION_FROM_DEVICE = 1,
} DMADirection;

struct QEMUSGList {
    ScatterGatherEntry *sg;
    int nsg;
    int nalloc;
    size_t size;
    DeviceState *dev;
    AddressSpace *as;
};

#ifndef CONFIG_USER_ONLY

/*
 * When an IOMMU is present, bus addresses become distinct from
 * CPU/memory physical addresses and may be a different size.  Because
 * the IOVA size depends more on the bus than on the platform, we more
 * or less have to treat these as 64-bit always to cover all (or at
 * least most) cases.
 */
typedef uint64_t dma_addr_t;

#define DMA_ADDR_BITS 64
#define DMA_ADDR_FMT "%" PRIx64

static inline void dma_barrier(AddressSpace *as, DMADirection dir)
{
    /*
     * This is called before DMA read and write operations
     * unless the _relaxed form is used and is responsible
     * for providing some sane ordering of accesses vs
     * concurrently running VCPUs.
     *
     * Users of map(), unmap() or lower level st/ld_*
     * operations are responsible for providing their own
     * ordering via barriers.
     *
     * This primitive implementation does a simple smp_mb()
     * before each operation which provides pretty much full
     * ordering.
     *
     * A smarter implementation can be devised if needed to
     * use lighter barriers based on the direction of the
     * transfer, the DMA context, etc...
     */
    smp_mb();
}

/* Checks that the given range of addresses is valid for DMA.  This is
 * useful for certain cases, but usually you should just use
 * dma_memory_{read,write}() and check for errors */
static inline bool dma_memory_valid(AddressSpace *as,
                                    dma_addr_t addr, dma_addr_t len,
                                    DMADirection dir)
{
    return address_space_access_valid(as, addr, len,
                                      dir == DMA_DIRECTION_FROM_DEVICE,
                                      MEMTXATTRS_UNSPECIFIED);
}
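
/*
 * Example (illustrative sketch, not part of this header): a device model
 * validating a guest-programmed ring buffer before touching it.  "s->as",
 * "ring_base" and "ring_bytes" are hypothetical device state.
 *
 *     if (!dma_memory_valid(s->as, ring_base, ring_bytes,
 *                           DMA_DIRECTION_FROM_DEVICE)) {
 *         ...flag a device-specific error instead of starting the transfer...
 *     }
 */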

static inline MemTxResult dma_memory_rw_relaxed(AddressSpace *as,
                                                dma_addr_t addr,
                                                void *buf, dma_addr_t len,
                                                DMADirection dir)
{
    return address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
                            buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
}

static inline MemTxResult dma_memory_read_relaxed(AddressSpace *as,
                                                  dma_addr_t addr,
                                                  void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(as, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

static inline MemTxResult dma_memory_write_relaxed(AddressSpace *as,
                                                   dma_addr_t addr,
                                                   const void *buf,
                                                   dma_addr_t len)
{
    return dma_memory_rw_relaxed(as, addr, (void *)buf, len,
                                 DMA_DIRECTION_FROM_DEVICE);
}

/**
 * dma_memory_rw: Read from or write to an address space from DMA controller.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: the number of bytes to read or write
 * @dir: indicates the transfer direction
 */
static inline MemTxResult dma_memory_rw(AddressSpace *as, dma_addr_t addr,
                                        void *buf, dma_addr_t len,
                                        DMADirection dir)
{
    dma_barrier(as, dir);

    return dma_memory_rw_relaxed(as, addr, buf, len, dir);
}

/**
 * dma_memory_read: Read from an address space from DMA controller.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).  Called within RCU critical section.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline MemTxResult dma_memory_read(AddressSpace *as, dma_addr_t addr,
                                          void *buf, dma_addr_t len)
{
    return dma_memory_rw(as, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

/**
 * dma_memory_write: Write to address space from DMA controller.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: the number of bytes to write
 */
static inline MemTxResult dma_memory_write(AddressSpace *as, dma_addr_t addr,
                                           const void *buf, dma_addr_t len)
{
    return dma_memory_rw(as, addr, (void *)buf, len,
                         DMA_DIRECTION_FROM_DEVICE);
}

/**
 * dma_memory_set: Fill memory with a constant byte from DMA controller.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @c: constant byte to fill the memory
 * @len: the number of bytes to fill with the constant byte
 */
MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr,
                           uint8_t c, dma_addr_t len);
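
/*
 * Example (illustrative sketch, not part of this header): zero-filling a
 * guest-visible completion queue before use.  "s->as", "cq_addr" and
 * "cq_bytes" are hypothetical device state.
 *
 *     if (dma_memory_set(s->as, cq_addr, 0, cq_bytes) != MEMTX_OK) {
 *         ...handle the failed guest memory write...
 *     }
 */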

/**
 * dma_memory_map: Map a physical memory region into a host virtual address.
 *
 * May map a subset of the requested range, given by and returned in @len.
 * May return %NULL and set *@len to zero(0), if resources needed to perform
 * the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: pointer to length of buffer; updated on return
 * @dir: indicates the transfer direction
 */
static inline void *dma_memory_map(AddressSpace *as,
                                   dma_addr_t addr, dma_addr_t *len,
                                   DMADirection dir)
{
    hwaddr xlen = *len;
    void *p;

    p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE,
                          MEMTXATTRS_UNSPECIFIED);
    *len = xlen;
    return p;
}

/**
 * dma_memory_unmap: Unmaps a memory region previously mapped
 *                   by dma_memory_map()
 *
 * Will also mark the memory as dirty if @dir == %DMA_DIRECTION_FROM_DEVICE.
 * @access_len gives the amount of memory that was actually read or written
 * by the caller.
 *
 * @as: #AddressSpace used
 * @buffer: host pointer as returned by dma_memory_map()
 * @len: buffer length as returned by dma_memory_map()
 * @dir: indicates the transfer direction
 * @access_len: amount of data actually transferred
 */
static inline void dma_memory_unmap(AddressSpace *as,
                                    void *buffer, dma_addr_t len,
                                    DMADirection dir, dma_addr_t access_len)
{
    address_space_unmap(as, buffer, (hwaddr)len,
                        dir == DMA_DIRECTION_FROM_DEVICE, access_len);
}
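
/*
 * Example (illustrative sketch, not part of this header): mapping a guest
 * buffer for direct access and releasing it afterwards.  The mapping may
 * cover only a prefix of the request (or fail outright), in which case a
 * device would typically fall back to dma_memory_read()/dma_memory_write().
 * "s->as", "buf_addr" and "size" are hypothetical device state.
 *
 *     dma_addr_t len = size;
 *     void *p = dma_memory_map(s->as, buf_addr, &len,
 *                              DMA_DIRECTION_FROM_DEVICE);
 *
 *     if (p) {
 *         ...fill p with up to len bytes produced by the device...
 *         dma_memory_unmap(s->as, p, len, DMA_DIRECTION_FROM_DEVICE, len);
 *     } else {
 *         ...fall back to dma_memory_write() for the whole buffer...
 *     }
 */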

#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
    static inline uint##_bits##_t ld##_lname##_##_end##_dma(AddressSpace *as, \
                                                            dma_addr_t addr) \
    {                                                                   \
        uint##_bits##_t val;                                            \
        dma_memory_read(as, addr, &val, (_bits) / 8);                   \
        return _end##_bits##_to_cpu(val);                               \
    }                                                                   \
    static inline void st##_sname##_##_end##_dma(AddressSpace *as,      \
                                                 dma_addr_t addr,       \
                                                 uint##_bits##_t val)   \
    {                                                                   \
        val = cpu_to_##_end##_bits(val);                                \
        dma_memory_write(as, addr, &val, (_bits) / 8);                  \
    }

static inline uint8_t ldub_dma(AddressSpace *as, dma_addr_t addr)
{
    uint8_t val;

    dma_memory_read(as, addr, &val, 1);
    return val;
}

static inline void stb_dma(AddressSpace *as, dma_addr_t addr, uint8_t val)
{
    dma_memory_write(as, addr, &val, 1);
}

DEFINE_LDST_DMA(uw, w, 16, le);
DEFINE_LDST_DMA(l, l, 32, le);
DEFINE_LDST_DMA(q, q, 64, le);
DEFINE_LDST_DMA(uw, w, 16, be);
DEFINE_LDST_DMA(l, l, 32, be);
DEFINE_LDST_DMA(q, q, 64, be);

#undef DEFINE_LDST_DMA
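
/*
 * Example (illustrative sketch, not part of this header): the macro above
 * expands to lduw/ldl/ldq and stw/stl/stq helpers for both endiannesses,
 * e.g. ldl_le_dma() and stq_be_dma().  "s->as", "desc_addr" and
 * "status_addr" are hypothetical device state.
 *
 *     uint32_t flags = ldl_le_dma(s->as, desc_addr + 4);
 *     stq_le_dma(s->as, status_addr, 1);
 */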

struct ScatterGatherEntry {
    dma_addr_t base;
    dma_addr_t len;
};

void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as);
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
void qemu_sglist_destroy(QEMUSGList *qsg);
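
/*
 * Example (illustrative sketch, not part of this header): building a
 * two-entry scatter-gather list and tearing it down again.  "s", "s->as"
 * and the desc* variables are hypothetical device state.
 *
 *     QEMUSGList qsg;
 *
 *     qemu_sglist_init(&qsg, DEVICE(s), 2, s->as);
 *     qemu_sglist_add(&qsg, desc0_addr, desc0_len);
 *     qemu_sglist_add(&qsg, desc1_addr, desc1_len);
 *     ...pass &qsg to dma_blk_read()/dma_blk_write() or dma_buf_*()...
 *     qemu_sglist_destroy(&qsg);
 */
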
#endif

typedef BlockAIOCB *DMAIOFunc(int64_t offset, QEMUIOVector *iov,
                              BlockCompletionFunc *cb, void *cb_opaque,
                              void *opaque);

BlockAIOCB *dma_blk_io(AioContext *ctx,
                       QEMUSGList *sg, uint64_t offset, uint32_t align,
                       DMAIOFunc *io_func, void *io_func_opaque,
                       BlockCompletionFunc *cb, void *opaque, DMADirection dir);
BlockAIOCB *dma_blk_read(BlockBackend *blk,
                         QEMUSGList *sg, uint64_t offset, uint32_t align,
                         BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *dma_blk_write(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t offset, uint32_t align,
                          BlockCompletionFunc *cb, void *opaque);
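
/*
 * Example (illustrative sketch, not part of this header): starting an
 * asynchronous block read that scatters sector data into the guest memory
 * described by a QEMUSGList.  "s->blk", "s->sg", "sector_num" and
 * "my_dma_complete" are hypothetical device state and callback.
 *
 *     s->aiocb = dma_blk_read(s->blk, &s->sg, sector_num * 512, 512,
 *                             my_dma_complete, s);
 */
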
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg);
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);

void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type);

/**
 * dma_aligned_pow2_mask: Return the address bit mask of the largest
 * power of 2 size less than or equal to @end - @start + 1, aligned with
 * @start, and bounded by 1 << @max_addr_bits bits.
 *
 * @start: range start address
 * @end: range end address (greater than @start)
 * @max_addr_bits: max address bits (<= 64)
 */
uint64_t dma_aligned_pow2_mask(uint64_t start, uint64_t end,
                               int max_addr_bits);
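
/*
 * For instance (illustrative, assuming the semantics documented above),
 * dma_aligned_pow2_mask(0x11000, 0x11fff, 64) yields 0xfff: the range is
 * 0x1000 bytes long and 0x11000 is 0x1000-aligned, so the largest usable
 * power-of-2 size is 0x1000 and its address mask is 0xfff.
 */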

#endif