qemu/include/sysemu/dma.h
/*
 * DMA helper functions
 *
 * Copyright (c) 2009, 2020 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#ifndef DMA_H
#define DMA_H

#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "block/block.h"
#include "block/accounting.h"

typedef enum {
    DMA_DIRECTION_TO_DEVICE = 0,
    DMA_DIRECTION_FROM_DEVICE = 1,
} DMADirection;

/*
 * When an IOMMU is present, bus addresses become distinct from
 * CPU/memory physical addresses and may be a different size.  Because
 * the IOVA size depends more on the bus than on the platform, we more
 * or less have to treat these as 64-bit always to cover all (or at
 * least most) cases.
 */
typedef uint64_t dma_addr_t;

#define DMA_ADDR_BITS 64
#define DMA_ADDR_FMT "%" PRIx64

typedef struct ScatterGatherEntry ScatterGatherEntry;

struct QEMUSGList {
    ScatterGatherEntry *sg;
    int nsg;
    int nalloc;
    dma_addr_t size;
    DeviceState *dev;
    AddressSpace *as;
};

static inline void dma_barrier(AddressSpace *as, DMADirection dir)
{
    /*
     * This is called before DMA read and write operations
     * unless the _relaxed form is used and is responsible
     * for providing some sane ordering of accesses vs
     * concurrently running VCPUs.
     *
     * Users of map(), unmap() or lower level st/ld_*
     * operations are responsible for providing their own
     * ordering via barriers.
     *
     * This primitive implementation does a simple smp_mb()
     * before each operation which provides pretty much full
     * ordering.
     *
     * A smarter implementation can be devised if needed to
     * use lighter barriers based on the direction of the
     * transfer, the DMA context, etc...
     */
    smp_mb();
}

/* Checks that the given range of addresses is valid for DMA.  This is
 * useful for certain cases, but usually you should just use
 * dma_memory_{read,write}() and check for errors */
static inline bool dma_memory_valid(AddressSpace *as,
                                    dma_addr_t addr, dma_addr_t len,
                                    DMADirection dir, MemTxAttrs attrs)
{
    return address_space_access_valid(as, addr, len,
                                      dir == DMA_DIRECTION_FROM_DEVICE,
                                      attrs);
}
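
/*
 * Illustrative sketch (not part of this header): a device model checking
 * that a guest-programmed window is DMA-able before committing to the
 * transfer.  "pdev", "base" and "size" are hypothetical names invented
 * for the example; pci_get_address_space() is the usual way for a PCI
 * device to obtain its DMA address space.
 *
 *     AddressSpace *as = pci_get_address_space(pdev);
 *
 *     if (!dma_memory_valid(as, base, size, DMA_DIRECTION_FROM_DEVICE,
 *                           MEMTXATTRS_UNSPECIFIED)) {
 *         return;  // report an error to the guest instead of DMAing
 *     }
 */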

static inline MemTxResult dma_memory_rw_relaxed(AddressSpace *as,
                                                dma_addr_t addr,
                                                void *buf, dma_addr_t len,
                                                DMADirection dir,
                                                MemTxAttrs attrs)
{
    return address_space_rw(as, addr, attrs,
                            buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
}

static inline MemTxResult dma_memory_read_relaxed(AddressSpace *as,
                                                  dma_addr_t addr,
                                                  void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(as, addr, buf, len,
                                 DMA_DIRECTION_TO_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
}

static inline MemTxResult dma_memory_write_relaxed(AddressSpace *as,
                                                   dma_addr_t addr,
                                                   const void *buf,
                                                   dma_addr_t len)
{
    return dma_memory_rw_relaxed(as, addr, (void *)buf, len,
                                 DMA_DIRECTION_FROM_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
}

/**
 * dma_memory_rw: Read from or write to an address space from DMA controller.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: the number of bytes to read or write
 * @dir: indicates the transfer direction
 * @attrs: memory transaction attributes
 */
static inline MemTxResult dma_memory_rw(AddressSpace *as, dma_addr_t addr,
                                        void *buf, dma_addr_t len,
                                        DMADirection dir, MemTxAttrs attrs)
{
    dma_barrier(as, dir);

    return dma_memory_rw_relaxed(as, addr, buf, len, dir, attrs);
}
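
/*
 * Illustrative sketch: dma_memory_rw() is convenient when the direction
 * is only known at run time, e.g. from a command flag.  "s->as", "addr",
 * "buf", "len" and "cmd_is_write" are assumptions made for the example.
 *
 *     DMADirection dir = cmd_is_write ? DMA_DIRECTION_FROM_DEVICE
 *                                     : DMA_DIRECTION_TO_DEVICE;
 *
 *     if (dma_memory_rw(s->as, addr, buf, len, dir,
 *                       MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
 *         // surface a DMA error to the guest
 *     }
 */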

/**
 * dma_memory_read: Read from an address space from DMA controller.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).  Called within RCU critical section.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 * @attrs: memory transaction attributes
 */
static inline MemTxResult dma_memory_read(AddressSpace *as, dma_addr_t addr,
                                          void *buf, dma_addr_t len,
                                          MemTxAttrs attrs)
{
    return dma_memory_rw(as, addr, buf, len,
                         DMA_DIRECTION_TO_DEVICE, attrs);
}
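
/*
 * Illustrative sketch: fetching a guest-resident descriptor into a host
 * structure.  "MyRingDescriptor" and "desc_addr" are hypothetical; real
 * devices normally use their own address space rather than
 * address_space_memory when sitting behind an IOMMU.
 *
 *     MyRingDescriptor desc;
 *
 *     if (dma_memory_read(&address_space_memory, desc_addr,
 *                         &desc, sizeof(desc),
 *                         MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
 *         return;  // descriptor fetch failed
 *     }
 *     // Multi-byte fields still need byte swapping, e.g. le32_to_cpu().
 */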

/**
 * dma_memory_write: Write to address space from DMA controller.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: the number of bytes to write
 * @attrs: memory transaction attributes
 */
static inline MemTxResult dma_memory_write(AddressSpace *as, dma_addr_t addr,
                                           const void *buf, dma_addr_t len,
                                           MemTxAttrs attrs)
{
    return dma_memory_rw(as, addr, (void *)buf, len,
                         DMA_DIRECTION_FROM_DEVICE, attrs);
}
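
/*
 * Illustrative sketch: writing a completion word back to guest memory.
 * "comp_addr" is hypothetical.  Because the prototype takes a const
 * buffer, no cast is needed at the call site.
 *
 *     uint32_t status = cpu_to_le32(1);   // "done" flag, example layout
 *
 *     dma_memory_write(&address_space_memory, comp_addr,
 *                      &status, sizeof(status), MEMTXATTRS_UNSPECIFIED);
 */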

/**
 * dma_memory_set: Fill memory with a constant byte from DMA controller.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @c: constant byte to fill the memory
 * @len: the number of bytes to fill with the constant byte
 * @attrs: memory transaction attributes
 */
MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr,
                           uint8_t c, dma_addr_t len, MemTxAttrs attrs);
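
/*
 * Illustrative sketch: clearing a guest buffer, e.g. when handing a
 * freshly reset queue back to the driver.  "queue_base" and "queue_size"
 * are hypothetical.
 *
 *     if (dma_memory_set(&address_space_memory, queue_base, 0,
 *                        queue_size, MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
 *         // the region was not (fully) writable
 *     }
 */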

/**
 * dma_memory_map: Map a physical memory region into a host virtual address.
 *
 * May map a subset of the requested range, given by and returned in @len.
 * May return %NULL and set *@len to zero (0), if resources needed to perform
 * the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: pointer to length of buffer; updated on return
 * @dir: indicates the transfer direction
 * @attrs: memory attributes
 */
static inline void *dma_memory_map(AddressSpace *as,
                                   dma_addr_t addr, dma_addr_t *len,
                                   DMADirection dir, MemTxAttrs attrs)
{
    hwaddr xlen = *len;
    void *p;

    p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE,
                          attrs);
    *len = xlen;
    return p;
}

/**
 * dma_memory_unmap: Unmaps a memory region previously mapped
 *                   by dma_memory_map()
 *
 * Will also mark the memory as dirty if @dir == %DMA_DIRECTION_FROM_DEVICE.
 * @access_len gives the amount of memory that was actually read or written
 * by the caller.
 *
 * @as: #AddressSpace used
 * @buffer: host pointer as returned by dma_memory_map()
 * @len: buffer length as returned by dma_memory_map()
 * @dir: indicates the transfer direction
 * @access_len: amount of data actually transferred
 */
static inline void dma_memory_unmap(AddressSpace *as,
                                    void *buffer, dma_addr_t len,
                                    DMADirection dir, dma_addr_t access_len)
{
    address_space_unmap(as, buffer, (hwaddr)len,
                        dir == DMA_DIRECTION_FROM_DEVICE, access_len);
}
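
/*
 * Illustrative sketch of the map/unmap pairing: obtain a host pointer
 * for a (possibly shortened) window, fill it, then unmap with the number
 * of bytes actually written so dirty tracking stays correct.  "as",
 * "dst_addr", "src" and "len" are assumptions for the example.
 *
 *     dma_addr_t maplen = len;
 *     void *host = dma_memory_map(as, dst_addr, &maplen,
 *                                 DMA_DIRECTION_FROM_DEVICE,
 *                                 MEMTXATTRS_UNSPECIFIED);
 *     if (host) {
 *         memcpy(host, src, maplen);   // maplen may be smaller than len
 *         dma_memory_unmap(as, host, maplen,
 *                          DMA_DIRECTION_FROM_DEVICE, maplen);
 *     }
 */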

#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
    static inline MemTxResult ld##_lname##_##_end##_dma(AddressSpace *as, \
                                                        dma_addr_t addr, \
                                                        uint##_bits##_t *pval, \
                                                        MemTxAttrs attrs) \
    { \
        MemTxResult res = dma_memory_read(as, addr, pval, (_bits) / 8, attrs); \
        _end##_bits##_to_cpus(pval); \
        return res; \
    } \
    static inline MemTxResult st##_sname##_##_end##_dma(AddressSpace *as, \
                                                        dma_addr_t addr, \
                                                        uint##_bits##_t val, \
                                                        MemTxAttrs attrs) \
    { \
        val = cpu_to_##_end##_bits(val); \
        return dma_memory_write(as, addr, &val, (_bits) / 8, attrs); \
    }

static inline MemTxResult ldub_dma(AddressSpace *as, dma_addr_t addr,
                                   uint8_t *val, MemTxAttrs attrs)
{
    return dma_memory_read(as, addr, val, 1, attrs);
}

static inline MemTxResult stb_dma(AddressSpace *as, dma_addr_t addr,
                                  uint8_t val, MemTxAttrs attrs)
{
    return dma_memory_write(as, addr, &val, 1, attrs);
}

DEFINE_LDST_DMA(uw, w, 16, le);
DEFINE_LDST_DMA(l, l, 32, le);
DEFINE_LDST_DMA(q, q, 64, le);
DEFINE_LDST_DMA(uw, w, 16, be);
DEFINE_LDST_DMA(l, l, 32, be);
DEFINE_LDST_DMA(q, q, 64, be);

#undef DEFINE_LDST_DMA
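
/*
 * The macro above expands to fixed-width, endian-aware accessors such as
 * lduw_le_dma(), ldl_le_dma(), ldq_le_dma(), stw_le_dma(), stl_le_dma(),
 * stq_le_dma() and their _be_ counterparts.  Illustrative sketch with a
 * hypothetical "tail_addr" of a little-endian ring:
 *
 *     uint32_t tail;
 *
 *     if (ldl_le_dma(as, tail_addr, &tail, MEMTXATTRS_UNSPECIFIED) ==
 *         MEMTX_OK) {
 *         // "tail" is already in host byte order here
 *         stl_le_dma(as, tail_addr, tail + 1, MEMTXATTRS_UNSPECIFIED);
 *     }
 */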

struct ScatterGatherEntry {
    dma_addr_t base;
    dma_addr_t len;
};

void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as);
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
void qemu_sglist_destroy(QEMUSGList *qsg);
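
/*
 * Illustrative sketch: building a scatter/gather list from two
 * guest-programmed segments and tearing it down once the transfer has
 * completed.  "dev", "as" and the seg* variables are hypothetical.
 *
 *     QEMUSGList qsg;
 *
 *     qemu_sglist_init(&qsg, dev, 2, as);
 *     qemu_sglist_add(&qsg, seg0_addr, seg0_len);
 *     qemu_sglist_add(&qsg, seg1_addr, seg1_len);
 *     // ... issue the transfer, e.g. via dma_blk_read()/dma_blk_write() ...
 *     qemu_sglist_destroy(&qsg);
 */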

typedef BlockAIOCB *DMAIOFunc(int64_t offset, QEMUIOVector *iov,
                              BlockCompletionFunc *cb, void *cb_opaque,
                              void *opaque);

BlockAIOCB *dma_blk_io(AioContext *ctx,
                       QEMUSGList *sg, uint64_t offset, uint32_t align,
                       DMAIOFunc *io_func, void *io_func_opaque,
                       BlockCompletionFunc *cb, void *opaque, DMADirection dir);
BlockAIOCB *dma_blk_read(BlockBackend *blk,
                         QEMUSGList *sg, uint64_t offset, uint32_t align,
                         BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *dma_blk_write(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t offset, uint32_t align,
                          BlockCompletionFunc *cb, void *opaque);
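
/*
 * Illustrative sketch: asynchronously reading sectors from a block
 * backend straight into the guest buffers described by a QEMUSGList.
 * "s->blk", "s->qsg", "sector_num", the 512-byte alignment and
 * "my_dma_complete" are assumptions for the example.
 *
 *     s->aiocb = dma_blk_read(s->blk, &s->qsg,
 *                             sector_num * 512, 512,
 *                             my_dma_complete, s);
 *
 * The completion callback has the usual BlockCompletionFunc signature
 * (void *opaque, int ret) and runs once every segment has been
 * transferred or an error has occurred.
 */
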
MemTxResult dma_buf_read(void *ptr, dma_addr_t len, dma_addr_t *residual,
                         QEMUSGList *sg, MemTxAttrs attrs);
MemTxResult dma_buf_write(void *ptr, dma_addr_t len, dma_addr_t *residual,
                          QEMUSGList *sg, MemTxAttrs attrs);
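
/*
 * Illustrative sketch: gathering a scatter/gather list into a linear
 * bounce buffer, e.g. for a command payload the device must parse in one
 * piece.  The names follow the guest's point of view, so dma_buf_write()
 * copies guest memory into @ptr while dma_buf_read() copies @ptr out to
 * guest memory.  "buf" and "qsg" are hypothetical.
 *
 *     uint8_t buf[256];
 *     dma_addr_t residual;
 *
 *     if (dma_buf_write(buf, sizeof(buf), &residual, &qsg,
 *                       MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
 *         // at least one segment faulted
 *     }
 *     // "residual" holds the number of bytes that were not transferred.
 */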

void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type);

/**
 * dma_aligned_pow2_mask: Return the address bit mask of the largest
 * power of 2 size less than or equal to @end - @start + 1, aligned with
 * @start, and bounded by 1 << @max_addr_bits bits.
 *
 * @start: range start address
 * @end: range end address (greater than @start)
 * @max_addr_bits: max address bits (<= 64)
 */
uint64_t dma_aligned_pow2_mask(uint64_t start, uint64_t end,
                               int max_addr_bits);
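
/*
 * Illustrative worked example (values invented for the example): with
 * start = 0x5000 and end = 0x8fff the span is 0x4000 bytes, but 0x5000
 * is only naturally aligned to 0x1000, so the largest usable
 * power-of-two block is 0x1000 bytes and the returned mask is 0xfff.
 */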

#endif