linux/include/linux/dma-map-ops.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This header is for implementations of dma_map_ops and related code.
 * It should not be included in drivers just using the DMA API.
 */
#ifndef _LINUX_DMA_MAP_OPS_H
#define _LINUX_DMA_MAP_OPS_H

#include <linux/dma-mapping.h>
#include <linux/pgtable.h>

struct cma;

struct dma_map_ops {
        void *(*alloc)(struct device *dev, size_t size,
                        dma_addr_t *dma_handle, gfp_t gfp,
                        unsigned long attrs);
        void (*free)(struct device *dev, size_t size, void *vaddr,
                        dma_addr_t dma_handle, unsigned long attrs);
        struct page *(*alloc_pages)(struct device *dev, size_t size,
                        dma_addr_t *dma_handle, enum dma_data_direction dir,
                        gfp_t gfp);
        void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
                        dma_addr_t dma_handle, enum dma_data_direction dir);
        void *(*alloc_noncoherent)(struct device *dev, size_t size,
                        dma_addr_t *dma_handle, enum dma_data_direction dir,
                        gfp_t gfp);
        void (*free_noncoherent)(struct device *dev, size_t size, void *vaddr,
                        dma_addr_t dma_handle, enum dma_data_direction dir);
        int (*mmap)(struct device *, struct vm_area_struct *,
                        void *, dma_addr_t, size_t, unsigned long attrs);

        int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size,
                        unsigned long attrs);

        dma_addr_t (*map_page)(struct device *dev, struct page *page,
                        unsigned long offset, size_t size,
                        enum dma_data_direction dir, unsigned long attrs);
        void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir,
                        unsigned long attrs);
        /*
         * map_sg returns 0 on error and a value > 0 on success.
         * It should never return a value < 0.  (See the example sketch
         * following this struct.)
         */
        int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
                        enum dma_data_direction dir, unsigned long attrs);
        void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
                        enum dma_data_direction dir, unsigned long attrs);
        dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
                        size_t size, enum dma_data_direction dir,
                        unsigned long attrs);
        void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir,
                        unsigned long attrs);
        void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir);
        void (*sync_single_for_device)(struct device *dev,
                        dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction dir);
        void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir);
        void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir);
        void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
                        enum dma_data_direction direction);
        int (*dma_supported)(struct device *dev, u64 mask);
        u64 (*get_required_mask)(struct device *dev);
        size_t (*max_mapping_size)(struct device *dev);
        unsigned long (*get_merge_boundary)(struct device *dev);
};
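
/*
 * Example (not part of the kernel API): a minimal sketch of how a bus or
 * platform layer might fill in dma_map_ops.  All foo_* names below are
 * hypothetical and only illustrate the calling conventions, in particular
 * that ->map_sg returns the number of mapped entries on success and 0 on
 * failure, never a negative value.
 *
 *      static dma_addr_t foo_map_page(struct device *dev, struct page *page,
 *                      unsigned long offset, size_t size,
 *                      enum dma_data_direction dir, unsigned long attrs)
 *      {
 *              // foo_phys_to_bus() is a made-up helper that translates a
 *              // CPU physical address into a bus address for this device.
 *              return foo_phys_to_bus(dev, page_to_phys(page) + offset);
 *      }
 *
 *      static int foo_map_sg(struct device *dev, struct scatterlist *sgl,
 *                      int nents, enum dma_data_direction dir,
 *                      unsigned long attrs)
 *      {
 *              struct scatterlist *sg;
 *              int i;
 *
 *              for_each_sg(sgl, sg, nents, i) {
 *                      sg_dma_address(sg) = foo_map_page(dev, sg_page(sg),
 *                                      sg->offset, sg->length, dir, attrs);
 *                      if (sg_dma_address(sg) == DMA_MAPPING_ERROR)
 *                              return 0;       // error: 0, never < 0
 *                      sg_dma_len(sg) = sg->length;
 *              }
 *              return nents;                   // success: nents > 0
 *      }
 *
 *      static const struct dma_map_ops foo_dma_ops = {
 *              .map_page       = foo_map_page,
 *              .map_sg         = foo_map_sg,
 *      };
 */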

#ifdef CONFIG_DMA_OPS
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
        if (dev->dma_ops)
                return dev->dma_ops;
        return get_arch_dma_ops(dev->bus);
}

static inline void set_dma_ops(struct device *dev,
                               const struct dma_map_ops *dma_ops)
{
        dev->dma_ops = dma_ops;
}
#else /* CONFIG_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
        return NULL;
}
static inline void set_dma_ops(struct device *dev,
                               const struct dma_map_ops *dma_ops)
{
}
#endif /* CONFIG_DMA_OPS */
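
/*
 * Example (illustrative only): per-device ops installed by a bus override
 * the architecture default returned by get_arch_dma_ops().  The function
 * and the foo_dma_ops instance are made up (see the sketch above); the
 * only real interfaces used are set_dma_ops() and get_dma_ops().
 *
 *      static int foo_bus_add_device(struct device *dev)
 *      {
 *              // Route every device on this bus through the bus' own ops.
 *              set_dma_ops(dev, &foo_dma_ops);
 *              return 0;
 *      }
 *
 * Afterwards dma_map_page() and friends resolve through
 * get_dma_ops(dev) == &foo_dma_ops instead of the architecture default
 * (or the direct mapping when no ops are set).
 */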

#ifdef CONFIG_DMA_CMA
extern struct cma *dma_contiguous_default_area;

static inline struct cma *dev_get_cma_area(struct device *dev)
{
        if (dev && dev->cma_area)
                return dev->cma_area;
        return dma_contiguous_default_area;
}

void dma_contiguous_reserve(phys_addr_t addr_limit);
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
                phys_addr_t limit, struct cma **res_cma, bool fixed);

struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
                                       unsigned int order, bool no_warn);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
                                 int count);
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
void dma_free_contiguous(struct device *dev, struct page *page, size_t size);

void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
#else /* CONFIG_DMA_CMA */
static inline struct cma *dev_get_cma_area(struct device *dev)
{
        return NULL;
}
static inline void dma_contiguous_reserve(phys_addr_t limit)
{
}
static inline int dma_contiguous_reserve_area(phys_addr_t size,
                phys_addr_t base, phys_addr_t limit, struct cma **res_cma,
                bool fixed)
{
        return -ENOSYS;
}
static inline struct page *dma_alloc_from_contiguous(struct device *dev,
                size_t count, unsigned int order, bool no_warn)
{
        return NULL;
}
static inline bool dma_release_from_contiguous(struct device *dev,
                struct page *pages, int count)
{
        return false;
}
/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
                gfp_t gfp)
{
        return NULL;
}
static inline void dma_free_contiguous(struct device *dev, struct page *page,
                size_t size)
{
        __free_pages(page, get_order(size));
}
#endif /* CONFIG_DMA_CMA */
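
/*
 * Example (illustrative only): the usual allocator pattern is to try the
 * device's CMA area first and fall back to the page allocator, which is
 * exactly what the !CONFIG_DMA_CMA stubs above degenerate into.  The
 * foo_alloc_pages()/foo_free_pages() wrappers are made up for this sketch.
 *
 *      static struct page *foo_alloc_pages(struct device *dev, size_t size,
 *                      gfp_t gfp)
 *      {
 *              struct page *page = dma_alloc_contiguous(dev, size, gfp);
 *
 *              if (!page)      // no usable CMA area, or CMA allocation failed
 *                      page = alloc_pages(gfp, get_order(size));
 *              return page;
 *      }
 *
 *      static void foo_free_pages(struct device *dev, struct page *page,
 *                      size_t size)
 *      {
 *              // Releases to CMA if the page came from there, otherwise this
 *              // falls back to __free_pages() internally.
 *              dma_free_contiguous(dev, page, size);
 *      }
 */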

#ifdef CONFIG_DMA_PERNUMA_CMA
void dma_pernuma_cma_reserve(void);
#else
static inline void dma_pernuma_cma_reserve(void) { }
#endif /* CONFIG_DMA_PERNUMA_CMA */

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                dma_addr_t device_addr, size_t size);
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
                dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
                dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
                size_t size, int *ret);

#else
static inline int dma_declare_coherent_memory(struct device *dev,
                phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
{
        return -ENOSYS;
}
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(struct device *dev,
                ssize_t size, dma_addr_t *dma_handle)
{
        return NULL;
}
static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
        return 0;
}
static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
                void *cpu_addr, size_t size, int *ret)
{
        return 0;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */
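
/*
 * Example (illustrative only): a platform driver can route all coherent
 * allocations for its device to a dedicated region such as on-chip SRAM.
 * foo_probe() and the addresses/size are made up for this sketch; only
 * dma_declare_coherent_memory() is the real interface declared above.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              int ret;
 *
 *              // Hypothetical layout: 1 MiB of SRAM at CPU physical address
 *              // 0x90000000, visible to the device at bus address 0x0.
 *              ret = dma_declare_coherent_memory(&pdev->dev, 0x90000000,
 *                                                0x0, SZ_1M);
 *              if (ret)
 *                      return ret;
 *
 *              // dma_alloc_coherent() for this device is now satisfied from
 *              // that region via dma_alloc_from_dev_coherent().
 *              return 0;
 *      }
 */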

int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
struct page *dma_common_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
                dma_addr_t dma_handle, enum dma_data_direction dir);

struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot,
                const void *caller);
void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot,
                const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
                void **cpu_addr, gfp_t flags,
                bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
bool dma_free_from_pool(struct device *dev, void *start, size_t size);

#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
#include <asm/dma-coherence.h>
#elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
        defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
        defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
static inline bool dev_is_dma_coherent(struct device *dev)
{
        return dev->dma_coherent;
}
#else
static inline bool dev_is_dma_coherent(struct device *dev)
{
        return true;
}
#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs);

#ifdef CONFIG_MMU
/*
 * Page protection so that devices that can't snoop CPU caches can use the
 * memory coherently.  We default to pgprot_noncached which is usually used
 * for ioremap as a safe bet, but architectures can override this with less
 * strict semantics if possible.
 */
#ifndef pgprot_dmacoherent
#define pgprot_dmacoherent(prot)        pgprot_noncached(prot)
#endif

pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
                unsigned long attrs)
{
        return prot;    /* no protection bits supported without page tables */
}
#endif /* CONFIG_MMU */
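
/*
 * Example (illustrative only): how an mmap implementation typically applies
 * dma_pgprot().  foo_mmap() is made up; dma_common_mmap() declared earlier
 * in this header is the real generic helper that does essentially this.
 *
 *      static int foo_mmap(struct device *dev, struct vm_area_struct *vma,
 *                      void *cpu_addr, dma_addr_t dma_addr, size_t size,
 *                      unsigned long attrs)
 *      {
 *              // Non-coherent devices get (at least) pgprot_dmacoherent()
 *              // protections; coherent devices keep cacheable mappings.
 *              vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
 *
 *              return remap_pfn_range(vma, vma->vm_start,
 *                              page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
 *                              vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
 *      }
 */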

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
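
/*
 * Example (illustrative only): how a streaming mapping on a non-coherent
 * platform typically uses the two hooks above (this is roughly what the
 * dma-direct code does).  The foo_nc_* functions and foo_phys_to_bus()/
 * foo_bus_to_phys() helpers are made up for this sketch.
 *
 *      static dma_addr_t foo_nc_map_page(struct device *dev, struct page *page,
 *                      unsigned long offset, size_t size,
 *                      enum dma_data_direction dir, unsigned long attrs)
 *      {
 *              phys_addr_t paddr = page_to_phys(page) + offset;
 *
 *              // Write back/invalidate CPU caches before the device touches
 *              // the buffer.
 *              if (!dev_is_dma_coherent(dev) &&
 *                  !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 *                      arch_sync_dma_for_device(paddr, size, dir);
 *              return foo_phys_to_bus(dev, paddr);
 *      }
 *
 *      static void foo_nc_unmap_page(struct device *dev, dma_addr_t handle,
 *                      size_t size, enum dma_data_direction dir,
 *                      unsigned long attrs)
 *      {
 *              // Make device writes visible to the CPU before the buffer is
 *              // handed back to its owner.
 *              if (!dev_is_dma_coherent(dev) &&
 *                  !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 *                      arch_sync_dma_for_cpu(foo_bus_to_phys(dev, handle),
 *                                            size, dir);
 *      }
 */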

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */

#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
#else
static inline void arch_dma_prep_coherent(struct page *page, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */

#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
#else
static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
}
#endif /* ARCH_HAS_DMA_MARK_CLEAN */

void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                const struct iommu_ops *iommu, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
                u64 size, const struct iommu_ops *iommu, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */

#ifdef CONFIG_DMA_API_DEBUG
void dma_debug_add_bus(struct bus_type *bus);
void debug_dma_dump_mappings(struct device *dev);
#else
static inline void dma_debug_add_bus(struct bus_type *bus)
{
}
static inline void debug_dma_dump_mappings(struct device *dev)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

extern const struct dma_map_ops dma_dummy_ops;

#endif /* _LINUX_DMA_MAP_OPS_H */