linux/include/linux/dma-mapping.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING          (1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE          (1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT         (1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING      (1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC          (1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS       (1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES     (1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN        (1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED             (1UL << 9)

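/*
 * Illustrative sketch (not part of this header): attributes are combined
 * into a bitmask and passed through the 'attrs' argument of the *_attrs
 * interfaces declared below.  'dev', 'buf' and 'len' are hypothetical.
 *
 *      unsigned long attrs = DMA_ATTR_WEAK_ORDERING | DMA_ATTR_NO_WARN;
 *      dma_addr_t handle;
 *
 *      handle = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, attrs);
 *      if (dma_mapping_error(dev, handle))
 *              return -ENOMEM;
 *      ...
 *      dma_unmap_single_attrs(dev, handle, len, DMA_TO_DEVICE, attrs);
 */
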
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target.  A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
        void* (*alloc)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp,
                                unsigned long attrs);
        void (*free)(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle,
                              unsigned long attrs);
        int (*mmap)(struct device *, struct vm_area_struct *,
                          void *, dma_addr_t, size_t,
                          unsigned long attrs);

        int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
                           dma_addr_t, size_t, unsigned long attrs);

        dma_addr_t (*map_page)(struct device *dev, struct page *page,
                               unsigned long offset, size_t size,
                               enum dma_data_direction dir,
                               unsigned long attrs);
        void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction dir,
                           unsigned long attrs);
        /*
         * map_sg returns 0 on error and a value > 0 on success.
         * It should never return a value < 0.
         */
        int (*map_sg)(struct device *dev, struct scatterlist *sg,
                      int nents, enum dma_data_direction dir,
                      unsigned long attrs);
        void (*unmap_sg)(struct device *dev,
                         struct scatterlist *sg, int nents,
                         enum dma_data_direction dir,
                         unsigned long attrs);
        dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
                               size_t size, enum dma_data_direction dir,
                               unsigned long attrs);
        void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction dir,
                           unsigned long attrs);
        void (*sync_single_for_cpu)(struct device *dev,
                                    dma_addr_t dma_handle, size_t size,
                                    enum dma_data_direction dir);
        void (*sync_single_for_device)(struct device *dev,
                                       dma_addr_t dma_handle, size_t size,
                                       enum dma_data_direction dir);
        void (*sync_sg_for_cpu)(struct device *dev,
                                struct scatterlist *sg, int nents,
                                enum dma_data_direction dir);
        void (*sync_sg_for_device)(struct device *dev,
                                   struct scatterlist *sg, int nents,
                                   enum dma_data_direction dir);
        void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
                        enum dma_data_direction direction);
        int (*dma_supported)(struct device *dev, u64 mask);
        u64 (*get_required_mask)(struct device *dev);
        size_t (*max_mapping_size)(struct device *dev);
        unsigned long (*get_merge_boundary)(struct device *dev);
};

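/*
 * Illustrative sketch (an assumption, not kernel code): a bus or IOMMU layer
 * that needs non-default behaviour fills in a dma_map_ops table and installs
 * it with set_dma_ops() (declared below).  The my_bus_* callbacks here are
 * hypothetical.
 *
 *      static const struct dma_map_ops my_bus_dma_ops = {
 *              .alloc          = my_bus_alloc,
 *              .free           = my_bus_free,
 *              .map_page       = my_bus_map_page,
 *              .unmap_page     = my_bus_unmap_page,
 *              .map_sg         = my_bus_map_sg,
 *              .unmap_sg       = my_bus_unmap_sg,
 *      };
 *
 *      set_dma_ops(dev, &my_bus_dma_ops);
 */
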
#define DMA_MAPPING_ERROR               (~(dma_addr_t)0)

extern const struct dma_map_ops dma_virt_ops;
extern const struct dma_map_ops dma_dummy_ops;

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE   0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
        return ((dma_direction == DMA_BIDIRECTIONAL) ||
                (dma_direction == DMA_TO_DEVICE) ||
                (dma_direction == DMA_FROM_DEVICE));
}

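/*
 * Example (sketch): DMA_BIT_MASK(n) builds the mask for an n-bit addressing
 * capability.  A driver that knows its device can only generate 32-bit DMA
 * addresses would typically do this once at probe time:
 *
 *      if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *              return -EIO;
 */
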
#ifdef CONFIG_DMA_DECLARE_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
                                       dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
                            void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
                                  size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
                                                   dma_addr_t *dma_handle)
{
        return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
        return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
                                                void *cpu_addr, size_t size,
                                                int *ret)
{
        return 0;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */

static inline bool dma_is_direct(const struct dma_map_ops *ops)
{
        return likely(!ops);
}

/*
 * All the dma_direct_* declarations are here just for the indirect call bypass,
 * and must not be used directly by drivers!
 */
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
                enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir, unsigned long attrs);

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_device(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir);
void dma_direct_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_single_for_device(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_direct_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_single_for_cpu(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir);
void dma_direct_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_direct_unmap_sg(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir,
                unsigned long attrs)
{
}
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

size_t dma_direct_max_mapping_size(struct device *dev);

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
        if (dev->dma_ops)
                return dev->dma_ops;
        return get_arch_dma_ops(dev->bus);
}

static inline void set_dma_ops(struct device *dev,
                               const struct dma_map_ops *dma_ops)
{
        dev->dma_ops = dma_ops;
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
                struct page *page, size_t offset, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        BUG_ON(!valid_dma_direction(dir));
        if (dma_is_direct(ops))
                addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
        else
                addr = ops->map_page(dev, page, offset, size, dir, attrs);
        debug_dma_map_page(dev, page, offset, size, dir, addr);

        return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_is_direct(ops))
                dma_direct_unmap_page(dev, addr, size, dir, attrs);
        else if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, attrs);
        debug_dma_unmap_page(dev, addr, size, dir);
}

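/*
 * Illustrative sketch: a typical streaming mapping of one page for a
 * device-to-memory transfer.  'dev' and 'page' are hypothetical; every
 * mapping attempt must be checked with dma_mapping_error() before use.
 *
 *      dma_addr_t addr;
 *
 *      addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE, 0);
 *      if (dma_mapping_error(dev, addr))
 *              return -ENOMEM;
 *      ... start the transfer and wait for completion ...
 *      dma_unmap_page_attrs(dev, addr, PAGE_SIZE, DMA_FROM_DEVICE, 0);
 */
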
/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                                   int nents, enum dma_data_direction dir,
                                   unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        int ents;

        BUG_ON(!valid_dma_direction(dir));
        if (dma_is_direct(ops))
                ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
        else
                ents = ops->map_sg(dev, sg, nents, dir, attrs);
        BUG_ON(ents < 0);
        debug_dma_map_sg(dev, sg, nents, ents, dir);

        return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
                                      int nents, enum dma_data_direction dir,
                                      unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        debug_dma_unmap_sg(dev, sg, nents, dir);
        if (dma_is_direct(ops))
                dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
        else if (ops->unmap_sg)
                ops->unmap_sg(dev, sg, nents, dir, attrs);
}

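/*
 * Illustrative sketch: mapping a scatterlist.  The mapped entry count
 * returned by dma_map_sg_attrs() may be smaller than 'nents' (entries can be
 * merged) and 0 means failure; the unmap must still use the original 'nents'.
 * 'dev', 'sgl', 'nents' and program_hw_descriptor() are hypothetical.
 *
 *      struct scatterlist *sg;
 *      int i, count;
 *
 *      count = dma_map_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, 0);
 *      if (!count)
 *              return -ENOMEM;
 *      for_each_sg(sgl, sg, count, i)
 *              program_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 *      ...
 *      dma_unmap_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, 0);
 */
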
static inline dma_addr_t dma_map_resource(struct device *dev,
                                          phys_addr_t phys_addr,
                                          size_t size,
                                          enum dma_data_direction dir,
                                          unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr = DMA_MAPPING_ERROR;

        BUG_ON(!valid_dma_direction(dir));

        /* Don't allow RAM to be mapped */
        if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
                return DMA_MAPPING_ERROR;

        if (dma_is_direct(ops))
                addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
        else if (ops->map_resource)
                addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

        debug_dma_map_resource(dev, phys_addr, size, dir, addr);
        return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
                                      size_t size, enum dma_data_direction dir,
                                      unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (!dma_is_direct(ops) && ops->unmap_resource)
                ops->unmap_resource(dev, addr, size, dir, attrs);
        debug_dma_unmap_resource(dev, addr, size, dir);
}

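/*
 * Illustrative sketch: dma_map_resource() is for MMIO, typically a peripheral
 * FIFO register that a DMA engine should read from or write to; RAM pages are
 * rejected.  'dev' and 'res' (a struct resource describing the FIFO) are
 * hypothetical.
 *
 *      dma_addr_t fifo_dma;
 *
 *      fifo_dma = dma_map_resource(dev, res->start, resource_size(res),
 *                                  DMA_BIDIRECTIONAL, 0);
 *      if (dma_mapping_error(dev, fifo_dma))
 *              return -ENOMEM;
 *      ...
 *      dma_unmap_resource(dev, fifo_dma, resource_size(res),
 *                         DMA_BIDIRECTIONAL, 0);
 */
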
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                                           size_t size,
                                           enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_is_direct(ops))
                dma_direct_sync_single_for_cpu(dev, addr, size, dir);
        else if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr, size, dir);
        debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                                              dma_addr_t addr, size_t size,
                                              enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_is_direct(ops))
                dma_direct_sync_single_for_device(dev, addr, size, dir);
        else if (ops->sync_single_for_device)
                ops->sync_single_for_device(dev, addr, size, dir);
        debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                    int nelems, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_is_direct(ops))
                dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
        else if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                       int nelems, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_is_direct(ops))
                dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
        else if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

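/*
 * Illustrative sketch: if the CPU needs to look at a buffer that stays mapped
 * for DMA (e.g. polling a status word written by the device), the access must
 * be bracketed with the sync helpers.  'dev', 'addr' and 'len' are
 * hypothetical.
 *
 *      dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
 *      ... the CPU may now read the buffer contents ...
 *      dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
 *      ... the device may DMA into the buffer again ...
 */
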
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        debug_dma_mapping_error(dev, dma_addr);

        if (dma_addr == DMA_MAPPING_ERROR)
                return -ENOMEM;
        return 0;
}

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle);
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction dir);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
bool dma_can_mmap(struct device *dev);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
unsigned long dma_get_merge_boundary(struct device *dev);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
                struct page *page, size_t offset, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
                struct scatterlist *sg, int nents, enum dma_data_direction dir,
                unsigned long attrs)
{
}
static inline dma_addr_t dma_map_resource(struct device *dev,
                phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
        return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
                void *vaddr, dma_addr_t dma_handle)
{
}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction dir)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
                struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
                size_t size, unsigned long attrs)
{
        return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
        return false;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
        return 0;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
        return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
        return 0;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
        return 0;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
        return 0;
}
#endif /* CONFIG_HAS_DMA */

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        /* DMA must never operate on areas that might be remapped. */
        if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
                          "rejecting DMA map of vmalloc memory\n"))
                return DMA_MAPPING_ERROR;
        debug_dma_map_single(dev, ptr, size);
        return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
                        size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
                dma_addr_t addr, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                dma_addr_t addr, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

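/*
 * Illustrative sketch: exporting a coherent buffer to user space from a
 * driver's mmap file operation with the dma_mmap_coherent() short-hand
 * above.  'struct my_drv' and its 'dev', 'cpu_addr', 'handle' and 'size'
 * fields are hypothetical.
 *
 *      static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct my_drv *p = file->private_data;
 *
 *              return dma_mmap_coherent(p->dev, vma, p->cpu_addr,
 *                                       p->handle, p->size);
 *      }
 */
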
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);

struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size,
                        pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
                        pgprot_t prot, const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

bool dma_in_atomic_pool(void *start, size_t size);
void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
bool dma_free_from_pool(void *start, size_t size);

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
                dma_addr_t dma_addr, size_t size, unsigned long attrs);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp)
{
        return dma_alloc_attrs(dev, size, dma_handle, gfp,
                        (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_handle)
{
        return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

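/*
 * Illustrative sketch: a coherent allocation for a descriptor ring that both
 * CPU and device access concurrently.  'dev' and RING_SIZE are hypothetical.
 *
 *      void *ring;
 *      dma_addr_t ring_dma;
 *
 *      ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma, GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ... tell the device about ring_dma, use 'ring' from the CPU ...
 *      dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 */
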
static inline u64 dma_get_mask(struct device *dev)
{
        if (dev->dma_mask && *dev->dma_mask)
                return *dev->dma_mask;
        return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to the
 * same value as, or a smaller value than, the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
        int rc = dma_set_mask(dev, mask);

        if (rc == 0)
                dma_set_coherent_mask(dev, mask);
        return rc;
}

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
        dev->dma_mask = &dev->coherent_dma_mask;
        return dma_set_mask_and_coherent(dev, mask);
}

/**
 * dma_addressing_limited - return whether the device is addressing-limited
 * @dev:        device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false.  Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
static inline bool dma_addressing_limited(struct device *dev)
{
        return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
                            dma_get_required_mask(dev);
}

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                const struct iommu_ops *iommu, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
                u64 size, const struct iommu_ops *iommu, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
        if (dev->dma_parms && dev->dma_parms->max_segment_size)
                return dev->dma_parms->max_segment_size;
        return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
        if (dev->dma_parms) {
                dev->dma_parms->max_segment_size = size;
                return 0;
        }
        return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
        if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
                return dev->dma_parms->segment_boundary_mask;
        return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
        if (dev->dma_parms) {
                dev->dma_parms->segment_boundary_mask = mask;
                return 0;
        }
        return -EIO;
}
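
/*
 * Illustrative sketch: a driver whose DMA engine cannot handle segments
 * larger than 64K or segments crossing a 4G boundary could advertise that
 * before mapping scatterlists (dev->dma_parms must already have been set up
 * by the bus code for these calls to succeed):
 *
 *      dma_set_max_seg_size(dev, SZ_64K);
 *      dma_set_seg_boundary(dev, DMA_BIT_MASK(32));
 */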

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
        return ARCH_DMA_MINALIGN;
#endif
        return 1;
}

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                            dma_addr_t device_addr, size_t size)
{
        return -ENOSYS;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp)
{
        return dmam_alloc_attrs(dev, size, dma_handle, gfp,
                        (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t gfp)
{
        unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

        if (gfp & __GFP_NOWARN)
                attrs |= DMA_ATTR_NO_WARN;

        return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
                               void *cpu_addr, dma_addr_t dma_addr)
{
        return dma_free_attrs(dev, size, cpu_addr, dma_addr,
                              DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
                              struct vm_area_struct *vma,
                              void *cpu_addr, dma_addr_t dma_addr,
                              size_t size)
{
        return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
                              DMA_ATTR_WRITE_COMBINE);
}

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)           (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)             (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
#endif
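
/*
 * Illustrative sketch: the DEFINE_DMA_UNMAP_*()/dma_unmap_*() helpers let a
 * driver keep the address and length needed for unmapping without wasting
 * space when CONFIG_NEED_DMA_MAP_STATE is not set.  'struct my_tx_desc' and
 * its users are hypothetical.
 *
 *      struct my_tx_desc {
 *              struct sk_buff *skb;
 *              DEFINE_DMA_UNMAP_ADDR(addr);
 *              DEFINE_DMA_UNMAP_LEN(len);
 *      };
 *
 *      dma_unmap_addr_set(desc, addr, mapping);
 *      dma_unmap_len_set(desc, len, skb->len);
 *      ...
 *      dma_unmap_single(dev, dma_unmap_addr(desc, addr),
 *                       dma_unmap_len(desc, len), DMA_TO_DEVICE);
 */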

#endif /* _LINUX_DMA_MAPPING_H */