/* linux/arch/x86/include/asm/dma-mapping.h */
   1#ifndef _ASM_X86_DMA_MAPPING_H
   2#define _ASM_X86_DMA_MAPPING_H
   3
   4/*
   5 * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and
   6 * Documentation/DMA-API.txt for documentation.
   7 */
   8
   9#include <linux/kmemcheck.h>
  10#include <linux/scatterlist.h>
  11#include <linux/dma-debug.h>
  12#include <asm/io.h>
  13#include <asm/swiotlb.h>
  14#include <linux/dma-contiguous.h>
  15
#ifdef CONFIG_ISA
/* Classic ISA devices can only DMA within the first 16 MB (24-bit). */
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
#else
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
#endif

/* Bus-address value the x86 map routines return to signal a failed mapping. */
#define DMA_ERROR_CODE  0

/* Arch-wide DMA tunables/state; defined elsewhere in the x86 DMA code. */
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

/* Global default dma_map_ops, used when a device has no per-device ops. */
extern struct dma_map_ops *dma_ops;
  30static inline struct dma_map_ops *get_dma_ops(struct device *dev)
  31{
  32#ifndef CONFIG_X86_DEV_DMA_OPS
  33        return dma_ops;
  34#else
  35        if (unlikely(!dev) || !dev->archdata.dma_ops)
  36                return dma_ops;
  37        else
  38                return dev->archdata.dma_ops;
  39#endif
  40}
  41
/* Arch hook run before a coherent allocation; may rewrite @dev and @gfp.
 * NOTE(review): presumably returns false to fail the allocation — confirm
 * against the generic dma-mapping caller. */
bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
#define arch_dma_alloc_attrs arch_dma_alloc_attrs

/* x86 provides its own dma_supported() rather than the generic version. */
#define HAVE_ARCH_DMA_SUPPORTED 1
extern int dma_supported(struct device *hwdev, u64 mask);

/* Generic coherent alloc/free helpers available to dma_map_ops
 * implementations without special allocation needs. */
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag,
					unsigned long attrs);

extern void dma_generic_free_coherent(struct device *dev, size_t size,
				      void *vaddr, dma_addr_t dma_addr,
				      unsigned long attrs);
  56#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
  57extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
  58extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
  59extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
  60#else
  61
  62static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
  63{
  64        if (!dev->dma_mask)
  65                return 0;
  66
  67        return addr + size - 1 <= *dev->dma_mask;
  68}
  69
  70static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
  71{
  72        return paddr;
  73}
  74
  75static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
  76{
  77        return daddr;
  78}
  79#endif /* CONFIG_X86_DMA_REMAP */
  80
  81static inline void
  82dma_cache_sync(struct device *dev, void *vaddr, size_t size,
  83        enum dma_data_direction dir)
  84{
  85        flush_write_buffers();
  86}
  87
  88static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
  89                                                    gfp_t gfp)
  90{
  91        unsigned long dma_mask = 0;
  92
  93        dma_mask = dev->coherent_dma_mask;
  94        if (!dma_mask)
  95                dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
  96
  97        return dma_mask;
  98}
  99
 100static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
 101{
 102        unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
 103
 104        if (dma_mask <= DMA_BIT_MASK(24))
 105                gfp |= GFP_DMA;
 106#ifdef CONFIG_X86_64
 107        if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
 108                gfp |= GFP_DMA32;
 109#endif
 110       return gfp;
 111}
 112
 113#endif
 114