linux/include/asm-x86/dma-mapping_64.h
#ifndef _X8664_DMA_MAPPING_H
#define _X8664_DMA_MAPPING_H 1

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

struct dma_mapping_ops {
        int             (*mapping_error)(dma_addr_t dma_addr);
        void*           (*alloc_coherent)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
        void            (*free_coherent)(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle);
        dma_addr_t      (*map_single)(struct device *hwdev, void *ptr,
                                size_t size, int direction);
        /* like map_single, but doesn't check the device mask */
        dma_addr_t      (*map_simple)(struct device *hwdev, char *ptr,
                                size_t size, int direction);
        void            (*unmap_single)(struct device *dev, dma_addr_t addr,
                                size_t size, int direction);
        void            (*sync_single_for_cpu)(struct device *hwdev,
                                dma_addr_t dma_handle, size_t size,
                                int direction);
        void            (*sync_single_for_device)(struct device *hwdev,
                                dma_addr_t dma_handle, size_t size,
                                int direction);
        void            (*sync_single_range_for_cpu)(struct device *hwdev,
                                dma_addr_t dma_handle, unsigned long offset,
                                size_t size, int direction);
        void            (*sync_single_range_for_device)(struct device *hwdev,
                                dma_addr_t dma_handle, unsigned long offset,
                                size_t size, int direction);
        void            (*sync_sg_for_cpu)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        void            (*sync_sg_for_device)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
                                int nents, int direction);
        void            (*unmap_sg)(struct device *hwdev,
                                struct scatterlist *sg, int nents,
                                int direction);
        int             (*dma_supported)(struct device *hwdev, u64 mask);
        int             is_phys;
};

extern dma_addr_t bad_dma_address;
extern const struct dma_mapping_ops* dma_ops;
extern int iommu_merge;

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
        if (dma_ops->mapping_error)
                return dma_ops->mapping_error(dma_addr);

        return (dma_addr == bad_dma_address);
}

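/*
 * Minimal usage sketch (illustration only, not part of this header):
 * every streaming mapping should be checked with dma_mapping_error()
 * before the address is programmed into hardware.  "dev", "buf" and
 * "len" are hypothetical driver-owned names:
 *
 *      dma_addr_t handle;
 *
 *      handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(handle))
 *              return -ENOMEM;
 */
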
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

extern void *dma_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
                              dma_addr_t dma_handle);

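/*
 * Sketch of a coherent allocation for a device-visible descriptor ring
 * (illustration only; "dev" and RING_BYTES are hypothetical).  The CPU
 * virtual address and the bus address refer to the same memory:
 *
 *      dma_addr_t ring_dma;
 *      void *ring;
 *
 *      ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */
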
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        return dma_ops->map_single(hwdev, ptr, size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        dma_ops->unmap_single(dev, addr, size, direction);
}

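/*
 * Typical streaming map/unmap pair (illustration only; names are
 * hypothetical).  The CPU must not touch the buffer between map and
 * unmap unless the dma_sync_* helpers below hand ownership back:
 *
 *      dma_addr_t dma;
 *
 *      dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dma))
 *              goto drop;
 *      ... device performs the transfer ...
 *      dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */
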
#define dma_map_page(dev,page,offset,size,dir) \
        dma_map_single((dev), page_address(page)+(offset), (size), (dir))

#define dma_unmap_page dma_unmap_single

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_for_cpu)
                dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
                                             direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_for_device)
                dma_ops->sync_single_for_device(hwdev, dma_handle, size,
                                                direction);
        flush_write_buffers();
}

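/*
 * Sketch of reusing one streaming mapping for several transfers
 * (illustration only).  Ownership moves to the CPU with ..._for_cpu
 * and back to the device with ..._for_device:
 *
 *      dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *      while (more_work) {
 *              ... device DMAs into the buffer ...
 *              dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *              ... CPU reads buf ...
 *              dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *      }
 *      dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 */
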
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_range_for_cpu)
                dma_ops->sync_single_range_for_cpu(hwdev, dma_handle,
                                                   offset, size, direction);

        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_range_for_device)
                dma_ops->sync_single_range_for_device(hwdev, dma_handle,
                                                      offset, size, direction);

        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_sg_for_cpu)
                dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_sg_for_device)
                dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
           int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        return dma_ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        dma_ops->unmap_sg(hwdev, sg, nents, direction);
}

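/*
 * Scatter-gather sketch (illustration only; "sgl", "nents" and
 * program_hw() are hypothetical).  An IOMMU may merge entries, so the
 * device must be driven by the returned count, while dma_unmap_sg()
 * still takes the original nents:
 *
 *      struct scatterlist *sg;
 *      int mapped, i;
 *
 *      mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *      if (!mapped)
 *              return -ENOMEM;
 *      for_each_sg(sgl, sg, mapped, i)
 *              program_hw(sg_dma_address(sg), sg_dma_len(sg));
 *      dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */
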
extern int dma_supported(struct device *hwdev, u64 mask);

/* same for gart, swiotlb, and nommu */
static inline int dma_get_cache_alignment(void)
{
        return boot_cpu_data.x86_clflush_size;
}

#define dma_is_consistent(d, h) 1

extern int dma_set_mask(struct device *dev, u64 mask);

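/*
 * Probe-time mask negotiation sketch (illustration only): try the full
 * 64-bit mask first, then fall back to 32 bits.  DMA_64BIT_MASK and
 * DMA_32BIT_MASK come from <linux/dma-mapping.h> in kernels of this
 * vintage:
 *
 *      if (dma_set_mask(dev, DMA_64BIT_MASK) &&
 *          dma_set_mask(dev, DMA_32BIT_MASK))
 *              return -EIO;
 */
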
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction dir)
{
        flush_write_buffers();
}

extern struct device fallback_dev;
extern int panic_on_overflow;

#endif /* _X8664_DMA_MAPPING_H */