linux/arch/sh/include/asm/dma-mapping.h
#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm-generic/dma-coherent.h>

extern struct bus_type pci_bus_type;

#define dma_supported(dev, mask)        (1)

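/*
 * dma_supported() is hardwired to 1 above, so any mask a driver asks
 * for is accepted and dma_set_mask() only has to store it.
 */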
static inline int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}

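/*
 * The coherent allocator and the cache maintenance primitive are
 * implemented out of line in the arch/sh DMA support code; only the
 * prototypes live here.
 */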
void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                    enum dma_data_direction dir);

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

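/*
 * DMA addresses are physical addresses on sh: mapping a buffer is a
 * virt_to_phys() translation plus a cache writeback/invalidate.  The
 * cache maintenance is skipped for PCI devices when the PCI DMA path
 * is cache-coherent (CONFIG_SH_PCIDMA_NONCOHERENT unset).
 *
 * A minimal, hypothetical usage sketch -- "dev", "buf" and "len" are
 * assumed to come from the caller:
 *
 *      dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, handle))
 *              return -ENOMEM;
 *      ... start the transfer using "handle" ...
 *      dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */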
static inline dma_addr_t dma_map_single(struct device *dev,
                                        void *ptr, size_t size,
                                        enum dma_data_direction dir)
{
        dma_addr_t addr = virt_to_phys(ptr);

#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        if (dev->bus == &pci_bus_type)
                return addr;
#endif
        dma_cache_sync(dev, ptr, size, dir);

        debug_dma_map_page(dev, virt_to_page(ptr),
                           (unsigned long)ptr & ~PAGE_MASK, size,
                           dir, addr, true);

        return addr;
}

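/*
 * Unmapping needs no hardware work on this port; only the dma-debug
 * bookkeeping entry is torn down.
 */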
static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
                                    size_t size, enum dma_data_direction dir)
{
        debug_dma_unmap_page(dev, addr, size, dir, true);
}

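/*
 * Scatterlist mapping is element-by-element: each entry gets the same
 * cache maintenance as dma_map_single() (unless the coherent PCI path
 * applies) and has its physical address stored as the DMA address.
 * There is no IOMMU, so entries are never coalesced and the full
 * nents is returned.
 *
 * Hypothetical usage, assuming an initialized scatterlist "sgl":
 *
 *      int count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *      ... program the controller with each of the "count" entries ...
 *      dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */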
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
                             int nents, enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
                dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
                sg[i].dma_address = sg_phys(&sg[i]);
                sg[i].dma_length = sg[i].length;
        }

        debug_dma_map_sg(dev, sg, nents, i, dir);

        return nents;
}

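/* As with dma_unmap_single(), only the debug bookkeeping is undone. */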
static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction dir)
{
        debug_dma_unmap_sg(dev, sg, nents, dir);
}

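/*
 * dma_map_page() simply reuses dma_map_single() on the page's kernel
 * virtual address; this relies on page_address() always being valid,
 * i.e. no highmem.
 */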
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction dir)
{
        return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                                  size_t size, enum dma_data_direction dir)
{
        dma_unmap_single(dev, dma_address, size, dir);
}

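/*
 * Common helper behind the dma_sync_single_*() entry points: performs
 * the cache maintenance for a mapped region, again skipping it for
 * the coherent PCI case.
 */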
static inline void __dma_sync_single(struct device *dev, dma_addr_t dma_handle,
                                     size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        if (dev->bus == &pci_bus_type)
                return;
#endif
        dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
                                         dma_addr_t dma_handle,
                                         unsigned long offset, size_t size,
                                         enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        if (dev->bus == &pci_bus_type)
                return;
#endif
        dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}

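/*
 * Common helper behind the dma_sync_sg_*() entry points; mirrors the
 * dma_map_sg() loop, minus the debug bookkeeping.
 */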
static inline void __dma_sync_sg(struct device *dev, struct scatterlist *sg,
                                 int nelems, enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
                dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
                sg[i].dma_address = sg_phys(&sg[i]);
                sg[i].dma_length = sg[i].length;
        }
}

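/*
 * The *_for_cpu()/*_for_device() pairs hand ownership of a mapped
 * buffer back and forth between CPU and device; on this port both
 * directions reduce to the same cache flush.  Hypothetical usage:
 *
 *      dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *      ... the CPU reads the freshly DMA'd data ...
 *      dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */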
static inline void dma_sync_single_for_cpu(struct device *dev,
                                           dma_addr_t dma_handle, size_t size,
                                           enum dma_data_direction dir)
{
        __dma_sync_single(dev, dma_handle, size, dir);
        debug_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                                              dma_addr_t dma_handle,
                                              size_t size,
                                              enum dma_data_direction dir)
{
        __dma_sync_single(dev, dma_handle, size, dir);
        debug_dma_sync_single_for_device(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
                                                 dma_addr_t dma_handle,
                                                 unsigned long offset,
                                                 size_t size,
                                                 enum dma_data_direction direction)
{
        dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
        debug_dma_sync_single_range_for_cpu(dev, dma_handle,
                                            offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                                                    dma_addr_t dma_handle,
                                                    unsigned long offset,
                                                    size_t size,
                                                    enum dma_data_direction direction)
{
        dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
        debug_dma_sync_single_range_for_device(dev, dma_handle,
                                               offset, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
                                       struct scatterlist *sg, int nelems,
                                       enum dma_data_direction dir)
{
        __dma_sync_sg(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
                                          struct scatterlist *sg, int nelems,
                                          enum dma_data_direction dir)
{
        __dma_sync_sg(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
        /*
         * Each processor family defines its own L1_CACHE_SHIFT;
         * L1_CACHE_BYTES wraps to it, so this is always safe.
         */
        return L1_CACHE_BYTES;
}

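/*
 * This port signals a failed mapping with a DMA address of 0, so that
 * is the error value dma_mapping_error() checks for.
 */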
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == 0;
}

#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY

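/*
 * Hooks for declaring a per-device coherent memory region (e.g.
 * on-chip SRAM) that dma_alloc_coherent() can then carve blocks from.
 */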
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                            dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
                                  dma_addr_t device_addr, size_t size);

#endif /* __ASM_SH_DMA_MAPPING_H */