linux/arch/cris/include/asm/dma-mapping.h
/* DMA mapping. Nothing tricky here, just virt_to_phys */

#ifndef _ASM_CRIS_DMA_MAPPING_H
#define _ASM_CRIS_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/kernel.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>

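/*
 * Non-coherent allocations are simply aliased to the coherent ones;
 * there is no separate non-coherent allocator on this platform.
 */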
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

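/*
 * dma_alloc_coherent()/dma_free_coherent() are only provided when PCI
 * support is enabled; their real implementations live out of line in
 * the arch code.  Without CONFIG_PCI, calling them is a bug.
 */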
#ifdef CONFIG_PCI
#include <asm-generic/dma-coherent.h>

void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle);
#else
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t flag)
{
        BUG();
        return NULL;
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
                  dma_addr_t dma_handle)
{
        BUG();
}
#endif /* CONFIG_PCI */

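/*
 * Streaming mappings: DMA addresses are identical to physical addresses
 * here (no IOMMU, no bounce buffering), so mapping a buffer is just
 * virt_to_phys() and unmapping has nothing to undo.
 */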
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        return virt_to_phys(ptr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

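/*
 * Note: this only logs the call and reports the entries as mapped;
 * the scatterlist dma_address fields are not filled in here.
 */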
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        printk("Map sg\n");
        return nents;
}

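/* As with dma_map_single(): the page's physical address plus the offset. */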
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        return page_to_phys(page) + offset;
}

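/*
 * Unmapping is a no-op beyond the direction sanity check, since no
 * per-mapping state was set up in the first place.
 */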
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

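/*
 * The dma_sync_*() operations are empty: memory handed out by this API
 * is treated as consistent (see dma_is_consistent() below), so there is
 * no cache maintenance to do here.
 */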
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                       enum dma_data_direction direction)
{
}

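/* With a 1:1 physical mapping there is no failure case to report. */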
static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

static inline int
dma_supported(struct device *dev, u64 mask)
{
        /*
         * We fall back to GFP_DMA when the mask isn't all 1s, so we
         * can't guarantee allocations that must be within a tighter
         * range than GFP_DMA.
         */
        if (mask < 0x00ffffff)
                return 0;

        return 1;
}

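/* Standard mask setting: validate via dma_supported() and store it. */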
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}

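/* Report the cache line size (1 << INTERNODE_CACHE_SHIFT) as the required alignment. */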
static inline int
dma_get_cache_alignment(void)
{
        return (1 << INTERNODE_CACHE_SHIFT);
}

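/*
 * All DMA-able memory is reported as consistent, so dma_cache_sync()
 * has nothing to flush or invalidate.
 */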
#define dma_is_consistent(d, h) (1)

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
{
}

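/*
 * Illustrative only (not part of the original header): a minimal sketch
 * of how a driver might push a one-shot transfer through this API.  The
 * function name and the DMA_TO_DEVICE choice are assumptions made for
 * the example; with the identity mapping above, the handle it gets back
 * is simply the buffer's physical address.
 */
static inline int cris_dma_example_xfer(struct device *dev, void *buf,
                                        size_t len)
{
        dma_addr_t handle;

        /* Hand the buffer to the device; on CRIS this is virt_to_phys(buf). */
        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle))
                return -EIO;

        /* ... program the controller with 'handle' and wait for completion ... */

        /* Release the (stateless) mapping again. */
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}
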
#endif /* _ASM_CRIS_DMA_MAPPING_H */