linux/arch/xtensa/include/asm/dma-mapping.h
/*
 * arch/xtensa/include/asm/dma-mapping.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_DMA_MAPPING_H
#define _XTENSA_DMA_MAPPING_H

#include <asm/cache.h>
#include <asm/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

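/* All-ones bus address, advertised as the "mapping failed" marker. */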
#define DMA_ERROR_CODE          (~(dma_addr_t)0x0)

/*
 * DMA-consistent mapping functions.
 */

extern void *consistent_alloc(int, size_t, dma_addr_t, unsigned long);
extern void consistent_free(void*, size_t, dma_addr_t);
extern void consistent_sync(void*, size_t, int);
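
/*
 * consistent_sync() is the low-level cache-maintenance primitive that the
 * streaming API below is built on: every dma_map_*() and dma_sync_*()
 * helper in this file funnels into it with the buffer's kernel virtual
 * address, its length, and the DMA direction.
 */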

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle);
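
/*
 * Example (hypothetical driver code, not part of this header): allocating
 * and freeing a coherent descriptor ring.  "my_dev" and the ring size are
 * illustrative assumptions:
 *
 *      dma_addr_t ring_bus;
 *      void *ring = dma_alloc_coherent(my_dev, PAGE_SIZE, &ring_bus,
 *                                      GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(my_dev, PAGE_SIZE, ring, ring_bus);
 */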

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        consistent_sync(ptr, size, direction);
        return virt_to_phys(ptr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}
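
/*
 * Example (hypothetical driver code): a streaming mapping for a buffer the
 * device will read.  "my_dev", "buf" and "len" are illustrative:
 *
 *      dma_addr_t bus = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
 *      ...hand "bus" to the device and wait for the transfer to finish...
 *      dma_unmap_single(my_dev, bus, len, DMA_TO_DEVICE);
 */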

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                BUG_ON(!sg_page(sg));

                sg->dma_address = sg_phys(sg);
                consistent_sync(sg_virt(sg), sg->length, direction);
        }

        return nents;
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}
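
/*
 * Example (hypothetical driver code): mapping a two-entry scatterlist for a
 * device-to-memory transfer.  "my_dev", "a", "b" and the lengths are
 * illustrative; sg_init_table() and sg_set_buf() come from
 * <linux/scatterlist.h>:
 *
 *      struct scatterlist sgl[2];
 *
 *      sg_init_table(sgl, 2);
 *      sg_set_buf(&sgl[0], a, a_len);
 *      sg_set_buf(&sgl[1], b, b_len);
 *      if (dma_map_sg(my_dev, sgl, 2, DMA_FROM_DEVICE) == 0)
 *              return -EIO;
 *      ...program the device using sg_dma_address()/sg_dma_len()...
 *      dma_unmap_sg(my_dev, sgl, 2, DMA_FROM_DEVICE);
 */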

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
        consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction direction)
{
        consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}
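
/*
 * Example (hypothetical driver code): inspecting a long-lived streaming
 * buffer between DMA transfers.  Ownership must be handed back to the
 * device before it touches the buffer again:
 *
 *      dma_sync_single_for_cpu(my_dev, bus, len, DMA_FROM_DEVICE);
 *      ...the CPU may now safely read the buffer...
 *      dma_sync_single_for_device(my_dev, bus, len, DMA_FROM_DEVICE);
 */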

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
{
        consistent_sync((void *)bus_to_virt(dma_handle) + offset, size,
                        direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
{
        consistent_sync((void *)bus_to_virt(dma_handle) + offset, size,
                        direction);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                    enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nelems; i++, sg++)
                consistent_sync(sg_virt(sg), sg->length, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                       enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nelems; i++, sg++)
                consistent_sync(sg_virt(sg), sg->length, dir);
}
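
/*
 * Mappings created by this port cannot fail (dma_map_single() simply
 * returns the buffer's physical address), so no error is ever reported.
 */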
static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

static inline int
dma_supported(struct device *dev, u64 mask)
{
        return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
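
/*
 * Example (hypothetical driver code): declaring 32-bit addressing ability
 * at probe time.  DMA_BIT_MASK() is the usual helper from
 * <linux/dma-mapping.h>:
 *
 *      if (dma_set_mask(my_dev, DMA_BIT_MASK(32)))
 *              return -EIO;
 */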

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        consistent_sync(vaddr, size, direction);
}

/* Not supported for now */
static inline int dma_mmap_coherent(struct device *dev,
                                    struct vm_area_struct *vma, void *cpu_addr,
                                    dma_addr_t dma_addr, size_t size)
{
        return -EINVAL;
}

static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                                  void *cpu_addr, dma_addr_t dma_addr,
                                  size_t size)
{
        return -EINVAL;
}

#endif  /* _XTENSA_DMA_MAPPING_H */