linux/arch/xtensa/include/asm/dma-mapping.h
/*
 * arch/xtensa/include/asm/dma-mapping.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_DMA_MAPPING_H
#define _XTENSA_DMA_MAPPING_H

#include <asm/cache.h>
#include <asm/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

/*
 * DMA-consistent mapping functions.
 */

extern void *consistent_alloc(int, size_t, dma_addr_t, unsigned long);
extern void consistent_free(void *, size_t, dma_addr_t);
extern void consistent_sync(void *, size_t, int);

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle);
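
/*
 * Typical use from a driver (illustrative sketch only; "mydev" and the
 * buffer size are made up for the example):
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(&mydev->dev, PAGE_SIZE,
 *					&ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&mydev->dev, PAGE_SIZE, ring, ring_dma);
 */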
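/*
 * Streaming mapping of a single buffer: perform cache maintenance on the
 * region and hand the device its physical address.
 */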
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        consistent_sync(ptr, size, direction);
        return virt_to_phys(ptr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}
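
/*
 * A minimal streaming-DMA sketch (the skb and device pointers are
 * hypothetical):
 *
 *	dma_addr_t buf_dma = dma_map_single(&mydev->dev, skb->data,
 *					    skb->len, DMA_TO_DEVICE);
 *	... point the hardware at buf_dma and wait for completion ...
 *	dma_unmap_single(&mydev->dev, buf_dma, skb->len, DMA_TO_DEVICE);
 */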
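/*
 * Map each scatterlist entry: record its physical address in dma_address
 * and perform cache maintenance on the underlying buffer.
 */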
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                BUG_ON(!sg_page(sg));

                sg->dma_address = sg_phys(sg);
                consistent_sync(sg_virt(sg), sg->length, direction);
        }

        return nents;
}

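/*
 * Map one page for streaming DMA.  Only the bus address is computed here;
 * unlike dma_map_single(), no cache maintenance is performed.
 */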
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

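/*
 * Hand ownership of a streaming mapping back to the CPU or to the device
 * by syncing the caches for the mapped region (or a sub-range of it).
 */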
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
        consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
                           enum dma_data_direction direction)
{
        consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
{
        consistent_sync((void *)bus_to_virt(dma_handle) + offset, size,
                        direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
{
        consistent_sync((void *)bus_to_virt(dma_handle) + offset, size,
                        direction);
}
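
/* Sync every entry of a scatterlist for the CPU or for the device. */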
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                    enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nelems; i++, sg++)
                consistent_sync(sg_virt(sg), sg->length, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                       enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nelems; i++, sg++)
                consistent_sync(sg_virt(sg), sg->length, dir);
}
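
/*
 * DMA addresses are physical addresses here, so a mapping can never fail
 * and any mask the caller asks for is accepted.
 */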
static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

static inline int
dma_supported(struct device *dev, u64 mask)
{
        return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}

static inline int
dma_get_cache_alignment(void)
{
        return L1_CACHE_BYTES;
}

#define dma_is_consistent(d, h) (1)

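/* Perform explicit CPU cache maintenance on a buffer in the given direction. */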
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        consistent_sync(vaddr, size, direction);
}

#endif  /* _XTENSA_DMA_MAPPING_H */