linux/arch/frv/include/asm/dma-mapping.h
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H

#include <linux/device.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/scatterlist.h>
#include <asm/io.h>

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

extern unsigned long __nongprelbss dma_coherent_mem_start;
extern unsigned long __nongprelbss dma_coherent_mem_end;

void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle);

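/*
 * Illustrative sketch, not part of this header: a hypothetical driver
 * might obtain and release a coherent buffer as below.  The size and
 * error handling are assumptions made for the example.
 *
 *	dma_addr_t handle;
 *	void *buf;
 *
 *	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	... tell the device about 'handle', access 'buf' from the CPU ...
 *	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */
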
/*
 * Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either dma_unmap_single() or dma_sync_single_for_cpu() is
 * performed.
 */
extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
                                 enum dma_data_direction direction);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size
 * must match what was provided in a previous dma_map_single() call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                      enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

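/*
 * Illustrative sketch, not part of this header: a hypothetical driver
 * might stream an existing buffer to the device as below; 'buf' and
 * 'len' are assumptions made for the example.
 *
 *	dma_addr_t addr;
 *
 *	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	... hand 'addr' to the device and wait for completion ...
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 */
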
/*
 * Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * dma_map_single() interface above.  Here the scatter-gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for dma_map_single() are
 * the same here.
 */
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                      enum dma_data_direction direction);

/*
 * Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * dma_unmap_single() above.
 */
static inline
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
                  enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

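/*
 * Illustrative sketch, not part of this header: a hypothetical driver
 * might map a scatterlist as below; 'sg', 'nents' and the
 * program_hw_entry() helper are assumptions made for the example.
 *
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	for (i = 0; i < count; i++)
 *		program_hw_entry(i, sg_dma_address(&sg[i]), sg_dma_len(&sg[i]));
 *	... run the transfer, then tear the mapping down ...
 *	dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
 */
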
extern
dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
                        size_t size, enum dma_data_direction direction);

static inline
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
                    enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

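/*
 * Illustrative sketch, not part of this header: a hypothetical driver
 * might map a whole page for the device to read; 'page' is an
 * assumption made for the example.
 *
 *	dma_addr_t addr;
 *
 *	addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	... hand 'addr' to the device and wait for completion ...
 *	dma_unmap_page(dev, addr, PAGE_SIZE, DMA_TO_DEVICE);
 */
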
/*
 * Transfer ownership of a streaming mapping back to the CPU
 * (..._for_cpu) or back to the device (..._for_device).  On this port
 * the CPU side needs no work; handing a buffer back to the device only
 * requires draining the write buffers.
 */
static inline
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                             enum dma_data_direction direction)
{
}

static inline
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
                                enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction direction)
{
}

static inline
void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                         enum dma_data_direction direction)
{
}

static inline
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                            enum dma_data_direction direction)
{
        flush_write_buffers();
}

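/*
 * Illustrative sketch, not part of this header: a hypothetical driver
 * reusing one streaming mapping across several transfers; 'addr' and
 * 'len' are assumptions made for the example.
 *
 *	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
 *	... the CPU may now look at the received data ...
 *	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
 *	... the device may now DMA into the buffer again ...
 */
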
/* this implementation never flags a mapping failure */
static inline
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

static inline
int dma_supported(struct device *dev, u64 mask)
{
        /*
         * we fall back to GFP_DMA when the mask isn't all 1s,
         * so we can't guarantee allocations that must be
         * within a tighter range than GFP_DMA.
         */
        if (mask < 0x00ffffff)
                return 0;

        return 1;
}

static inline
int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}

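/*
 * Illustrative sketch, not part of this header: a hypothetical probe
 * routine would declare its addressing capability before doing any
 * mapping; the 32-bit mask is an assumption made for the example.
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
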
static inline
int dma_get_cache_alignment(void)
{
        return 1 << L1_CACHE_SHIFT;
}

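/*
 * Illustrative sketch, not part of this header: a hypothetical caller
 * might round a buffer size up to the cache alignment so a streaming
 * mapping never shares a cache line with unrelated data; 'wanted' is
 * an assumption made for the example.
 *
 *	size_t len = ALIGN(wanted, dma_get_cache_alignment());
 */
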
#define dma_is_consistent(d, h) (1)

static inline
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                    enum dma_data_direction direction)
{
        flush_write_buffers();
}

#endif  /* _ASM_DMA_MAPPING_H */