/* linux/arch/arm/include/asm/dma-mapping.h */
   1#ifndef ASMARM_DMA_MAPPING_H
   2#define ASMARM_DMA_MAPPING_H
   3
   4#ifdef __KERNEL__
   5
   6#include <linux/mm_types.h>
   7#include <linux/scatterlist.h>
   8#include <linux/dma-attrs.h>
   9#include <linux/dma-debug.h>
  10
  11#include <asm-generic/dma-coherent.h>
  12#include <asm/memory.h>
  13
/* All-bits-set dma_addr_t marks a failed mapping; tested by dma_mapping_error(). */
#define DMA_ERROR_CODE  (~0)
/*
 * Default ARM dma_map_ops tables (implemented elsewhere in arch/arm):
 * arm_dma_ops performs cache maintenance; arm_coherent_dma_ops is for
 * devices that are cache-coherent with the CPU.
 */
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;
  17
  18static inline struct dma_map_ops *get_dma_ops(struct device *dev)
  19{
  20        if (dev && dev->archdata.dma_ops)
  21                return dev->archdata.dma_ops;
  22        return &arm_dma_ops;
  23}
  24
  25static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
  26{
  27        BUG_ON(!dev);
  28        dev->archdata.dma_ops = ops;
  29}
  30
  31#include <asm-generic/dma-mapping-common.h>
  32
  33static inline int dma_set_mask(struct device *dev, u64 mask)
  34{
  35        return get_dma_ops(dev)->set_dma_mask(dev, mask);
  36}
  37
/* The old page-based hook was replaced by the pfn-based one below. */
#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
/*
 * Default case: bus (DMA) addresses are a fixed linear translation of
 * physical addresses, via the mach-level __pfn_to_bus()/__bus_to_pfn()
 * and __virt_to_bus()/__bus_to_virt() macros; @dev is ignored.
 */
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
        return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
/*
 * The machine class overrides the translation (e.g. per-device bus
 * offsets); forward to the mach-provided hooks, which may use @dev.
 */
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return __arch_virt_to_dma(dev, addr);
}
#endif
  88
  89/*
  90 * DMA errors are defined by all-bits-set in the DMA address.
  91 */
  92static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  93{
  94        debug_dma_mapping_error(dev, dma_addr);
  95        return dma_addr == DMA_ERROR_CODE;
  96}
  97
  98/*
  99 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 100 * function so drivers using this API are highlighted with build warnings.
 101 */
 102static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
 103                dma_addr_t *handle, gfp_t gfp)
 104{
 105        return NULL;
 106}
 107
 108static inline void dma_free_noncoherent(struct device *dev, size_t size,
 109                void *cpu_addr, dma_addr_t handle)
 110{
 111}
 112
/* Non-zero if devices with @mask can DMA to all memory; arch-implemented. */
extern int dma_supported(struct device *dev, u64 mask);

/* Default set_dma_mask method used by arm_dma_ops. */
extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);
 116
/**
 * arm_dma_alloc - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 * @attrs: optional attributes that specify mapping properties
 *
 * Allocate some memory for a device for performing DMA.  This function
 * allocates pages, and will return the CPU-viewed address, and sets @handle
 * to be the device-viewed address.
 */
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                           gfp_t gfp, struct dma_attrs *attrs);

/* Plain coherent allocation is the attrs variant with no attributes. */
#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
 132
 133static inline void *dma_alloc_attrs(struct device *dev, size_t size,
 134                                       dma_addr_t *dma_handle, gfp_t flag,
 135                                       struct dma_attrs *attrs)
 136{
 137        struct dma_map_ops *ops = get_dma_ops(dev);
 138        void *cpu_addr;
 139        BUG_ON(!ops);
 140
 141        cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
 142        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
 143        return cpu_addr;
 144}
 145
/**
 * arm_dma_free - free memory allocated by arm_dma_alloc
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * arm_dma_alloc().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call executing are illegal.
 */
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
                         dma_addr_t handle, struct dma_attrs *attrs);

/* Plain coherent free is the attrs variant with no attributes. */
#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
 164
 165static inline void dma_free_attrs(struct device *dev, size_t size,
 166                                     void *cpu_addr, dma_addr_t dma_handle,
 167                                     struct dma_attrs *attrs)
 168{
 169        struct dma_map_ops *ops = get_dma_ops(dev);
 170        BUG_ON(!ops);
 171
 172        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
 173        ops->free(dev, size, cpu_addr, dma_handle, attrs);
 174}
 175
/**
 * arm_dma_mmap - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size,
                        struct dma_attrs *attrs);
 192
 193static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
 194                                       dma_addr_t *dma_handle, gfp_t flag)
 195{
 196        DEFINE_DMA_ATTRS(attrs);
 197        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
 198        return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
 199}
 200
 201static inline void dma_free_writecombine(struct device *dev, size_t size,
 202                                     void *cpu_addr, dma_addr_t dma_handle)
 203{
 204        DEFINE_DMA_ATTRS(attrs);
 205        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
 206        return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
 207}
 208
/*
 * This can be called during early boot to increase the size of the atomic
 * coherent DMA pool above the default value of 256KiB. It must be called
 * before postcore_initcall.
 */
extern void __init init_dma_coherent_pool_size(unsigned long size);
 215
/*
 * For SA-1111, IXP425, and ADI systems  the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 *
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
                unsigned long, int (*)(struct device *, dma_addr_t, size_t));

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);
 254
/*
 * The scatter list versions of the above methods.
 * These are the arm_dma_ops method implementations for mapping,
 * unmapping and cache-synchronising scatter-gather lists, plus the
 * get_sgtable helper for exporting a coherent buffer as an sg_table.
 */
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                struct dma_attrs *attrs);
 271
 272#endif /* __KERNEL__ */
 273#endif
 274