/* linux/arch/arm/include/asm/dma-mapping.h */
#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

/*
 * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
 * used internally by the DMA-mapping API to provide DMA addresses. They
 * must not be used by drivers.
 */
#ifndef __arch_page_to_dma

#if !defined(CONFIG_HIGHMEM)
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
        return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
        return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
}
#else
#error "this machine class needs to define __arch_page_to_dma to use HIGHMEM"
#endif

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
        return __arch_page_to_dma(dev, page);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return __arch_virt_to_dma(dev, addr);
}
#endif
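
/*
 * Example: a hypothetical machine class with a fixed bus offset could
 * provide these hooks from its mach/memory.h before this header is
 * included (the names and the 0x80000000 offset are illustrative only):
 *
 *      #define __arch_page_to_dma(dev, page)   \
 *              ((dma_addr_t)page_to_phys(page) - 0x80000000)
 *      #define __arch_dma_to_virt(dev, addr)   \
 *              ((void *)__phys_to_virt((addr) + 0x80000000))
 *      #define __arch_virt_to_dma(dev, addr)   \
 *              ((dma_addr_t)__virt_to_phys((unsigned long)(addr)) - 0x80000000)
 */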

/*
 * DMA-consistent mapping functions.  These allocate/free a region of
 * uncached, non-write-buffered, mapped memory space for use with DMA
 * devices.  This is the "generic" version.  The PCI specific version
 * is in pci.h.
 *
 * Note: Drivers should NOT use these functions directly, as doing so
 * will break platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support instead - see the dma_sync_* functions below.
 */
extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
extern void dma_cache_maint_page(struct page *page, unsigned long offset,
                                 size_t size, int rw);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24 bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
        if (mask < ISA_DMA_THRESHOLD)
                return 0;
        return 1;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;

        *dev->dma_mask = dma_mask;

        return 0;
}
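
/*
 * Typical use from a driver probe routine (sketch; the 24-bit mask
 * matches the 0x00ffffff example above, and DMA_BIT_MASK() is assumed
 * from <linux/dma-mapping.h>):
 *
 *      if (dma_set_mask(dev, DMA_BIT_MASK(24)))
 *              return -EIO;
 */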

static inline int dma_get_cache_alignment(void)
{
        return 32;
}

static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
{
        return !!arch_is_coherent();
}

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == ~0;
}
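
/*
 * Sketch of the expected error check after a streaming mapping
 * (illustrative driver code; "buf" and "len" are hypothetical):
 *
 *      dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, dma))
 *              return -ENOMEM;
 */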

/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
                dma_addr_t *handle, gfp_t gfp)
{
        return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * are illegal once this call has begun executing.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
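
/*
 * Typical pairing (sketch; "priv" is a hypothetical driver structure
 * holding a descriptor ring of DESC_SIZE bytes):
 *
 *      priv->desc = dma_alloc_coherent(dev, DESC_SIZE, &priv->desc_dma,
 *                                      GFP_KERNEL);
 *      if (!priv->desc)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(dev, DESC_SIZE, priv->desc, priv->desc_dma);
 */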

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
                void *, dma_addr_t, size_t);
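
/*
 * Sketch of use from a driver mmap handler (illustrative; "priv" is a
 * hypothetical structure holding the results of dma_alloc_coherent()):
 *
 *      static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct foo_priv *priv = file->private_data;
 *
 *              return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *                                       priv->handle, priv->size);
 *      }
 */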

/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
                gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
        dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
                void *, dma_addr_t, size_t);
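
/*
 * Write-combining allocations suit frame buffers and similar
 * CPU-writes-device-reads buffers; a sketch ("fbi" is hypothetical
 * framebuffer driver data):
 *
 *      fbi->screen_base = dma_alloc_writecombine(dev, fbi->map_size,
 *                                                &fbi->map_dma, GFP_KERNEL);
 *      ...
 *      dma_free_writecombine(dev, fbi->map_size, fbi->screen_base,
 *                            fbi->map_dma);
 */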

#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems, the dma-mapping functions are
 * "magic" and utilize bounce buffers as needed to work around limited
 * DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB
 * (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing.  The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
                unsigned long);
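
/*
 * Sketch of registration from platform code (the 512/4096 pool sizes
 * are illustrative only):
 *
 *      if (dmabounce_register_dev(dev, 512, 4096))
 *              dev_err(dev, "unable to register with dmabounce\n");
 */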

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a
 * device that was previously registered with dmabounce_register_dev
 * is removed from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not.  The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 */
extern int dma_needs_bounce(struct device *, dma_addr_t, size_t);
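
/*
 * Sketch of a platform implementation, for a hypothetical machine whose
 * inbound PCI window covers only the first 64MB of bus addresses:
 *
 *      int dma_needs_bounce(struct device *dev, dma_addr_t dma, size_t size)
 *      {
 *              return dev->bus == &pci_bus_type && (dma + size) > SZ_64M;
 *      }
 */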

/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
                enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
                unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
                enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
                size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
                size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
        unsigned long offset, size_t size, enum dma_data_direction dir)
{
        return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
        unsigned long offset, size_t size, enum dma_data_direction dir)
{
        return 1;
}

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
                size_t size, enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        if (!arch_is_coherent())
                dma_cache_maint(cpu_addr, size, dir);

        return virt_to_dma(dev, cpu_addr);
}
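
/*
 * Sketch of the streaming pattern this implements (illustrative driver
 * code; "buf" and "len" are hypothetical):
 *
 *      dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      ... hand "dma" to the device and wait for completion ...
 *      dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */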

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size, enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        if (!arch_is_coherent())
                dma_cache_maint_page(page, offset, size, dir);

        return page_to_dma(dev, page) + offset;
}

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        /* nothing to do */
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_unmap_single(dev, handle, size, dir);
}

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA mapping,
 * you must call this function before doing so.  At the point you give
 * the DMA address back to the device, you must first perform a
 * dma_sync_single_for_device(), and then the device again owns the
 * buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
                dma_addr_t handle, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                dma_addr_t handle, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
                return;

        if (!arch_is_coherent())
                dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
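
/*
 * Sketch of the ownership hand-off for a long-lived mapping
 * (illustrative; "buf" and "len" are hypothetical):
 *
 *      dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *      ... device writes into the buffer ...
 *      dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *      ... the CPU may now examine buf ...
 *      dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *      ... the device owns the buffer again ...
 */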

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
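
/*
 * Sketch of scatter-gather use (illustrative; NENTS is hypothetical):
 *
 *      struct scatterlist sg[NENTS];
 *      int i, count;
 *
 *      sg_init_table(sg, NENTS);
 *      ... fill entries with sg_set_buf() or sg_set_page() ...
 *      count = dma_map_sg(dev, sg, NENTS, DMA_TO_DEVICE);
 *      for (i = 0; i < count; i++)
 *              ... program sg_dma_address(&sg[i]) / sg_dma_len(&sg[i]) ...
 *      dma_unmap_sg(dev, sg, NENTS, DMA_TO_DEVICE);
 */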

#endif /* __KERNEL__ */
#endif