linux/arch/blackfin/kernel/dma-mapping.c
/*
 * Dynamic DMA mapping support
 *
 * Copyright 2005-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/scatterlist.h>
#include <asm/cacheflush.h>
#include <asm/bfin-global.h>

static spinlock_t dma_page_lock;
static unsigned int *dma_page;
static unsigned int dma_pages;
static unsigned long dma_base;
static unsigned long dma_size;
static unsigned int dma_initialized;

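/*
 * The uncached DMA region is managed by a trivial first-fit allocator:
 * dma_page[] holds one word per page (0 = free, 1 = in use) for the
 * dma_pages pages starting at dma_base.
 */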
void dma_alloc_init(unsigned long start, unsigned long end)
{
        spin_lock_init(&dma_page_lock);
        dma_initialized = 0;

        dma_page = (unsigned int *)__get_free_page(GFP_KERNEL);
        memset(dma_page, 0, PAGE_SIZE);
        dma_base = PAGE_ALIGN(start);
        dma_size = PAGE_ALIGN(end) - PAGE_ALIGN(start);
        dma_pages = dma_size >> PAGE_SHIFT;
        memset((void *)dma_base, 0, DMA_UNCACHED_REGION);
        dma_initialized = 1;

        printk(KERN_INFO "%s: dma_page @ 0x%p - %u pages at 0x%08lx\n", __func__,
               dma_page, dma_pages, dma_base);
}
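
/*
 * Note that if no one has called dma_alloc_init() by the time the first
 * allocation arrives, __alloc_dma_pages() below lazily initializes the
 * allocator over the DMA_UNCACHED_REGION bytes at the top of RAM
 * (ending at _ramend).
 */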

static inline unsigned int get_pages(size_t size)
{
        return ((size - 1) >> PAGE_SHIFT) + 1;
}
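
/*
 * get_pages() rounds a byte count up to whole pages, e.g. with 4 KiB
 * pages: get_pages(1) == 1, get_pages(PAGE_SIZE) == 1, and
 * get_pages(PAGE_SIZE + 1) == 2.  (A size of 0 would underflow the
 * "size - 1" above, so callers are expected to pass size > 0.)
 */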

static unsigned long __alloc_dma_pages(unsigned int pages)
{
        unsigned long ret = 0, flags;
        int i, count = 0;

        if (dma_initialized == 0)
                dma_alloc_init(_ramend - DMA_UNCACHED_REGION, _ramend);

        spin_lock_irqsave(&dma_page_lock, flags);

        for (i = 0; i < dma_pages;) {
                if (dma_page[i++] == 0) {
                        if (++count == pages) {
                                while (count--)
                                        dma_page[--i] = 1;
                                ret = dma_base + (i << PAGE_SHIFT);
                                break;
                        }
                } else
                        count = 0;
        }
        spin_unlock_irqrestore(&dma_page_lock, flags);
        return ret;
}
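
/*
 * The scan above is a linear first-fit search: "count" tracks the length
 * of the current run of free pages and is reset whenever a busy page is
 * hit, so an allocation costs O(dma_pages) in the worst case.  Freeing
 * (below) simply clears the words again; the size must be supplied by
 * the caller, since the allocator does not record it.
 */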

static void __free_dma_pages(unsigned long addr, unsigned int pages)
{
        unsigned long page = (addr - dma_base) >> PAGE_SHIFT;
        unsigned long flags;
        int i;

        if ((page + pages) > dma_pages) {
                printk(KERN_ERR "%s: freeing outside range.\n", __func__);
                BUG();
        }

        spin_lock_irqsave(&dma_page_lock, flags);
        for (i = page; i < page + pages; i++)
                dma_page[i] = 0;
        spin_unlock_irqrestore(&dma_page_lock, flags);
}

void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret;

        ret = (void *)__alloc_dma_pages(get_pages(size));

        if (ret) {
                memset(ret, 0, size);
                *dma_handle = virt_to_phys(ret);
        }

        return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);
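
/*
 * Illustrative use only (not part of this file): a driver would pair the
 * two calls like so; "mydev" and "MY_BUF_SIZE" are hypothetical names.
 *
 *      dma_addr_t handle;
 *      void *buf = dma_alloc_coherent(mydev, MY_BUF_SIZE, &handle, GFP_KERNEL);
 *
 *      if (!buf)
 *              return -ENOMEM;
 *      // ... program the peripheral with "handle", touch "buf" from the CPU ...
 *      dma_free_coherent(mydev, MY_BUF_SIZE, buf, handle);
 *
 * Note that gfp is accepted but unused here: memory always comes from
 * the uncached region set up by dma_alloc_init().
 */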

void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
                  dma_addr_t dma_handle)
{
        __free_dma_pages((unsigned long)vaddr, get_pages(size));
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Streaming DMA mappings.  Blackfin has no MMU, so a buffer's virtual
 * address is its bus address and "mapping" only needs to invalidate the
 * buffer in the data cache; the unmap routines below are no-op stubs
 * kept for existing drivers.
 */

dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        invalidate_dcache_range((unsigned long)ptr,
                        (unsigned long)ptr + size);

        return (dma_addr_t) ptr;
}
EXPORT_SYMBOL(dma_map_single);
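
/*
 * Illustrative use only: for streaming DMA a driver might do (with
 * hypothetical "mydev", "buf", "len"):
 *
 *      dma_addr_t addr = dma_map_single(mydev, buf, len, DMA_TO_DEVICE);
 *      // ... run the transfer using "addr" ...
 *      dma_unmap_single(mydev, addr, len, DMA_TO_DEVICE);
 *
 * Since virtual == physical here, the returned handle is just the
 * buffer's address; the only real work is the invalidation above.
 */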

int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                sg->dma_address = (dma_addr_t) sg_virt(sg);

                invalidate_dcache_range(sg_dma_address(sg),
                                        sg_dma_address(sg) +
                                        sg_dma_len(sg));
        }

        return nents;
}
EXPORT_SYMBOL(dma_map_sg);
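
/*
 * Illustrative use only: mapping a scatterlist (hypothetical "mydev",
 * "sgl", "nents", "start_dma"):
 *
 *      int i, count = dma_map_sg(mydev, sgl, nents, DMA_FROM_DEVICE);
 *      struct scatterlist *sg = sgl;
 *
 *      for (i = 0; i < count; i++, sg++)
 *              start_dma(sg_dma_address(sg), sg_dma_len(sg));
 *
 * Here the returned count always equals nents, and each entry's
 * dma_address is filled in with its virtual (== physical) address.
 */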

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}
EXPORT_SYMBOL(dma_unmap_single);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                int nhwentries, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}
EXPORT_SYMBOL(dma_unmap_sg);