linux/drivers/base/dma-coherent.c
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
        void            *virt_base;
        dma_addr_t      device_base;
        unsigned long   pfn_base;
        int             size;
        int             flags;
        unsigned long   *bitmap;
        spinlock_t      spinlock;
};

static bool dma_init_coherent_memory(
        phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
        struct dma_coherent_mem **mem)
{
        struct dma_coherent_mem *dma_mem = NULL;
        void __iomem *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
                goto out;
        if (!size)
                goto out;

        if (flags & DMA_MEMORY_MAP)
                mem_base = memremap(phys_addr, size, MEMREMAP_WC);
        else
                mem_base = ioremap(phys_addr, size);
        if (!mem_base)
                goto out;

        dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dma_mem)
                goto out;
        dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dma_mem->bitmap)
                goto out;

        dma_mem->virt_base = mem_base;
        dma_mem->device_base = device_addr;
        dma_mem->pfn_base = PFN_DOWN(phys_addr);
        dma_mem->size = pages;
        dma_mem->flags = flags;
        spin_lock_init(&dma_mem->spinlock);

        *mem = dma_mem;
        return true;

out:
        kfree(dma_mem);
        if (mem_base) {
                if (flags & DMA_MEMORY_MAP)
                        memunmap(mem_base);
                else
                        iounmap(mem_base);
        }
        return false;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
        if (!mem)
                return;

        if (mem->flags & DMA_MEMORY_MAP)
                memunmap(mem->virt_base);
        else
                iounmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
                                      struct dma_coherent_mem *mem)
{
        if (dev->dma_mem)
                return -EBUSY;

        dev->dma_mem = mem;
        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

        return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        struct dma_coherent_mem *mem;

        if (!dma_init_coherent_memory(phys_addr, device_addr, size, flags,
                                      &mem))
                return 0;

        if (dma_assign_coherent_memory(dev, mem) == 0)
                return flags & DMA_MEMORY_MAP ? DMA_MEMORY_MAP : DMA_MEMORY_IO;

        dma_release_coherent_memory(mem);
        return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

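/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * that owns a chunk of device-local memory can hand it to the DMA layer from
 * its probe routine.  The resource index, the assumption that the bus address
 * equals the physical address, and the error codes are made up for the
 * example.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              struct resource *res;
 *
 *              res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 *              if (!res)
 *                      return -ENODEV;
 *
 *              if (!dma_declare_coherent_memory(&pdev->dev, res->start,
 *                                               res->start,
 *                                               resource_size(res),
 *                                               DMA_MEMORY_MAP |
 *                                               DMA_MEMORY_EXCLUSIVE))
 *                      return -ENXIO;
 *
 *              return 0;
 *      }
 *
 * Subsequent dma_alloc_coherent() calls on &pdev->dev are then served from
 * this pool (exclusively, because of DMA_MEMORY_EXCLUSIVE), and the pool is
 * torn down again with dma_release_declared_memory() on remove.
 */
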
void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dma_release_coherent_memory(mem);
        dev->dma_mem = NULL;
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        unsigned long flags;
        int pos, err;

        size += device_addr & ~PAGE_MASK;

        if (!mem)
                return ERR_PTR(-EINVAL);

        spin_lock_irqsave(&mem->spinlock, flags);
        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
        err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
        spin_unlock_irqrestore(&mem->spinlock, flags);

        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

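/*
 * Usage sketch (illustrative, with made-up constants): after declaring a
 * pool, a driver can pin a fixed window inside it - for instance a region
 * the hardware requires for its own descriptors - so that later
 * dma_alloc_coherent() calls can never hand that window out.
 *
 *      void *desc;
 *
 *      desc = dma_mark_declared_memory_occupied(dev, FOO_DESC_BUS_ADDR,
 *                                               FOO_DESC_SIZE);
 *      if (IS_ERR(desc))
 *              return PTR_ERR(desc);
 */
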
/**
 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
 *
 * @dev:        device from which we allocate memory
 * @size:       size of requested memory area
 * @dma_handle: This will be filled with the correct dma handle
 * @ret:        This pointer will be filled with the virtual address
 *              of the allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
                                       dma_addr_t *dma_handle, void **ret)
{
        struct dma_coherent_mem *mem;
        int order = get_order(size);
        unsigned long flags;
        int pageno;

        if (!dev)
                return 0;
        mem = dev->dma_mem;
        if (!mem)
                return 0;

        *ret = NULL;
        spin_lock_irqsave(&mem->spinlock, flags);

        if (unlikely(size > (mem->size << PAGE_SHIFT)))
                goto err;

        pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
        if (unlikely(pageno < 0))
                goto err;

        /*
         * Memory was found in the per-device area.
         */
        *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
        *ret = mem->virt_base + (pageno << PAGE_SHIFT);
        if (mem->flags & DMA_MEMORY_MAP)
                memset(*ret, 0, size);
        else
                memset_io(*ret, 0, size);
        spin_unlock_irqrestore(&mem->spinlock, flags);

        return 1;

err:
        spin_unlock_irqrestore(&mem->spinlock, flags);
        /*
         * In the case where the allocation can not be satisfied from the
         * per-device area, try to fall back to generic memory if the
         * constraints allow it.
         */
        return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_coherent);

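/*
 * Caller sketch (illustrative, not taken from any particular architecture):
 * an arch or dma_map_ops dma_alloc_coherent() implementation is expected to
 * consult the per-device pool first and only fall back to its generic
 * allocator when this helper returns 0.  The foo_* names are stand-ins.
 *
 *      static void *foo_dma_alloc(struct device *dev, size_t size,
 *                                 dma_addr_t *dma_handle, gfp_t gfp,
 *                                 unsigned long attrs)
 *      {
 *              void *vaddr;
 *
 *              if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr))
 *                      return vaddr;
 *
 *              return foo_alloc_from_generic_memory(dev, size, dma_handle,
 *                                                   gfp, attrs);
 *      }
 */
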
/**
 * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
 * @dev:        device from which the memory was allocated
 * @order:      the order of pages allocated
 * @vaddr:      virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller
 * should proceed with releasing memory from generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

        if (mem && vaddr >= mem->virt_base && vaddr <
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                unsigned long flags;

                spin_lock_irqsave(&mem->spinlock, flags);
                bitmap_release_region(mem->bitmap, page, order);
                spin_unlock_irqrestore(&mem->spinlock, flags);
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);

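/*
 * Caller sketch (illustrative): the free path mirrors the allocation path
 * above - try the per-device pool first, fall back otherwise.  The foo_*
 * names are stand-ins for the arch's normal release routine.
 *
 *      static void foo_dma_free(struct device *dev, size_t size,
 *                               void *vaddr, dma_addr_t dma_handle,
 *                               unsigned long attrs)
 *      {
 *              if (dma_release_from_coherent(dev, get_order(size), vaddr))
 *                      return;
 *
 *              foo_free_to_generic_memory(dev, size, vaddr, dma_handle);
 *      }
 */
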
/**
 * dma_mmap_from_coherent() - try to mmap the memory allocated from
 * per-device coherent memory pool to userspace
 * @dev:        device from which the memory was allocated
 * @vma:        vm_area for the userspace memory
 * @vaddr:      cpu address returned by dma_alloc_from_coherent
 * @size:       size of the memory buffer allocated by dma_alloc_from_coherent
 * @ret:        result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
                           void *vaddr, size_t size, int *ret)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

        if (mem && vaddr >= mem->virt_base && vaddr + size <=
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                unsigned long off = vma->vm_pgoff;
                int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
                int count = size >> PAGE_SHIFT;

                *ret = -ENXIO;
                if (off < count && user_count <= count - off) {
                        unsigned long pfn = mem->pfn_base + start + off;
                        *ret = remap_pfn_range(vma, vma->vm_start, pfn,
                                               user_count << PAGE_SHIFT,
                                               vma->vm_page_prot);
                }
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(dma_mmap_from_coherent);

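/*
 * Caller sketch (illustrative): an arch dma_mmap implementation typically
 * lets the per-device pool claim the vma first and only falls back to its
 * generic remapping code when this helper returns 0.  The foo_* names are
 * stand-ins.
 *
 *      static int foo_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 *                              void *cpu_addr, dma_addr_t dma_addr,
 *                              size_t size, unsigned long attrs)
 *      {
 *              int ret;
 *
 *              if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
 *                      return ret;
 *
 *              return foo_mmap_from_generic_memory(dev, vma, cpu_addr,
 *                                                  dma_addr, size);
 *      }
 */
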
/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
        struct dma_coherent_mem *mem = rmem->priv;

        if (!mem &&
            !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
                                      DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
                                      &mem)) {
                pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
                        &rmem->base, (unsigned long)rmem->size / SZ_1M);
                return -ENODEV;
        }
        rmem->priv = mem;
        dma_assign_coherent_memory(dev, mem);
        return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
                                    struct device *dev)
{
        dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
        .device_init    = rmem_dma_device_init,
        .device_release = rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
        unsigned long node = rmem->fdt_node;

        if (of_get_flat_dt_prop(node, "reusable", NULL))
                return -EINVAL;

#ifdef CONFIG_ARM
        if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
                pr_err("Reserved memory: regions without no-map are not yet supported\n");
                return -EINVAL;
        }
#endif

        rmem->ops = &rmem_dma_ops;
        pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
                &rmem->base, (unsigned long)rmem->size / SZ_1M);
        return 0;
}
RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
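
/*
 * Device-tree sketch (illustrative; node names, addresses and the consumer
 * compatible string are made up): a non-reusable "shared-dma-pool" region
 * plus a device referencing it via memory-region is what triggers
 * rmem_dma_setup() at boot and rmem_dma_device_init() when the device is
 * bound.
 *
 *      reserved-memory {
 *              #address-cells = <1>;
 *              #size-cells = <1>;
 *              ranges;
 *
 *              frame_buffer: framebuffer@78000000 {
 *                      compatible = "shared-dma-pool";
 *                      reg = <0x78000000 0x800000>;
 *                      no-map;
 *              };
 *      };
 *
 *      video@12300000 {
 *              compatible = "vendor,foo-video";
 *              memory-region = <&frame_buffer>;
 *      };
 */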
#endif