linux/kernel/dma/coherent.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>

struct dma_coherent_mem {
	void		*virt_base;		/* kernel (CPU) mapping of the pool */
	dma_addr_t	device_base;		/* bus address seen by the device */
	unsigned long	pfn_base;		/* first CPU PFN of the pool */
	int		size;			/* pool size in pages */
	unsigned long	*bitmap;		/* one bit per page; set = allocated */
	spinlock_t	spinlock;		/* protects the bitmap */
	bool		use_dev_dma_pfn_offset;	/* translate via phys_to_dma() */
};

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	return NULL;
}

/*
 * Return the base address of the pool as the device must be programmed
 * with it, applying the device's DMA offset when the pool was declared
 * with one.
 */
static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem *mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return phys_to_dma(dev, PFN_PHYS(mem->pfn_base));
	return mem->device_base;
}

static struct dma_coherent_mem *dma_init_coherent_memory(phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size, bool use_dma_pfn_offset)
{
	struct dma_coherent_mem *dma_mem;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
	void *mem_base;

	if (!size)
		return ERR_PTR(-EINVAL);

	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	if (!mem_base)
		return ERR_PTR(-EINVAL);

	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem)
		goto out_unmap_membase;
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap)
		goto out_free_dma_mem;

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->use_dev_dma_pfn_offset = use_dma_pfn_offset;
	spin_lock_init(&dma_mem->spinlock);

	return dma_mem;

out_free_dma_mem:
	kfree(dma_mem);
out_unmap_membase:
	memunmap(mem_base);
	pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %zu MiB\n",
		&phys_addr, size / SZ_1M);
	return ERR_PTR(-ENOMEM);
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	memunmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (!dev)
		return -ENODEV;

	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	return 0;
}

/*
 * Declare a region of memory to be handed out by dma_alloc_coherent() when it
 * is asked for coherent memory for this device.  This shall only be used
 * from platform code, usually based on the device tree description.
 *
 * phys_addr is the CPU physical address to which the memory is currently
 * assigned (this will be remapped with memremap() so the CPU can access the
 * region).
 *
 * device_addr is the DMA address the device needs to be programmed with to
 * actually address this memory (this will be handed out as the dma_addr_t in
 * dma_alloc_coherent()).
 *
 * size is the size of the area (must be a multiple of PAGE_SIZE).
 *
 * As a simplification for the platforms, only *one* such region of memory may
 * be declared per device.
 */
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem;
	int ret;

	mem = dma_init_coherent_memory(phys_addr, device_addr, size, false);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	ret = dma_assign_coherent_memory(dev, mem);
	if (ret)
		dma_release_coherent_memory(mem);
	return ret;
}
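
/*
 * Illustrative sketch (not part of this file): how platform code might
 * declare a dedicated pool for a device. The addresses and size below are
 * made up; size must be a multiple of PAGE_SIZE.
 *
 *	ret = dma_declare_coherent_memory(&pdev->dev,
 *					  0x90000000,	// CPU physical address
 *					  0x90000000,	// device DMA address
 *					  SZ_1M);
 *	if (ret)
 *		dev_err(&pdev->dev, "failed to declare DMA pool\n");
 *
 * Subsequent dma_alloc_coherent() calls for this device are then satisfied
 * from this region instead of the generic allocators.
 */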

static void *__dma_alloc_from_coherent(struct device *dev,
				       struct dma_coherent_mem *mem,
				       ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = dma_get_device_base(dev, mem) +
			((dma_addr_t)pageno << PAGE_SHIFT);
	ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	memset(ret, 0, size);
	return ret;
err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		to allocated area.
 *
 * This function should be only called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	if (!mem)
		return 0;

	*ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
	return 1;
}
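
/*
 * Illustrative sketch (not part of this file): the calling convention the
 * kernel-doc above describes, as a generic allocator would use it. Note
 * that a non-zero return is final even when *ret is NULL; a device with a
 * private pool does not fall back to the generic allocators.
 *
 *	void *ret;
 *
 *	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &ret))
 *		return ret;
 *	// otherwise continue with the generic allocation path
 */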

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
				       int order, void *vaddr)
{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(mem, order, vaddr);
}
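
/*
 * Illustrative sketch (not part of this file): the release side is keyed by
 * allocation order rather than size, so a generic free path would pass
 * get_order(size) for a buffer obtained with dma_alloc_from_dev_coherent():
 *
 *	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
 *		return;		// pages went back to the device pool
 *	// otherwise free via the generic path
 */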

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
 * @size:	size of the memory buffer allocated
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
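
/*
 * Illustrative sketch (not part of this file): how a dma_mmap_*() style
 * helper would consult the pool first; only the return value tells the
 * caller whether *ret is already final:
 *
 *	int ret;
 *
 *	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *		return ret;	// buffer lives in the device pool
 *	// otherwise map it via the generic path
 */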

#ifdef CONFIG_DMA_GLOBAL_POOL
static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
				     dma_addr_t *dma_handle)
{
	if (!dma_coherent_default_memory)
		return NULL;

	return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
					 dma_handle);
}

int dma_release_from_global_coherent(int order, void *vaddr)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_release_from_coherent(dma_coherent_default_memory, order,
			vaddr);
}

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
				   size_t size, int *ret)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
					vaddr, size, ret);
}

int dma_init_global_coherent(phys_addr_t phys_addr, size_t size)
{
	struct dma_coherent_mem *mem;

	mem = dma_init_coherent_memory(phys_addr, phys_addr, size, true);
	if (IS_ERR(mem))
		return PTR_ERR(mem);
	dma_coherent_default_memory = mem;
	pr_info("DMA: default coherent area is set\n");
	return 0;
}
#endif /* CONFIG_DMA_GLOBAL_POOL */
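
/*
 * Illustrative sketch (not part of this file): with CONFIG_DMA_GLOBAL_POOL
 * enabled, a generic allocator can fall back to the global pool for devices
 * without a private one:
 *
 *	void *ret = dma_alloc_from_global_coherent(dev, size, dma_handle);
 *	if (ret)
 *		return ret;
 *	// else allocate from the normal page-based paths
 */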

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#ifdef CONFIG_DMA_GLOBAL_POOL
static struct reserved_mem *dma_reserved_default_memory __initdata;
#endif

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	if (!rmem->priv) {
		struct dma_coherent_mem *mem;

		mem = dma_init_coherent_memory(rmem->base, rmem->base,
					       rmem->size, true);
		if (IS_ERR(mem))
			return PTR_ERR(mem);
		rmem->priv = mem;
	}
	dma_assign_coherent_memory(dev, rmem->priv);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	if (dev)
		dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}
#endif

#ifdef CONFIG_DMA_GLOBAL_POOL
	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %lu MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
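
/*
 * Illustrative device-tree snippet (not part of this file; addresses and
 * sizes are made up) of a node this handler accepts. "no-map" is mandatory
 * on ARM, "reusable" is rejected here (that is the CMA path), and
 * "linux,dma-default" marks the region as the global coherent pool:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		dma_pool: dma-pool@78000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x78000000 0x800000>;
 *			no-map;
 *		};
 *	};
 */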

#ifdef CONFIG_DMA_GLOBAL_POOL
static int __init dma_init_reserved_memory(void)
{
	if (!dma_reserved_default_memory)
		return -ENOMEM;
	return dma_init_global_coherent(dma_reserved_default_memory->base,
					dma_reserved_default_memory->size);
}
core_initcall(dma_init_reserved_memory);
#endif /* CONFIG_DMA_GLOBAL_POOL */

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif