linux/kernel/dma/coherent.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
        void            *virt_base;             /* kernel mapping of the pool */
        dma_addr_t      device_base;            /* base address seen by the device */
        unsigned long   pfn_base;               /* first PFN of the backing memory */
        int             size;                   /* pool size in pages */
        unsigned long   *bitmap;                /* one bit per page, set = allocated */
        spinlock_t      spinlock;               /* protects the bitmap */
        bool            use_dev_dma_pfn_offset; /* derive device base from dma_pfn_offset */
};

static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
        if (dev && dev->dma_mem)
                return dev->dma_mem;
        return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
                                             struct dma_coherent_mem * mem)
{
        if (mem->use_dev_dma_pfn_offset)
                return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
        else
                return mem->device_base;
}

static int dma_init_coherent_memory(phys_addr_t phys_addr,
                dma_addr_t device_addr, size_t size,
                struct dma_coherent_mem **mem)
{
        struct dma_coherent_mem *dma_mem = NULL;
        void *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
        int ret;

        if (!size) {
                ret = -EINVAL;
                goto out;
        }

        mem_base = memremap(phys_addr, size, MEMREMAP_WC);
        if (!mem_base) {
                ret = -EINVAL;
                goto out;
        }
        dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dma_mem) {
                ret = -ENOMEM;
                goto out;
        }
        dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dma_mem->bitmap) {
                ret = -ENOMEM;
                goto out;
        }

        dma_mem->virt_base = mem_base;
        dma_mem->device_base = device_addr;
        dma_mem->pfn_base = PFN_DOWN(phys_addr);
        dma_mem->size = pages;
        spin_lock_init(&dma_mem->spinlock);

        *mem = dma_mem;
        return 0;

out:
        kfree(dma_mem);
        if (mem_base)
                memunmap(mem_base);
        return ret;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
        if (!mem)
                return;

        memunmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
                                      struct dma_coherent_mem *mem)
{
        if (!dev)
                return -ENODEV;

        if (dev->dma_mem)
                return -EBUSY;

        dev->dma_mem = mem;
        return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem;
        int ret;

        ret = dma_init_coherent_memory(phys_addr, device_addr, size, &mem);
        if (ret)
                return ret;

        ret = dma_assign_coherent_memory(dev, mem);
        if (ret)
                dma_release_coherent_memory(mem);
        return ret;
}
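
/*
 * Illustrative sketch (not part of the original file): how a driver might
 * hand a dedicated, device-local memory window to this allocator so that
 * subsequent dma_alloc_coherent() calls on the device are served from it.
 * The device pointer, address and size below are made-up placeholders.
 */
static __maybe_unused int example_declare_sram_pool(struct device *dev)
{
        /* Hypothetical 1 MiB on-chip SRAM, identity-mapped for the device. */
        phys_addr_t sram_phys = 0x30000000;

        return dma_declare_coherent_memory(dev, sram_phys, sram_phys, SZ_1M);
}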

static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
                ssize_t size, dma_addr_t *dma_handle)
{
        int order = get_order(size);
        unsigned long flags;
        int pageno;
        void *ret;

        spin_lock_irqsave(&mem->spinlock, flags);

        if (unlikely(size > (mem->size << PAGE_SHIFT)))
                goto err;

        pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
        if (unlikely(pageno < 0))
                goto err;

        /*
         * Memory was found in the coherent area.
         */
        *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
        ret = mem->virt_base + (pageno << PAGE_SHIFT);
        spin_unlock_irqrestore(&mem->spinlock, flags);
        memset(ret, 0, size);
        return ret;
err:
        spin_unlock_irqrestore(&mem->spinlock, flags);
        return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:        device from which we allocate memory
 * @size:       size of requested memory area
 * @dma_handle: This will be filled with the correct dma handle
 * @ret:        This pointer will be filled with the virtual address
 *              of the allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
                dma_addr_t *dma_handle, void **ret)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        if (!mem)
                return 0;

        *ret = __dma_alloc_from_coherent(mem, size, dma_handle);
        return 1;
}
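
/*
 * Illustrative sketch (not from the original file) of the calling convention
 * described above: a hypothetical arch-level allocator tries the per-device
 * pool first and only falls back to its generic path when no pool exists.
 */
static __maybe_unused void *example_alloc_coherent(struct device *dev,
                size_t size, dma_addr_t *dma_handle)
{
        void *vaddr;

        /*
         * A non-zero return means the per-device pool handled the request;
         * @vaddr is then the answer, and may legitimately be NULL if the
         * pool was too full to satisfy it.
         */
        if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &vaddr))
                return vaddr;

        /* A zero return means "no pool": use the generic allocator instead. */
        return NULL;    /* placeholder for the generic allocation path */
}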

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
{
        if (!dma_coherent_default_memory)
                return NULL;

        return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
                        dma_handle);
}

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
                                       int order, void *vaddr)
{
        if (mem && vaddr >= mem->virt_base && vaddr <
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                unsigned long flags;

                spin_lock_irqsave(&mem->spinlock, flags);
                bitmap_release_region(mem->bitmap, page, order);
                spin_unlock_irqrestore(&mem->spinlock, flags);
                return 1;
        }
        return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:        device from which the memory was allocated
 * @order:      the order of pages allocated
 * @vaddr:      virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        return __dma_release_from_coherent(mem, order, vaddr);
}
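
/*
 * Illustrative counterpart to the allocation sketch above (again not part of
 * the original file): a hypothetical caller offers the buffer back to the
 * per-device pool first and only frees it elsewhere when that returns 0.
 */
static __maybe_unused void example_free_coherent(struct device *dev,
                size_t size, void *vaddr)
{
        int order = get_order(size);

        if (dma_release_from_dev_coherent(dev, order, vaddr))
                return;         /* the buffer came from the per-device pool */

        /* Otherwise the buffer must be freed through the generic path. */
}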

int dma_release_from_global_coherent(int order, void *vaddr)
{
        if (!dma_coherent_default_memory)
                return 0;

        return __dma_release_from_coherent(dma_coherent_default_memory, order,
                        vaddr);
}

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
                struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
        if (mem && vaddr >= mem->virt_base && vaddr + size <=
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                unsigned long off = vma->vm_pgoff;
                int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                int user_count = vma_pages(vma);
                int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

                *ret = -ENXIO;
                if (off < count && user_count <= count - off) {
                        unsigned long pfn = mem->pfn_base + start + off;
                        *ret = remap_pfn_range(vma, vma->vm_start, pfn,
                                               user_count << PAGE_SHIFT,
                                               vma->vm_page_prot);
                }
                return 1;
        }
        return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:        device from which the memory was allocated
 * @vma:        vm_area for the userspace memory
 * @vaddr:      cpu address returned by dma_alloc_from_dev_coherent
 * @size:       size of the memory buffer allocated
 * @ret:        result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
                           void *vaddr, size_t size, int *ret)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
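
/*
 * Illustrative sketch (not from the original file) of the mmap convention
 * documented above: a hypothetical dma_mmap-style helper lets the per-device
 * pool claim the buffer first, and only maps it by other means when the
 * helper returns 0.
 */
static __maybe_unused int example_mmap_coherent(struct device *dev,
                struct vm_area_struct *vma, void *vaddr, size_t size)
{
        int ret;

        if (dma_mmap_from_dev_coherent(dev, vma, vaddr, size, &ret))
                return ret;     /* pool memory: @ret is the remap_pfn_range() result */

        /* Not pool memory: a real caller would fall back to its generic path. */
        return -ENXIO;
}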

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
                                   size_t size, int *ret)
{
        if (!dma_coherent_default_memory)
                return 0;

        return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
                                        vaddr, size, ret);
}

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static struct reserved_mem *dma_reserved_default_memory __initdata;

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
        struct dma_coherent_mem *mem = rmem->priv;
        int ret;

        if (!mem) {
                ret = dma_init_coherent_memory(rmem->base, rmem->base,
                                               rmem->size, &mem);
                if (ret) {
                        pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
                                &rmem->base, (unsigned long)rmem->size / SZ_1M);
                        return ret;
                }
        }
        mem->use_dev_dma_pfn_offset = true;
        rmem->priv = mem;
        dma_assign_coherent_memory(dev, mem);
        return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
                                    struct device *dev)
{
        if (dev)
                dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
        .device_init    = rmem_dma_device_init,
        .device_release = rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
        unsigned long node = rmem->fdt_node;

        if (of_get_flat_dt_prop(node, "reusable", NULL))
                return -EINVAL;

#ifdef CONFIG_ARM
        if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
                pr_err("Reserved memory: regions without no-map are not yet supported\n");
                return -EINVAL;
        }

        if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
                WARN(dma_reserved_default_memory,
                     "Reserved memory: region for default DMA coherent area is redefined\n");
                dma_reserved_default_memory = rmem;
        }
#endif

        rmem->ops = &rmem_dma_ops;
        pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
                &rmem->base, (unsigned long)rmem->size / SZ_1M);
        return 0;
}
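
/*
 * For reference (not part of the original file), a reserved-memory node that
 * this setup hook accepts looks roughly like the sketch below; the node name,
 * address and size are made up, while the properties mirror the checks in
 * rmem_dma_setup() above ("reusable" must be absent, "no-map" is required on
 * ARM):
 *
 *      reserved-memory {
 *              #address-cells = <1>;
 *              #size-cells = <1>;
 *              ranges;
 *
 *              coherent_pool: pool@58000000 {
 *                      compatible = "shared-dma-pool";
 *                      reg = <0x58000000 0x400000>;
 *                      no-map;
 *              };
 *      };
 *
 * A device node then refers to the pool with "memory-region = <&coherent_pool>;",
 * and of_reserved_mem_device_init() on that device ends up calling
 * rmem_dma_device_init() above.
 */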

static int __init dma_init_reserved_memory(void)
{
        const struct reserved_mem_ops *ops;
        int ret;

        if (!dma_reserved_default_memory)
                return -ENOMEM;

        ops = dma_reserved_default_memory->ops;

        /*
         * We rely on rmem_dma_device_init() not propagating the error from
         * dma_assign_coherent_memory() when the device is NULL.
         */
        ret = ops->device_init(dma_reserved_default_memory, NULL);

        if (!ret) {
                dma_coherent_default_memory = dma_reserved_default_memory->priv;
                pr_info("DMA: default coherent area is set\n");
        }

        return ret;
}

core_initcall(dma_init_reserved_memory);

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif