linux/kernel/dma/coherent.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
        void            *virt_base;
        dma_addr_t      device_base;
        unsigned long   pfn_base;
        int             size;
        unsigned long   *bitmap;
        spinlock_t      spinlock;
        bool            use_dev_dma_pfn_offset;
};

static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
        if (dev && dev->dma_mem)
                return dev->dma_mem;
        return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
                                             struct dma_coherent_mem *mem)
{
        if (mem->use_dev_dma_pfn_offset)
                return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
        else
                return mem->device_base;
}
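
/*
 * Illustrative example (hypothetical numbers, assuming 4 KiB pages): with
 * pfn_base = 0x80000 (CPU physical address 0x80000000) and a device whose
 * dma_pfn_offset is 0x40000, the helper above returns
 * (0x80000 - 0x40000) << PAGE_SHIFT = 0x40000000, i.e. the pool's base
 * address as seen from the device rather than from the CPU.
 */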

static int dma_init_coherent_memory(phys_addr_t phys_addr,
                dma_addr_t device_addr, size_t size,
                struct dma_coherent_mem **mem)
{
        struct dma_coherent_mem *dma_mem = NULL;
        void *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
        int ret;

        if (!size) {
                ret = -EINVAL;
                goto out;
        }

        mem_base = memremap(phys_addr, size, MEMREMAP_WC);
        if (!mem_base) {
                ret = -EINVAL;
                goto out;
        }
        dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dma_mem) {
                ret = -ENOMEM;
                goto out;
        }
        dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dma_mem->bitmap) {
                ret = -ENOMEM;
                goto out;
        }

        dma_mem->virt_base = mem_base;
        dma_mem->device_base = device_addr;
        dma_mem->pfn_base = PFN_DOWN(phys_addr);
        dma_mem->size = pages;
        spin_lock_init(&dma_mem->spinlock);

        *mem = dma_mem;
        return 0;

out:
        kfree(dma_mem);
        if (mem_base)
                memunmap(mem_base);
        return ret;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
        if (!mem)
                return;

        memunmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
                                      struct dma_coherent_mem *mem)
{
        if (!dev)
                return -ENODEV;

        if (dev->dma_mem)
                return -EBUSY;

        dev->dma_mem = mem;
        return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem;
        int ret;

        ret = dma_init_coherent_memory(phys_addr, device_addr, size, &mem);
        if (ret)
                return ret;

        ret = dma_assign_coherent_memory(dev, mem);
        if (ret)
                dma_release_coherent_memory(mem);
        return ret;
}
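
/*
 * Sketch of a typical caller (illustrative only; the driver, the resource
 * index and the use of device-local SRAM are assumptions, not taken from
 * this file):
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              struct resource *res;
 *
 *              res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 *              if (!res)
 *                      return -ENODEV;
 *
 *              // Serve all coherent allocations of this device from the
 *              // SRAM window; here the bus address equals the CPU
 *              // physical address.
 *              return dma_declare_coherent_memory(&pdev->dev, res->start,
 *                                                 res->start,
 *                                                 resource_size(res));
 *      }
 */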

static void *__dma_alloc_from_coherent(struct device *dev,
                                       struct dma_coherent_mem *mem,
                                       ssize_t size, dma_addr_t *dma_handle)
{
        int order = get_order(size);
        unsigned long flags;
        int pageno;
        void *ret;

        spin_lock_irqsave(&mem->spinlock, flags);

        if (unlikely(size > (mem->size << PAGE_SHIFT)))
                goto err;

        pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
        if (unlikely(pageno < 0))
                goto err;

        /*
         * Memory was found in the coherent area.
         */
        *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
        ret = mem->virt_base + (pageno << PAGE_SHIFT);
        spin_unlock_irqrestore(&mem->spinlock, flags);
        memset(ret, 0, size);
        return ret;
err:
        spin_unlock_irqrestore(&mem->spinlock, flags);
        return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:        device from which we allocate memory
 * @size:       size of requested memory area
 * @dma_handle: This will be filled with the correct dma handle
 * @ret:        This pointer will be filled with the virtual address of the
 *              allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
                dma_addr_t *dma_handle, void **ret)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        if (!mem)
                return 0;

        *ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
        return 1;
}
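
/*
 * Sketch of the calling convention described above (illustrative; the
 * surrounding allocation helper is assumed, not part of this file):
 *
 *      void *cpu_addr;
 *
 *      if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
 *              return cpu_addr;        // may be NULL if the pool is exhausted
 *
 *      // otherwise fall back to allocating from generic memory
 */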

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
                                     dma_addr_t *dma_handle)
{
        if (!dma_coherent_default_memory)
                return NULL;

        return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
                                         dma_handle);
}

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
                                       int order, void *vaddr)
{
        if (mem && vaddr >= mem->virt_base && vaddr <
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                unsigned long flags;

                spin_lock_irqsave(&mem->spinlock, flags);
                bitmap_release_region(mem->bitmap, page, order);
                spin_unlock_irqrestore(&mem->spinlock, flags);
                return 1;
        }
        return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:        device from which the memory was allocated
 * @order:      the order of pages allocated
 * @vaddr:      virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        return __dma_release_from_coherent(mem, order, vaddr);
}
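
/*
 * Sketch of the matching free path (illustrative; caller-side code, not
 * part of this file):
 *
 *      if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
 *              return;         // the buffer came from the per-device pool
 *
 *      // otherwise release through the generic allocator
 */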

int dma_release_from_global_coherent(int order, void *vaddr)
{
        if (!dma_coherent_default_memory)
                return 0;

        return __dma_release_from_coherent(dma_coherent_default_memory, order,
                        vaddr);
}

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
                struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
        if (mem && vaddr >= mem->virt_base && vaddr + size <=
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                unsigned long off = vma->vm_pgoff;
                int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                int user_count = vma_pages(vma);
                int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

                *ret = -ENXIO;
                if (off < count && user_count <= count - off) {
                        unsigned long pfn = mem->pfn_base + start + off;
                        *ret = remap_pfn_range(vma, vma->vm_start, pfn,
                                               user_count << PAGE_SHIFT,
                                               vma->vm_page_prot);
                }
                return 1;
        }
        return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:        device from which the memory was allocated
 * @vma:        vm_area for the userspace memory
 * @vaddr:      cpu address returned by dma_alloc_from_dev_coherent
 * @size:       size of the memory buffer allocated
 * @ret:        result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
                           void *vaddr, size_t size, int *ret)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
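
/*
 * Sketch of the intended use from an arch or driver mmap path (illustrative;
 * caller-side code, not part of this file):
 *
 *      int ret;
 *
 *      if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *              return ret;     // ret holds the remap_pfn_range() result
 *
 *      // otherwise map the buffer from generic memory
 */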

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
                                   size_t size, int *ret)
{
        if (!dma_coherent_default_memory)
                return 0;

        return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
                                        vaddr, size, ret);
}

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static struct reserved_mem *dma_reserved_default_memory __initdata;

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
        struct dma_coherent_mem *mem = rmem->priv;
        int ret;

        if (!mem) {
                ret = dma_init_coherent_memory(rmem->base, rmem->base,
                                               rmem->size, &mem);
                if (ret) {
                        pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
                                &rmem->base, (unsigned long)rmem->size / SZ_1M);
                        return ret;
                }
        }
        mem->use_dev_dma_pfn_offset = true;
        rmem->priv = mem;
        dma_assign_coherent_memory(dev, mem);
        return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
                                    struct device *dev)
{
        if (dev)
                dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
        .device_init    = rmem_dma_device_init,
        .device_release = rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
        unsigned long node = rmem->fdt_node;

        if (of_get_flat_dt_prop(node, "reusable", NULL))
                return -EINVAL;

#ifdef CONFIG_ARM
        if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
                pr_err("Reserved memory: regions without no-map are not yet supported\n");
                return -EINVAL;
        }

        if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
                WARN(dma_reserved_default_memory,
                     "Reserved memory: region for default DMA coherent area is redefined\n");
                dma_reserved_default_memory = rmem;
        }
#endif

        rmem->ops = &rmem_dma_ops;
        pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
                &rmem->base, (unsigned long)rmem->size / SZ_1M);
        return 0;
}
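
/*
 * Example device tree fragment that this setup hook would match
 * (illustrative; the node name, addresses and sizes are hypothetical):
 *
 *      reserved-memory {
 *              #address-cells = <1>;
 *              #size-cells = <1>;
 *              ranges;
 *
 *              sram_dma: sram@90000000 {
 *                      compatible = "shared-dma-pool";
 *                      reg = <0x90000000 0x100000>;
 *                      no-map;
 *              };
 *      };
 *
 * A device node then refers to the pool with "memory-region = <&sram_dma>;",
 * which leads to rmem_dma_device_init() being called for that device.
 */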

static int __init dma_init_reserved_memory(void)
{
        const struct reserved_mem_ops *ops;
        int ret;

        if (!dma_reserved_default_memory)
                return -ENOMEM;

        ops = dma_reserved_default_memory->ops;

        /*
         * We rely on rmem_dma_device_init() not propagating the error from
         * dma_assign_coherent_memory() when it is called for a NULL device.
         */
        ret = ops->device_init(dma_reserved_default_memory, NULL);

        if (!ret) {
                dma_coherent_default_memory = dma_reserved_default_memory->priv;
                pr_info("DMA: default coherent area is set\n");
        }

        return ret;
}

core_initcall(dma_init_reserved_memory);

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif