linux/kernel/dma/coherent.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
        void            *virt_base;
        dma_addr_t      device_base;
        unsigned long   pfn_base;
        int             size;
        unsigned long   *bitmap;
        spinlock_t      spinlock;
        bool            use_dev_dma_pfn_offset;
};

static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
        if (dev && dev->dma_mem)
                return dev->dma_mem;
        return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
                                             struct dma_coherent_mem *mem)
{
        if (mem->use_dev_dma_pfn_offset)
                return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
        else
                return mem->device_base;
}

static int dma_init_coherent_memory(phys_addr_t phys_addr,
                dma_addr_t device_addr, size_t size,
                struct dma_coherent_mem **mem)
{
        struct dma_coherent_mem *dma_mem = NULL;
        void *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
        int ret;

        if (!size) {
                ret = -EINVAL;
                goto out;
        }

        mem_base = memremap(phys_addr, size, MEMREMAP_WC);
        if (!mem_base) {
                ret = -EINVAL;
                goto out;
        }
        dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dma_mem) {
                ret = -ENOMEM;
                goto out;
        }
        dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dma_mem->bitmap) {
                ret = -ENOMEM;
                goto out;
        }

        dma_mem->virt_base = mem_base;
        dma_mem->device_base = device_addr;
        dma_mem->pfn_base = PFN_DOWN(phys_addr);
        dma_mem->size = pages;
        spin_lock_init(&dma_mem->spinlock);

        *mem = dma_mem;
        return 0;

out:
        kfree(dma_mem);
        if (mem_base)
                memunmap(mem_base);
        return ret;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
        if (!mem)
                return;

        memunmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
                                      struct dma_coherent_mem *mem)
{
        if (!dev)
                return -ENODEV;

        if (dev->dma_mem)
                return -EBUSY;

        dev->dma_mem = mem;
        return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem;
        int ret;

        ret = dma_init_coherent_memory(phys_addr, device_addr, size, &mem);
        if (ret)
                return ret;

        ret = dma_assign_coherent_memory(dev, mem);
        if (ret)
                dma_release_coherent_memory(mem);
        return ret;
}
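
/*
 * Illustrative sketch (not part of this file): a platform driver could hand
 * a device-local SRAM region to the coherent allocator roughly like this.
 * The foo_probe() name, the resource index, and the choice of res->start as
 * both the physical and the device-visible address are assumptions made for
 * the example.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              struct resource *res;
 *              int ret;
 *
 *              res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 *              if (!res)
 *                      return -ENODEV;
 *
 *              ret = dma_declare_coherent_memory(&pdev->dev, res->start,
 *                                                res->start,
 *                                                resource_size(res));
 *              if (ret)
 *                      return ret;
 *
 *              return 0;
 *      }
 */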

static void *__dma_alloc_from_coherent(struct device *dev,
                                       struct dma_coherent_mem *mem,
                                       ssize_t size, dma_addr_t *dma_handle)
{
        int order = get_order(size);
        unsigned long flags;
        int pageno;
        void *ret;

        spin_lock_irqsave(&mem->spinlock, flags);

        if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))
                goto err;

        pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
        if (unlikely(pageno < 0))
                goto err;

        /*
         * Memory was found in the coherent area.
         */
        *dma_handle = dma_get_device_base(dev, mem) +
                        ((dma_addr_t)pageno << PAGE_SHIFT);
        ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT);
        spin_unlock_irqrestore(&mem->spinlock, flags);
        memset(ret, 0, size);
        return ret;
err:
        spin_unlock_irqrestore(&mem->spinlock, flags);
        return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:        device from which we allocate memory
 * @size:       size of requested memory area
 * @dma_handle: This will be filled with the correct dma handle
 * @ret:        This pointer will be filled with the virtual address of the
 *              allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
                dma_addr_t *dma_handle, void **ret)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        if (!mem)
                return 0;

        *ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
        return 1;
}
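
/*
 * Illustrative sketch (not part of this file): an architecture's coherent
 * allocation path is expected to try the per-device pool first and only fall
 * back to its generic allocator when this returns 0. Note that a non-zero
 * return with *ret == NULL means the pool was exhausted and the allocation
 * must fail rather than fall back. The arch_dma_alloc() name and the
 * arch_alloc_from_generic_pool() helper are placeholders, not real kernel
 * interfaces.
 *
 *      void *arch_dma_alloc(struct device *dev, size_t size,
 *                           dma_addr_t *dma_handle, gfp_t gfp)
 *      {
 *              void *vaddr;
 *
 *              if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &vaddr))
 *                      return vaddr;
 *
 *              return arch_alloc_from_generic_pool(dev, size, dma_handle, gfp);
 *      }
 */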

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
                                     dma_addr_t *dma_handle)
{
        if (!dma_coherent_default_memory)
                return NULL;

        return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
                                         dma_handle);
}

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
                                       int order, void *vaddr)
{
        if (mem && vaddr >= mem->virt_base && vaddr <
                   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                unsigned long flags;

                spin_lock_irqsave(&mem->spinlock, flags);
                bitmap_release_region(mem->bitmap, page, order);
                spin_unlock_irqrestore(&mem->spinlock, flags);
                return 1;
        }
        return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:        device from which the memory was allocated
 * @order:      the order of pages allocated
 * @vaddr:      virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        return __dma_release_from_coherent(mem, order, vaddr);
}
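
/*
 * Illustrative sketch (not part of this file): the matching free path mirrors
 * the allocation side, returning early when the buffer came from the
 * per-device pool. The arch_dma_free() name and arch_free_to_generic_pool()
 * helper are placeholders, not real kernel interfaces.
 *
 *      void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 *                         dma_addr_t dma_handle)
 *      {
 *              int order = get_order(size);
 *
 *              if (dma_release_from_dev_coherent(dev, order, vaddr))
 *                      return;
 *
 *              arch_free_to_generic_pool(dev, size, vaddr, dma_handle);
 *      }
 */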

int dma_release_from_global_coherent(int order, void *vaddr)
{
        if (!dma_coherent_default_memory)
                return 0;

        return __dma_release_from_coherent(dma_coherent_default_memory, order,
                        vaddr);
}

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
                struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
        if (mem && vaddr >= mem->virt_base && vaddr + size <=
                   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
                unsigned long off = vma->vm_pgoff;
                int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                unsigned long user_count = vma_pages(vma);
                int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

                *ret = -ENXIO;
                if (off < count && user_count <= count - off) {
                        unsigned long pfn = mem->pfn_base + start + off;
                        *ret = remap_pfn_range(vma, vma->vm_start, pfn,
                                               user_count << PAGE_SHIFT,
                                               vma->vm_page_prot);
                }
                return 1;
        }
        return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:        device from which the memory was allocated
 * @vma:        vm_area for the userspace memory
 * @vaddr:      cpu address returned by dma_alloc_from_dev_coherent
 * @size:       size of the memory buffer allocated
 * @ret:        result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
                           void *vaddr, size_t size, int *ret)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
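
/*
 * Illustrative sketch (not part of this file): an mmap implementation would
 * likewise try the per-device pool before doing its generic mapping. The
 * arch_dma_mmap() name and arch_mmap_from_generic_pool() helper are
 * placeholders, not real kernel interfaces.
 *
 *      int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 *                        void *cpu_addr, dma_addr_t dma_addr, size_t size)
 *      {
 *              int ret;
 *
 *              if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *                      return ret;
 *
 *              return arch_mmap_from_generic_pool(dev, vma, cpu_addr,
 *                                                 dma_addr, size);
 *      }
 */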

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
                                   size_t size, int *ret)
{
        if (!dma_coherent_default_memory)
                return 0;

        return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
                                        vaddr, size, ret);
}

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static struct reserved_mem *dma_reserved_default_memory __initdata;

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
        struct dma_coherent_mem *mem = rmem->priv;
        int ret;

        if (!mem) {
                ret = dma_init_coherent_memory(rmem->base, rmem->base,
                                               rmem->size, &mem);
                if (ret) {
                        pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
                                &rmem->base, (unsigned long)rmem->size / SZ_1M);
                        return ret;
                }
        }
        mem->use_dev_dma_pfn_offset = true;
        rmem->priv = mem;
        dma_assign_coherent_memory(dev, mem);
        return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
                                    struct device *dev)
{
        if (dev)
                dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
        .device_init    = rmem_dma_device_init,
        .device_release = rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
        unsigned long node = rmem->fdt_node;

        if (of_get_flat_dt_prop(node, "reusable", NULL))
                return -EINVAL;

#ifdef CONFIG_ARM
        if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
                pr_err("Reserved memory: regions without no-map are not yet supported\n");
                return -EINVAL;
        }

        if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
                WARN(dma_reserved_default_memory,
                     "Reserved memory: region for default DMA coherent area is redefined\n");
                dma_reserved_default_memory = rmem;
        }
#endif

        rmem->ops = &rmem_dma_ops;
        pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
                &rmem->base, (unsigned long)rmem->size / SZ_1M);
        return 0;
}
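
/*
 * Illustrative sketch (not part of this file): a device tree reserved-memory
 * node matched by rmem_dma_setup() might look like the following; the node
 * name, address and size are made up for the example.
 *
 *      reserved-memory {
 *              #address-cells = <1>;
 *              #size-cells = <1>;
 *              ranges;
 *
 *              dma_pool: dma_pool@48000000 {
 *                      compatible = "shared-dma-pool";
 *                      reg = <0x48000000 0x400000>;
 *                      no-map;
 *              };
 *      };
 *
 * A device node then points at the pool with "memory-region = <&dma_pool>;",
 * and its driver binds to it via of_reserved_mem_device_init(), which ends up
 * calling rmem_dma_device_init() above.
 */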

static int __init dma_init_reserved_memory(void)
{
        const struct reserved_mem_ops *ops;
        int ret;

        if (!dma_reserved_default_memory)
                return -ENOMEM;

        ops = dma_reserved_default_memory->ops;

        /*
         * We rely on rmem_dma_device_init() not propagating the error from
         * dma_assign_coherent_memory() when it is called with a NULL device.
         */
        ret = ops->device_init(dma_reserved_default_memory, NULL);

        if (!ret) {
                dma_coherent_default_memory = dma_reserved_default_memory->priv;
                pr_info("DMA: default coherent area is set\n");
        }

        return ret;
}

core_initcall(dma_init_reserved_memory);

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif
