linux/kernel/dma/coherent.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
        void            *virt_base;
        dma_addr_t      device_base;
        unsigned long   pfn_base;
        int             size;
        unsigned long   *bitmap;
        spinlock_t      spinlock;
        bool            use_dev_dma_pfn_offset;
};

static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
        if (dev && dev->dma_mem)
                return dev->dma_mem;
        return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
                                             struct dma_coherent_mem * mem)
{
        if (mem->use_dev_dma_pfn_offset)
                return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
        else
                return mem->device_base;
}

static int dma_init_coherent_memory(phys_addr_t phys_addr,
                dma_addr_t device_addr, size_t size,
                struct dma_coherent_mem **mem)
{
        struct dma_coherent_mem *dma_mem = NULL;
        void *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
        int ret;

        if (!size) {
                ret = -EINVAL;
                goto out;
        }

        mem_base = memremap(phys_addr, size, MEMREMAP_WC);
        if (!mem_base) {
                ret = -EINVAL;
                goto out;
        }
        dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dma_mem) {
                ret = -ENOMEM;
                goto out;
        }
        dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dma_mem->bitmap) {
                ret = -ENOMEM;
                goto out;
        }

        dma_mem->virt_base = mem_base;
        dma_mem->device_base = device_addr;
        dma_mem->pfn_base = PFN_DOWN(phys_addr);
        dma_mem->size = pages;
        spin_lock_init(&dma_mem->spinlock);

        *mem = dma_mem;
        return 0;

out:
        kfree(dma_mem);
        if (mem_base)
                memunmap(mem_base);
        return ret;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
        if (!mem)
                return;

        memunmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
                                      struct dma_coherent_mem *mem)
{
        if (!dev)
                return -ENODEV;

        if (dev->dma_mem)
                return -EBUSY;

        dev->dma_mem = mem;
        return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem;
        int ret;

        ret = dma_init_coherent_memory(phys_addr, device_addr, size, &mem);
        if (ret)
                return ret;

        ret = dma_assign_coherent_memory(dev, mem);
        if (ret)
                dma_release_coherent_memory(mem);
        return ret;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
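
/*
 * Example: a platform driver carving out a dedicated coherent pool for its
 * device.  This is an illustrative sketch only; the addresses and size are
 * made up, and real callers normally take them from platform data or
 * firmware.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              int ret;
 *
 *              // Hypothetical: 1 MiB of device-local RAM, visible to the
 *              // device and the CPU at bus/physical address 0x30000000.
 *              ret = dma_declare_coherent_memory(&pdev->dev, 0x30000000,
 *                                                0x30000000, SZ_1M);
 *              if (ret)
 *                      return ret;
 *
 *              // dma_alloc_coherent() against &pdev->dev is now satisfied
 *              // from this pool via dma_alloc_from_dev_coherent().
 *              return 0;
 *      }
 */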

void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dma_release_coherent_memory(mem);
        dev->dma_mem = NULL;
}
EXPORT_SYMBOL(dma_release_declared_memory);

static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
                ssize_t size, dma_addr_t *dma_handle)
{
        int order = get_order(size);
        unsigned long flags;
        int pageno;
        void *ret;

        spin_lock_irqsave(&mem->spinlock, flags);

        if (unlikely(size > (mem->size << PAGE_SHIFT)))
                goto err;

        pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
        if (unlikely(pageno < 0))
                goto err;

        /*
         * Memory was found in the coherent area.
         */
        *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
        ret = mem->virt_base + (pageno << PAGE_SHIFT);
        spin_unlock_irqrestore(&mem->spinlock, flags);
        memset(ret, 0, size);
        return ret;
err:
        spin_unlock_irqrestore(&mem->spinlock, flags);
        return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:        device from which we allocate memory
 * @size:       size of the requested memory area
 * @dma_handle: This will be filled with the correct dma handle
 * @ret:        This pointer will be filled with the virtual address
 *              of the allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools; see the
 * usage sketch after the function body.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
                dma_addr_t *dma_handle, void **ret)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        if (!mem)
                return 0;

        *ret = __dma_alloc_from_coherent(mem, size, dma_handle);
        return 1;
}
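
/*
 * Example: how an architecture's dma_alloc_coherent() path is expected to
 * consume the return convention above.  This is an illustrative sketch, not
 * a real implementation; arch_alloc_pages_for_dma() is a made-up stand-in
 * for the generic allocation path.
 *
 *      void *arch_dma_alloc(struct device *dev, size_t size,
 *                           dma_addr_t *dma_handle, gfp_t gfp)
 *      {
 *              void *vaddr;
 *
 *              // A device that owns a coherent pool must be served from it,
 *              // even if the pool is exhausted and vaddr ends up NULL.
 *              if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &vaddr))
 *                      return vaddr;
 *
 *              // No per-device pool: fall back to the generic allocator.
 *              return arch_alloc_pages_for_dma(dev, size, dma_handle, gfp);
 *      }
 */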

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
{
        if (!dma_coherent_default_memory)
                return NULL;

        return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
                        dma_handle);
}

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
                                       int order, void *vaddr)
{
        if (mem && vaddr >= mem->virt_base && vaddr <
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                unsigned long flags;

                spin_lock_irqsave(&mem->spinlock, flags);
                bitmap_release_region(mem->bitmap, page, order);
                spin_unlock_irqrestore(&mem->spinlock, flags);
                return 1;
        }
        return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:        device from which the memory was allocated
 * @order:      the order of pages allocated
 * @vaddr:      virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        return __dma_release_from_coherent(mem, order, vaddr);
}
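
/*
 * Example: the matching free path.  Again only a sketch;
 * arch_free_pages_for_dma() is a hypothetical name for whatever generic
 * release path the architecture would otherwise use.
 *
 *      void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 *                         dma_addr_t dma_handle)
 *      {
 *              int order = get_order(size);
 *
 *              // Returns 1 (and releases the pages) if vaddr came from the
 *              // device's coherent pool, 0 if it must be freed generically.
 *              if (dma_release_from_dev_coherent(dev, order, vaddr))
 *                      return;
 *
 *              arch_free_pages_for_dma(dev, size, vaddr, dma_handle);
 *      }
 */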

int dma_release_from_global_coherent(int order, void *vaddr)
{
        if (!dma_coherent_default_memory)
                return 0;

        return __dma_release_from_coherent(dma_coherent_default_memory, order,
                        vaddr);
}

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
                struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
        if (mem && vaddr >= mem->virt_base && vaddr + size <=
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                unsigned long off = vma->vm_pgoff;
                int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                int user_count = vma_pages(vma);
                int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

                *ret = -ENXIO;
                if (off < count && user_count <= count - off) {
                        unsigned long pfn = mem->pfn_base + start + off;
                        *ret = remap_pfn_range(vma, vma->vm_start, pfn,
                                               user_count << PAGE_SHIFT,
                                               vma->vm_page_prot);
                }
                return 1;
        }
        return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:        device from which the memory was allocated
 * @vma:        vm_area for the userspace memory
 * @vaddr:      cpu address returned by dma_alloc_from_dev_coherent
 * @size:       size of the memory buffer allocated
 * @ret:        result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
                           void *vaddr, size_t size, int *ret)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
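
/*
 * Example: wiring the check above into an mmap implementation.  A sketch
 * only; generic_dma_mmap() is a made-up stand-in for whatever the arch or
 * the dma-mapping core would otherwise do.
 *
 *      int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 *                        void *cpu_addr, dma_addr_t dma_addr, size_t size)
 *      {
 *              int ret;
 *
 *              // If cpu_addr belongs to the device's coherent pool, ret now
 *              // holds the result of remap_pfn_range() and must be returned.
 *              if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *                      return ret;
 *
 *              return generic_dma_mmap(dev, vma, cpu_addr, dma_addr, size);
 *      }
 */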

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
                                   size_t size, int *ret)
{
        if (!dma_coherent_default_memory)
                return 0;

        return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
                                        vaddr, size, ret);
}

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static struct reserved_mem *dma_reserved_default_memory __initdata;

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
        struct dma_coherent_mem *mem = rmem->priv;
        int ret;

        if (!mem) {
                ret = dma_init_coherent_memory(rmem->base, rmem->base,
                                               rmem->size, &mem);
                if (ret) {
                        pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
                                &rmem->base, (unsigned long)rmem->size / SZ_1M);
                        return ret;
                }
        }
        mem->use_dev_dma_pfn_offset = true;
        rmem->priv = mem;
        dma_assign_coherent_memory(dev, mem);
        return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
                                    struct device *dev)
{
        if (dev)
                dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
        .device_init    = rmem_dma_device_init,
        .device_release = rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
        unsigned long node = rmem->fdt_node;

        if (of_get_flat_dt_prop(node, "reusable", NULL))
                return -EINVAL;

#ifdef CONFIG_ARM
        if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
                pr_err("Reserved memory: regions without no-map are not yet supported\n");
                return -EINVAL;
        }

        if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
                WARN(dma_reserved_default_memory,
                     "Reserved memory: region for default DMA coherent area is redefined\n");
                dma_reserved_default_memory = rmem;
        }
#endif

        rmem->ops = &rmem_dma_ops;
        pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
                &rmem->base, (unsigned long)rmem->size / SZ_1M);
        return 0;
}

static int __init dma_init_reserved_memory(void)
{
        const struct reserved_mem_ops *ops;
        int ret;

        if (!dma_reserved_default_memory)
                return -ENOMEM;

        ops = dma_reserved_default_memory->ops;

        /*
         * We rely on rmem_dma_device_init() not propagating the error from
         * dma_assign_coherent_memory() when it is called with a NULL device.
         */
        ret = ops->device_init(dma_reserved_default_memory, NULL);

        if (!ret) {
                dma_coherent_default_memory = dma_reserved_default_memory->priv;
                pr_info("DMA: default coherent area is set\n");
        }

        return ret;
}

core_initcall(dma_init_reserved_memory);

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
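
/*
 * Example of a device tree node that this handler picks up.  Illustrative
 * only; node names, addresses and sizes are made up, and the authoritative
 * format is the reserved-memory device tree binding.
 *
 *      reserved-memory {
 *              #address-cells = <1>;
 *              #size-cells = <1>;
 *              ranges;
 *
 *              dma_pool: buffer@78000000 {
 *                      compatible = "shared-dma-pool";
 *                      no-map;
 *                      reg = <0x78000000 0x800000>;
 *              };
 *      };
 *
 * A device node then points at the pool with a "memory-region" phandle;
 * once of_reserved_mem_device_init() is called for that device,
 * rmem_dma_device_init() attaches the pool as its coherent area.
 */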
#endif