linux/kernel/dma/mapping.c
// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */

#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_alloc_coherent - Managed dma_alloc_coherent()
 * @dev: Device to allocate coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 *
 * Managed dma_alloc_coherent().  Memory allocated using this function
 * will be automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_coherent(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t gfp)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_coherent);
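
/*
 * Example (illustrative sketch, not part of this file; "foo_probe" is a
 * hypothetical platform driver): allocating a managed coherent buffer in
 * probe().  The buffer is released automatically on driver detach, so no
 * error-path or remove() cleanup is needed.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		dma_addr_t dma;
 *		void *buf;
 *
 *		buf = dmam_alloc_coherent(&pdev->dev, PAGE_SIZE, &dma,
 *					  GFP_KERNEL);
 *		if (!buf)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */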

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);
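
/*
 * Example (illustrative, hypothetical names): freeing a managed buffer
 * early, before driver detach, e.g. when reallocating at a new size.
 *
 *	dmam_free_coherent(dev, old_size, buf, dma);
 *	buf = dmam_alloc_coherent(dev, new_size, &dma, GFP_KERNEL);
 */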

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
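
/*
 * Example (illustrative sketch): allocating a managed write-combined
 * buffer.  DMA_ATTR_WRITE_COMBINE is one of the DMA_ATTR_* flags from
 * <linux/dma-mapping.h>; "pdev" is a hypothetical platform device.
 *
 *	buf = dmam_alloc_attrs(&pdev->dev, SZ_64K, &dma, GFP_KERNEL,
 *			       DMA_ATTR_WRITE_COMBINE);
 *	if (!buf)
 *		return -ENOMEM;
 */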

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT

static void dmam_coherent_decl_release(struct device *dev, void *res)
{
	dma_release_declared_memory(dev);
}

/**
 * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
 * @dev: Device to declare coherent memory for
 * @phys_addr: Physical address of coherent memory to be declared
 * @device_addr: Device address of coherent memory to be declared
 * @size: Size of coherent memory to be declared
 * @flags: Flags
 *
 * Managed dma_declare_coherent_memory().
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				 dma_addr_t device_addr, size_t size, int flags)
{
	void *res;
	int rc;

	res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
					 flags);
	if (!rc)
		devres_add(dev, res);
	else
		devres_free(res);

	return rc;
}
EXPORT_SYMBOL(dmam_declare_coherent_memory);
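
/*
 * Example (illustrative sketch, hypothetical resource; assumes the
 * DMA_MEMORY_EXCLUSIVE flag): declaring a device-local SRAM region as the
 * device's coherent pool in probe().  The declaration is torn down
 * automatically on detach.
 *
 *	rc = dmam_declare_coherent_memory(&pdev->dev, res->start,
 *					  res->start, resource_size(res),
 *					  DMA_MEMORY_EXCLUSIVE);
 *	if (rc)
 *		return rc;
 */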

/**
 * dmam_release_declared_memory - Managed dma_release_declared_memory()
 * @dev: Device to release declared coherent memory for
 *
 * Managed dma_release_declared_memory().
 */
void dmam_release_declared_memory(struct device *dev)
{
	WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
}
EXPORT_SYMBOL(dmam_release_declared_memory);

#endif

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size)
{
	struct page *page = virt_to_page(cpu_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}
EXPORT_SYMBOL(dma_common_get_sgtable);
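
/*
 * Example (illustrative): drivers normally reach this helper through
 * dma_get_sgtable(), e.g. to hand a coherent buffer to another mapping
 * layer.  All names besides the API calls are hypothetical.
 *
 *	struct sg_table sgt;
 *
 *	ret = dma_get_sgtable(dev, &sgt, cpu_addr, dma_addr, size);
 *	if (ret)
 *		return ret;
 *	// ... use sgt.sgl ...
 *	sg_free_table(&sgt);
 */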

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off))
		ret = remap_pfn_range(vma, vma->vm_start,
				      page_to_pfn(virt_to_page(cpu_addr)) + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
#endif	/* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */

	return ret;
}
EXPORT_SYMBOL(dma_common_mmap);
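
/*
 * Example (illustrative sketch, hypothetical driver state): a driver's
 * .mmap file operation typically reaches this helper through
 * dma_mmap_coherent().
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_ctx *ctx = file->private_data;
 *
 *		return dma_mmap_coherent(ctx->dev, vma, ctx->cpu_addr,
 *					 ctx->dma_addr, ctx->size);
 *	}
 */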

#ifdef CONFIG_MMU
static struct vm_struct *__dma_common_pages_remap(struct page **pages,
			size_t size, unsigned long vm_flags, pgprot_t prot,
			const void *caller)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, vm_flags, caller);
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area;
}

/*
 * Remaps an array of PAGE_SIZE pages into another vm_area.
 * Cannot be used in non-sleeping contexts.
 */
void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller)
{
	struct vm_struct *area;

	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
	if (!area)
		return NULL;

	area->pages = pages;

	return area->addr;
}
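
/*
 * Example (illustrative, in the style of arch DMA allocators; the flag
 * and pgprot choices are assumptions): remapping an array of pages with
 * non-cacheable attributes, and the matching teardown via
 * dma_common_free_remap().
 *
 *	vaddr = dma_common_pages_remap(pages, size, VM_USERMAP,
 *				       pgprot_noncached(PAGE_KERNEL),
 *				       __builtin_return_address(0));
 *	// ... use vaddr ...
 *	dma_common_free_remap(vaddr, size, VM_USERMAP);
 */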

/*
 * Remaps an allocated contiguous region into another vm_area.
 * Cannot be used in non-sleeping contexts.
 */
void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller)
{
	int i;
	struct page **pages;
	struct vm_struct *area;

	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		pages[i] = nth_page(page, i);

	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);

	kfree(pages);

	if (!area)
		return NULL;
	return area->addr;
}
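
/*
 * Example (illustrative sketch; assumes a CMA-backed allocation via
 * dma_alloc_from_contiguous() and a write-combined pgprot): remapping a
 * physically contiguous allocation with caller-chosen attributes.
 *
 *	page = dma_alloc_from_contiguous(dev, count, get_order(size), gfp);
 *	if (page)
 *		vaddr = dma_common_contiguous_remap(page, size, VM_USERMAP,
 *					pgprot_writecombine(PAGE_KERNEL),
 *					__builtin_return_address(0));
 */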

/*
 * Unmaps a range previously mapped by dma_common_*_remap.
 */
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || (area->flags & vm_flags) != vm_flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
	vunmap(cpu_addr);
}
#endif

/*
 * Enables DMA API use for a device.
 */
int dma_configure(struct device *dev)
{
	if (dev->bus->dma_configure)
		return dev->bus->dma_configure(dev);
	return 0;
}

void dma_deconfigure(struct device *dev)
{
	of_dma_deconfigure(dev);
	acpi_dma_deconfigure(dev);
}