linux/drivers/dma-buf/heaps/heap-helpers.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <uapi/linux/dma-heap.h>

#include "heap-helpers.h"

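/*
 * Initialize the fields shared by all helper-based heaps and record the
 * heap-specific @free callback used to release the buffer's backing storage.
 * The caller is expected to fill in size, pages and pagecount before the
 * buffer is exported.
 */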
void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
                             void (*free)(struct heap_helper_buffer *))
{
        buffer->priv_virt = NULL;
        mutex_init(&buffer->lock);
        buffer->vmap_cnt = 0;
        buffer->vaddr = NULL;
        buffer->pagecount = 0;
        buffer->pages = NULL;
        INIT_LIST_HEAD(&buffer->attachments);
        buffer->free = free;
}

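/*
 * Wrap a helper buffer in a dma-buf using the common heap_helper_ops so it
 * can be handed out to userspace as a file descriptor.
 */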
struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
                                          int fd_flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &heap_helper_ops;
        exp_info.size = buffer->size;
        exp_info.flags = fd_flags;
        exp_info.priv = buffer;

        return dma_buf_export(&exp_info);
}

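/* Map the buffer's pages into a contiguous kernel virtual address range. */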
static void *dma_heap_map_kernel(struct heap_helper_buffer *buffer)
{
        void *vaddr;

        vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
        if (!vaddr)
                return ERR_PTR(-ENOMEM);

        return vaddr;
}

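/*
 * Final teardown once the last dma-buf reference is dropped: warn (and clean
 * up) if a kernel vmap is still outstanding, then hand the buffer back to
 * the heap via its free callback.
 */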
static void dma_heap_buffer_destroy(struct heap_helper_buffer *buffer)
{
        if (buffer->vmap_cnt > 0) {
                WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
                vunmap(buffer->vaddr);
        }

        buffer->free(buffer);
}

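/*
 * Reference-counted kernel mapping helpers: the first vmap_get creates the
 * mapping, later calls just bump vmap_cnt, and the final vmap_put tears it
 * down again. Callers must hold buffer->lock.
 */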
static void *dma_heap_buffer_vmap_get(struct heap_helper_buffer *buffer)
{
        void *vaddr;

        if (buffer->vmap_cnt) {
                buffer->vmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = dma_heap_map_kernel(buffer);
        if (IS_ERR(vaddr))
                return vaddr;
        buffer->vaddr = vaddr;
        buffer->vmap_cnt++;
        return vaddr;
}

static void dma_heap_buffer_vmap_put(struct heap_helper_buffer *buffer)
{
        if (!--buffer->vmap_cnt) {
                vunmap(buffer->vaddr);
                buffer->vaddr = NULL;
        }
}

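/* Per-attachment bookkeeping: one scatterlist per attached device. */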
struct dma_heaps_attachment {
        struct device *dev;
        struct sg_table table;
        struct list_head list;
};

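/*
 * Build a scatterlist describing the buffer's pages for the attaching device
 * and link the attachment into the buffer's list so the CPU-access hooks can
 * sync it later.
 */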
static int dma_heap_attach(struct dma_buf *dmabuf,
                           struct dma_buf_attachment *attachment)
{
        struct dma_heaps_attachment *a;
        struct heap_helper_buffer *buffer = dmabuf->priv;
        int ret;

        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
                                        buffer->pagecount, 0,
                                        buffer->pagecount << PAGE_SHIFT,
                                        GFP_KERNEL);
        if (ret) {
                kfree(a);
                return ret;
        }

        a->dev = attachment->dev;
        INIT_LIST_HEAD(&a->list);

        attachment->priv = a;

        mutex_lock(&buffer->lock);
        list_add(&a->list, &buffer->attachments);
        mutex_unlock(&buffer->lock);

        return 0;
}

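/* Unlink the attachment and free its scatterlist. */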
static void dma_heap_detach(struct dma_buf *dmabuf,
                            struct dma_buf_attachment *attachment)
{
        struct dma_heaps_attachment *a = attachment->priv;
        struct heap_helper_buffer *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        list_del(&a->list);
        mutex_unlock(&buffer->lock);

        sg_free_table(&a->table);
        kfree(a);
}

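/*
 * Map/unmap the attachment's scatterlist for DMA. The table itself is built
 * once at attach time; only the DMA mapping is created and torn down here.
 */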
static
struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
                                      enum dma_data_direction direction)
{
        struct dma_heaps_attachment *a = attachment->priv;
        struct sg_table *table;

        table = &a->table;

        if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
                        direction))
                table = ERR_PTR(-ENOMEM);
        return table;
}

static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *table,
                                   enum dma_data_direction direction)
{
        dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
}

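/*
 * Fault handler for userspace mmaps: look up the backing page for the
 * faulting offset, bounds-checked against pagecount, and take a reference
 * on it.
 */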
static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct heap_helper_buffer *buffer = vma->vm_private_data;

        if (vmf->pgoff >= buffer->pagecount)
                return VM_FAULT_SIGBUS;

        vmf->page = buffer->pages[vmf->pgoff];
        get_page(vmf->page);

        return 0;
}

static const struct vm_operations_struct dma_heap_vm_ops = {
        .fault = dma_heap_vm_fault,
};

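/*
 * Userspace mappings must be shared; pages are faulted in lazily through
 * dma_heap_vm_ops rather than inserted up front.
 */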
static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct heap_helper_buffer *buffer = dmabuf->priv;

        if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
                return -EINVAL;

        vma->vm_ops = &dma_heap_vm_ops;
        vma->vm_private_data = buffer;

        return 0;
}

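/* Called when the last reference to the dma-buf is dropped. */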
static void dma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
        struct heap_helper_buffer *buffer = dmabuf->priv;

        dma_heap_buffer_destroy(buffer);
}

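/*
 * CPU access bracketing: invalidate/flush any kernel vmap and sync every
 * attached device's scatterlist so CPU reads see device writes before access
 * begins and device reads see CPU writes after access ends.
 */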
static int dma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                             enum dma_data_direction direction)
{
        struct heap_helper_buffer *buffer = dmabuf->priv;
        struct dma_heaps_attachment *a;
        int ret = 0;

        mutex_lock(&buffer->lock);

        if (buffer->vmap_cnt)
                invalidate_kernel_vmap_range(buffer->vaddr, buffer->size);

        list_for_each_entry(a, &buffer->attachments, list) {
                dma_sync_sg_for_cpu(a->dev, a->table.sgl, a->table.nents,
                                    direction);
        }
        mutex_unlock(&buffer->lock);

        return ret;
}

static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                                           enum dma_data_direction direction)
{
        struct heap_helper_buffer *buffer = dmabuf->priv;
        struct dma_heaps_attachment *a;

        mutex_lock(&buffer->lock);

        if (buffer->vmap_cnt)
                flush_kernel_vmap_range(buffer->vaddr, buffer->size);

        list_for_each_entry(a, &buffer->attachments, list) {
                dma_sync_sg_for_device(a->dev, a->table.sgl, a->table.nents,
                                       direction);
        }
        mutex_unlock(&buffer->lock);

        return 0;
}

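/*
 * dma_buf vmap/vunmap ops: take buffer->lock and defer to the
 * reference-counted mapping helpers above.
 */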
static void *dma_heap_dma_buf_vmap(struct dma_buf *dmabuf)
{
        struct heap_helper_buffer *buffer = dmabuf->priv;
        void *vaddr;

        mutex_lock(&buffer->lock);
        vaddr = dma_heap_buffer_vmap_get(buffer);
        mutex_unlock(&buffer->lock);

        return vaddr;
}

static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
        struct heap_helper_buffer *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        dma_heap_buffer_vmap_put(buffer);
        mutex_unlock(&buffer->lock);
}

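/* dma_buf ops shared by every heap built on these helpers. */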
const struct dma_buf_ops heap_helper_ops = {
        .map_dma_buf = dma_heap_map_dma_buf,
        .unmap_dma_buf = dma_heap_unmap_dma_buf,
        .mmap = dma_heap_mmap,
        .release = dma_heap_dma_buf_release,
        .attach = dma_heap_attach,
        .detach = dma_heap_detach,
        .begin_cpu_access = dma_heap_dma_buf_begin_cpu_access,
        .end_cpu_access = dma_heap_dma_buf_end_cpu_access,
        .vmap = dma_heap_dma_buf_vmap,
        .vunmap = dma_heap_dma_buf_vunmap,
};
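
/*
 * Usage sketch (illustrative only, not part of the original file): roughly
 * how a heap driver built on these helpers would implement its allocate
 * path. The names example_heap_allocate() and example_heap_free() are
 * hypothetical, @len is assumed to be page-aligned, and unwinding of
 * partially allocated pages is omitted for brevity.
 *
 *	static void example_heap_free(struct heap_helper_buffer *buffer)
 *	{
 *		pgoff_t pg;
 *
 *		for (pg = 0; pg < buffer->pagecount; pg++)
 *			__free_page(buffer->pages[pg]);
 *		kfree(buffer->pages);
 *		kfree(buffer);
 *	}
 *
 *	static int example_heap_allocate(struct dma_heap *heap,
 *					 unsigned long len,
 *					 unsigned long fd_flags,
 *					 unsigned long heap_flags)
 *	{
 *		struct heap_helper_buffer *buffer;
 *		struct dma_buf *dmabuf;
 *		pgoff_t pg;
 *
 *		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
 *		if (!buffer)
 *			return -ENOMEM;
 *
 *		init_heap_helper_buffer(buffer, example_heap_free);
 *		buffer->size = len;
 *		buffer->pagecount = len >> PAGE_SHIFT;
 *		buffer->pages = kmalloc_array(buffer->pagecount,
 *					      sizeof(*buffer->pages),
 *					      GFP_KERNEL);
 *		if (!buffer->pages) {
 *			kfree(buffer);
 *			return -ENOMEM;
 *		}
 *
 *		for (pg = 0; pg < buffer->pagecount; pg++)
 *			buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO);
 *
 *		dmabuf = heap_helper_export_dmabuf(buffer, fd_flags);
 *		if (IS_ERR(dmabuf)) {
 *			example_heap_free(buffer);
 *			return PTR_ERR(dmabuf);
 *		}
 *
 *		return dma_buf_fd(dmabuf, fd_flags);
 *	}
 *
 * Once exported, the dma-buf's release path calls buffer->free(), so the
 * heap does not free the buffer again after a successful export.
 */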