// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include "ion.h"

static struct ion_device *internal_dev;
static int heap_id;

/* this function should only be called while dev->buffer_lock is held */
static void ion_buffer_add(struct ion_device *dev,
                           struct ion_buffer *buffer)
{
        struct rb_node **p = &dev->buffers.rb_node;
        struct rb_node *parent = NULL;
        struct ion_buffer *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_buffer, node);

                if (buffer < entry) {
                        p = &(*p)->rb_left;
                } else if (buffer > entry) {
                        p = &(*p)->rb_right;
                } else {
                        pr_err("%s: buffer already found.\n", __func__);
                        BUG();
                }
        }

        rb_link_node(&buffer->node, parent, p);
        rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
                                            struct ion_device *dev,
                                            unsigned long len,
                                            unsigned long flags)
{
        struct ion_buffer *buffer;
        int ret;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        buffer->heap = heap;
        buffer->flags = flags;
        buffer->dev = dev;
        buffer->size = len;

        ret = heap->ops->allocate(heap, buffer, len, flags);

        if (ret) {
                if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
                        goto err2;

                ion_heap_freelist_drain(heap, 0);
                ret = heap->ops->allocate(heap, buffer, len, flags);
                if (ret)
                        goto err2;
        }

        if (!buffer->sg_table) {
                WARN_ONCE(1, "This heap needs to set the sgtable");
                ret = -EINVAL;
                goto err1;
        }

        INIT_LIST_HEAD(&buffer->attachments);
        mutex_init(&buffer->lock);
        mutex_lock(&dev->buffer_lock);
        ion_buffer_add(dev, buffer);
        mutex_unlock(&dev->buffer_lock);
        return buffer;

err1:
        heap->ops->free(buffer);
err2:
        kfree(buffer);
        return ERR_PTR(ret);
}

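/*
 * Release the heap's backing memory for the buffer and free the bookkeeping
 * structure.  Warns if the buffer still has an active kernel mapping and
 * tears that mapping down before freeing.
 */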
void ion_buffer_destroy(struct ion_buffer *buffer)
{
        if (buffer->kmap_cnt > 0) {
                pr_warn_once("%s: buffer still mapped in the kernel\n",
                             __func__);
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
        }
        buffer->heap->ops->free(buffer);
        kfree(buffer);
}

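/*
 * Remove the buffer from the device's buffer tree, then either queue it on
 * the heap's deferred-free list or destroy it immediately, depending on
 * ION_HEAP_FLAG_DEFER_FREE.
 */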
static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
        struct ion_heap *heap = buffer->heap;
        struct ion_device *dev = buffer->dev;

        mutex_lock(&dev->buffer_lock);
        rb_erase(&buffer->node, &dev->buffers);
        mutex_unlock(&dev->buffer_lock);

        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                ion_heap_freelist_add(heap, buffer);
        else
                ion_buffer_destroy(buffer);
}

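/*
 * Reference-counted kernel mapping helpers, called with buffer->lock held.
 * The first ion_buffer_kmap_get() maps the buffer through the heap's
 * map_kernel op and caches the address in buffer->vaddr; the last
 * ion_buffer_kmap_put() unmaps it again.
 */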
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
        void *vaddr;

        if (buffer->kmap_cnt) {
                buffer->kmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
        if (WARN_ONCE(!vaddr,
                      "heap->ops->map_kernel should return ERR_PTR on error"))
                return ERR_PTR(-EINVAL);
        if (IS_ERR(vaddr))
                return vaddr;
        buffer->vaddr = vaddr;
        buffer->kmap_cnt++;
        return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
        buffer->kmap_cnt--;
        if (!buffer->kmap_cnt) {
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
                buffer->vaddr = NULL;
        }
}

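/*
 * Duplicate the buffer's scatter-gather table so that each attachment gets
 * its own copy to map.  Page pointers and lengths are copied; the
 * dma_address fields are cleared because the copy has not been mapped for
 * any device yet.
 */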
static struct sg_table *dup_sg_table(struct sg_table *table)
{
        struct sg_table *new_table;
        int ret, i;
        struct scatterlist *sg, *new_sg;

        new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
        if (!new_table)
                return ERR_PTR(-ENOMEM);

        ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
        if (ret) {
                kfree(new_table);
                return ERR_PTR(-ENOMEM);
        }

        new_sg = new_table->sgl;
        for_each_sg(table->sgl, sg, table->nents, i) {
                memcpy(new_sg, sg, sizeof(*sg));
                new_sg->dma_address = 0;
                new_sg = sg_next(new_sg);
        }

        return new_table;
}

static void free_duped_table(struct sg_table *table)
{
        sg_free_table(table);
        kfree(table);
}

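/*
 * Per-attachment state for an ion buffer:
 * @dev:   the device attached to the buffer
 * @table: this attachment's private copy of the buffer's sg_table
 * @list:  entry in ion_buffer->attachments, protected by buffer->lock
 */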
struct ion_dma_buf_attachment {
        struct device *dev;
        struct sg_table *table;
        struct list_head list;
};

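/*
 * dma-buf attach callback: give the new attachment its own copy of the
 * sg_table and track it on the buffer's attachment list so the CPU access
 * begin/end callbacks can sync every attached device.
 */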
static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
                              struct dma_buf_attachment *attachment)
{
        struct ion_dma_buf_attachment *a;
        struct sg_table *table;
        struct ion_buffer *buffer = dmabuf->priv;

        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        table = dup_sg_table(buffer->sg_table);
        if (IS_ERR(table)) {
                kfree(a);
                return PTR_ERR(table);
        }

        a->table = table;
        a->dev = dev;
        INIT_LIST_HEAD(&a->list);

        attachment->priv = a;

        mutex_lock(&buffer->lock);
        list_add(&a->list, &buffer->attachments);
        mutex_unlock(&buffer->lock);

        return 0;
}

static void ion_dma_buf_detach(struct dma_buf *dmabuf,
                               struct dma_buf_attachment *attachment)
{
        struct ion_dma_buf_attachment *a = attachment->priv;
        struct ion_buffer *buffer = dmabuf->priv;

        free_duped_table(a->table);
        mutex_lock(&buffer->lock);
        list_del(&a->list);
        mutex_unlock(&buffer->lock);

        kfree(a);
}

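/*
 * Map this attachment's private sg_table copy for DMA.  Each attachment
 * maps its own copy, so concurrent mappings by different devices do not
 * overwrite each other's dma_address fields.
 */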
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
                                        enum dma_data_direction direction)
{
        struct ion_dma_buf_attachment *a = attachment->priv;
        struct sg_table *table;

        table = a->table;

        if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
                        direction))
                return ERR_PTR(-ENOMEM);

        return table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
                              struct sg_table *table,
                              enum dma_data_direction direction)
{
        dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
}

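/*
 * mmap callback: hand the VMA to the heap's map_user op.  Uncached buffers
 * are mapped write-combined so userspace writes do not linger in the CPU
 * cache.
 */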
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = dmabuf->priv;
        int ret = 0;

        if (!buffer->heap->ops->map_user) {
                pr_err("%s: this heap does not define a method for mapping to userspace\n",
                       __func__);
                return -EINVAL;
        }

        if (!(buffer->flags & ION_FLAG_CACHED))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        mutex_lock(&buffer->lock);
        /* now map it to userspace */
        ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
        mutex_unlock(&buffer->lock);

        if (ret)
                pr_err("%s: failure mapping buffer to userspace\n",
                       __func__);

        return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
        struct ion_buffer *buffer = dmabuf->priv;

        _ion_buffer_destroy(buffer);
}

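/*
 * CPU access bracketing: take a kernel mapping (when the heap provides one)
 * and sync every attachment's sg_table for the CPU before access;
 * end_cpu_access drops the mapping and syncs the attachments back for the
 * device.
 */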
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                        enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;
        void *vaddr;
        struct ion_dma_buf_attachment *a;
        int ret = 0;

        /*
         * TODO: Move this elsewhere because we don't always need a vaddr
         */
        if (buffer->heap->ops->map_kernel) {
                mutex_lock(&buffer->lock);
                vaddr = ion_buffer_kmap_get(buffer);
                if (IS_ERR(vaddr)) {
                        ret = PTR_ERR(vaddr);
                        goto unlock;
                }
                mutex_unlock(&buffer->lock);
        }

        mutex_lock(&buffer->lock);
        list_for_each_entry(a, &buffer->attachments, list) {
                dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
                                    direction);
        }

unlock:
        mutex_unlock(&buffer->lock);
        return ret;
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                                      enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;
        struct ion_dma_buf_attachment *a;

        if (buffer->heap->ops->map_kernel) {
                mutex_lock(&buffer->lock);
                ion_buffer_kmap_put(buffer);
                mutex_unlock(&buffer->lock);
        }

        mutex_lock(&buffer->lock);
        list_for_each_entry(a, &buffer->attachments, list) {
                dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
                                       direction);
        }
        mutex_unlock(&buffer->lock);

        return 0;
}

static const struct dma_buf_ops dma_buf_ops = {
        .map_dma_buf = ion_map_dma_buf,
        .unmap_dma_buf = ion_unmap_dma_buf,
        .mmap = ion_mmap,
        .release = ion_dma_buf_release,
        .attach = ion_dma_buf_attach,
        .detach = ion_dma_buf_detach,
        .begin_cpu_access = ion_dma_buf_begin_cpu_access,
        .end_cpu_access = ion_dma_buf_end_cpu_access,
};

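/*
 * Allocate a buffer of @len bytes from the highest-priority heap whose id is
 * set in @heap_id_mask, wrap it in a dma-buf and return a file descriptor
 * for it.  Returns a negative errno on failure.
 */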
int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
{
        struct ion_device *dev = internal_dev;
        struct ion_buffer *buffer = NULL;
        struct ion_heap *heap;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        int fd;
        struct dma_buf *dmabuf;

        pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
                 len, heap_id_mask, flags);
        /*
         * traverse the list of heaps available in this system in priority
         * order.  If the heap id is present in the caller's mask, try to
         * allocate from it.  Repeat until the allocation succeeds or all
         * heaps have been tried.
         */
        len = PAGE_ALIGN(len);

        if (!len)
                return -EINVAL;

        down_read(&dev->lock);
        plist_for_each_entry(heap, &dev->heaps, node) {
                /* if the caller didn't specify this heap id */
                if (!((1 << heap->id) & heap_id_mask))
                        continue;
                buffer = ion_buffer_create(heap, dev, len, flags);
                if (!IS_ERR(buffer))
                        break;
        }
        up_read(&dev->lock);

        if (!buffer)
                return -ENODEV;

        if (IS_ERR(buffer))
                return PTR_ERR(buffer);

        exp_info.ops = &dma_buf_ops;
        exp_info.size = buffer->size;
        exp_info.flags = O_RDWR;
        exp_info.priv = buffer;

        dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(dmabuf)) {
                _ion_buffer_destroy(buffer);
                return PTR_ERR(dmabuf);
        }

        fd = dma_buf_fd(dmabuf, O_CLOEXEC);
        if (fd < 0)
                dma_buf_put(dmabuf);

        return fd;
}

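/*
 * ION_IOC_HEAP_QUERY backend: with a NULL user buffer, report the number of
 * registered heaps; otherwise copy up to query->cnt ion_heap_data records to
 * userspace and update query->cnt with the number actually written.
 */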
int ion_query_heaps(struct ion_heap_query *query)
{
        struct ion_device *dev = internal_dev;
        struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
        int ret = -EINVAL, cnt = 0, max_cnt;
        struct ion_heap *heap;
        struct ion_heap_data hdata;

        memset(&hdata, 0, sizeof(hdata));

        down_read(&dev->lock);
        if (!buffer) {
                query->cnt = dev->heap_cnt;
                ret = 0;
                goto out;
        }

        if (query->cnt <= 0)
                goto out;

        max_cnt = query->cnt;

        plist_for_each_entry(heap, &dev->heaps, node) {
                strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
                hdata.name[sizeof(hdata.name) - 1] = '\0';
                hdata.type = heap->type;
                hdata.heap_id = heap->id;

                if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
                        ret = -EFAULT;
                        goto out;
                }

                cnt++;
                if (cnt >= max_cnt)
                        break;
        }

        query->cnt = cnt;
        ret = 0;
out:
        up_read(&dev->lock);
        return ret;
}

static const struct file_operations ion_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = ion_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
};

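/*
 * debugfs "<heap>_shrink" helpers: writing a value asks the heap's shrinker
 * to scan that many objects (writing 0 drains everything it currently
 * counts); reading reports how many objects the shrinker could free.
 */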
static int debug_shrink_set(void *data, u64 val)
{
        struct ion_heap *heap = data;
        struct shrink_control sc;
        int objs;

        sc.gfp_mask = GFP_HIGHUSER;
        sc.nr_to_scan = val;

        if (!val) {
                objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
                sc.nr_to_scan = objs;
        }

        heap->shrinker.scan_objects(&heap->shrinker, &sc);
        return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
        struct ion_heap *heap = data;
        struct shrink_control sc;
        int objs;

        sc.gfp_mask = GFP_HIGHUSER;
        sc.nr_to_scan = 0;

        objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
        *val = objs;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
                        debug_shrink_set, "%llu\n");

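/*
 * Register a heap with the ion device: set up deferred freeing and the
 * shrinker when the heap supports them, assign the next heap id, add the
 * heap to the priority list and expose its debugfs shrink control.
 */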
void ion_device_add_heap(struct ion_heap *heap)
{
        struct ion_device *dev = internal_dev;
        int ret;

        if (!heap->ops->allocate || !heap->ops->free)
                pr_err("%s: can not add heap with invalid ops struct.\n",
                       __func__);

        spin_lock_init(&heap->free_lock);
        heap->free_list_size = 0;

        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                ion_heap_init_deferred_free(heap);

        if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) {
                ret = ion_heap_init_shrinker(heap);
                if (ret)
                        pr_err("%s: Failed to register shrinker\n", __func__);
        }

        heap->dev = dev;
        down_write(&dev->lock);
        heap->id = heap_id++;
        /*
         * use negative heap->id to reverse the priority -- when traversing
         * the list later attempt higher id numbers first
         */
        plist_node_init(&heap->node, -heap->id);
        plist_add(&heap->node, &dev->heaps);

        if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
                char debug_name[64];

                snprintf(debug_name, 64, "%s_shrink", heap->name);
                debugfs_create_file(debug_name, 0644, dev->debug_root,
                                    heap, &debug_shrink_fops);
        }

        dev->heap_cnt++;
        up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);

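/*
 * Create the single /dev/ion misc device at boot and initialize the global
 * ion_device state (buffer tree, heap list, locks and debugfs root).
 */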
static int ion_device_create(void)
{
        struct ion_device *idev;
        int ret;

        idev = kzalloc(sizeof(*idev), GFP_KERNEL);
        if (!idev)
                return -ENOMEM;

        idev->dev.minor = MISC_DYNAMIC_MINOR;
        idev->dev.name = "ion";
        idev->dev.fops = &ion_fops;
        idev->dev.parent = NULL;
        ret = misc_register(&idev->dev);
        if (ret) {
                pr_err("ion: failed to register misc device.\n");
                kfree(idev);
                return ret;
        }

        idev->debug_root = debugfs_create_dir("ion", NULL);
        idev->buffers = RB_ROOT;
        mutex_init(&idev->buffer_lock);
        init_rwsem(&idev->lock);
        plist_head_init(&idev->heaps);
        internal_dev = idev;
        return 0;
}
subsys_initcall(ion_device_create);