linux/drivers/gpu/drm/etnaviv/etnaviv_gem.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct sg_table *sgt = etnaviv_obj->sgt;

        /*
         * For non-cached buffers, ensure the new pages are clean
         * because display controller, GPU, etc. are not coherent.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
                dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct sg_table *sgt = etnaviv_obj->sgt;

        /*
         * For non-cached buffers, ensure the new pages are clean
         * because display controller, GPU, etc. are not coherent:
         *
         * WARNING: The DMA API does not support concurrent CPU
         * and device access to the memory area.  With BIDIRECTIONAL,
         * we will clean the cache lines which overlap the region,
         * and invalidate all cache lines (partially) contained in
         * the region.
         *
         * If you have dirty data in the overlapping cache lines,
         * that will corrupt the GPU-written data.  If you have
         * written into the remainder of the region, this can
         * discard those writes.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
                dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

        if (IS_ERR(p)) {
                dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
                return PTR_ERR(p);
        }

        etnaviv_obj->pages = p;

        return 0;
}

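/*
 * Drop the scatter/gather table and the shmem backing pages of an object.
 * The pages are returned through drm_gem_put_pages() and marked dirty so
 * their contents are preserved by shmem.
 */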
static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        if (etnaviv_obj->sgt) {
                etnaviv_gem_scatterlist_unmap(etnaviv_obj);
                sg_free_table(etnaviv_obj->sgt);
                kfree(etnaviv_obj->sgt);
                etnaviv_obj->sgt = NULL;
        }
        if (etnaviv_obj->pages) {
                drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
                                  true, false);

                etnaviv_obj->pages = NULL;
        }
}

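/*
 * Get the backing pages of an object and build its scatter/gather table on
 * first use.  Must be called with etnaviv_obj->lock held; the pages stay
 * attached to the object until it is released.
 */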
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        int ret;

        lockdep_assert_held(&etnaviv_obj->lock);

        if (!etnaviv_obj->pages) {
                ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
                if (ret < 0)
                        return ERR_PTR(ret);
        }

        if (!etnaviv_obj->sgt) {
                struct drm_device *dev = etnaviv_obj->base.dev;
                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
                struct sg_table *sgt;

                sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
                                            etnaviv_obj->pages, npages);
                if (IS_ERR(sgt)) {
                        dev_err(dev->dev, "failed to allocate sgt: %ld\n",
                                PTR_ERR(sgt));
                        return ERR_CAST(sgt);
                }

                etnaviv_obj->sgt = sgt;

                etnaviv_gem_scatter_map(etnaviv_obj);
        }

        return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        lockdep_assert_held(&etnaviv_obj->lock);
        /* when we start tracking the pin count, then do something here */
}

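/*
 * Apply the BO's caching mode to a userspace mapping: ETNA_BO_WC and
 * ETNA_BO_UNCACHED get write-combined/uncached page protections, while
 * cached BOs are mapped through the shmem file's own address_space.
 */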
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
                struct vm_area_struct *vma)
{
        pgprot_t vm_page_prot;

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        vm_page_prot = vm_get_page_prot(vma->vm_flags);

        if (etnaviv_obj->flags & ETNA_BO_WC) {
                vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
        } else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
                vma->vm_page_prot = pgprot_noncached(vm_page_prot);
        } else {
                /*
                 * Shunt off cached objs to shmem file so they have their own
                 * address_space (so unmap_mapping_range does what we want,
                 * in particular in the case of mmap'd dmabufs)
                 */
                vma->vm_pgoff = 0;
                vma_set_file(vma, etnaviv_obj->base.filp);

                vma->vm_page_prot = vm_page_prot;
        }

        return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct etnaviv_gem_object *obj;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret) {
                DBG("mmap failed: %d", ret);
                return ret;
        }

        obj = to_etnaviv_bo(vma->vm_private_data);
        return obj->ops->mmap(obj, vma);
}

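/*
 * Page fault handler for userspace mappings: attach the backing pages if
 * necessary and insert the faulting page into the VMA.
 */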
static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct page **pages, *page;
        pgoff_t pgoff;
        int err;

        /*
         * Make sure we don't parallel update on a fault, nor move or remove
         * something from beneath our feet.  Note that vmf_insert_page() is
         * specifically coded to take care of this, so we don't have to.
         */
        err = mutex_lock_interruptible(&etnaviv_obj->lock);
        if (err)
                return VM_FAULT_NOPAGE;
        /* make sure we have pages attached now */
        pages = etnaviv_gem_get_pages(etnaviv_obj);
        mutex_unlock(&etnaviv_obj->lock);

        if (IS_ERR(pages)) {
                err = PTR_ERR(pages);
                return vmf_error(err);
        }

        /* We don't use vmf->pgoff since that has the fake offset: */
        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

        page = pages[pgoff];

        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
             page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

        return vmf_insert_page(vma, vmf->address, page);
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
        int ret;

        /* Make it mmapable */
        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                dev_err(obj->dev->dev, "could not allocate mmap offset\n");
        else
                *offset = drm_vma_node_offset_addr(&obj->vma_node);

        return ret;
}

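/*
 * Find the mapping of this object in the given MMU context, if one exists.
 * Called with the object lock held.
 */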
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
                             struct etnaviv_iommu_context *context)
{
        struct etnaviv_vram_mapping *mapping;

        list_for_each_entry(mapping, &obj->vram_list, obj_node) {
                if (mapping->context == context)
                        return mapping;
        }

        return NULL;
}

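/*
 * Drop one use of a mapping obtained from etnaviv_gem_mapping_get() and
 * release the GEM object reference taken there.  The mapping itself stays
 * on the object's vram_list so it can be reused or reaped later.
 */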
void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        mutex_lock(&etnaviv_obj->lock);
        WARN_ON(mapping->use == 0);
        mapping->use -= 1;
        mutex_unlock(&etnaviv_obj->lock);

        drm_gem_object_put(&etnaviv_obj->base);
}

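/*
 * Get a GPU VM mapping of the object in the given MMU context, creating a
 * new one (or reviving a reaped one) as needed.  On success a use count on
 * the mapping and a reference on the GEM object are held; drop both with
 * etnaviv_gem_mapping_unreference().
 */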
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
        struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
        u64 va)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct etnaviv_vram_mapping *mapping;
        struct page **pages;
        int ret = 0;

        mutex_lock(&etnaviv_obj->lock);
        mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
        if (mapping) {
                /*
                 * Holding the object lock prevents the use count changing
                 * beneath us.  If the use count is zero, the MMU might be
                 * reaping this object, so take the lock and re-check that
                 * the MMU owns this mapping to close this race.
                 */
                if (mapping->use == 0) {
                        mutex_lock(&mmu_context->lock);
                        if (mapping->context == mmu_context)
                                mapping->use += 1;
                        else
                                mapping = NULL;
                        mutex_unlock(&mmu_context->lock);
                        if (mapping)
                                goto out;
                } else {
                        mapping->use += 1;
                        goto out;
                }
        }

        pages = etnaviv_gem_get_pages(etnaviv_obj);
        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                goto out;
        }

        /*
         * See if we have a reaped vram mapping we can re-use before
         * allocating a fresh mapping.
         */
        mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
        if (!mapping) {
                mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
                if (!mapping) {
                        ret = -ENOMEM;
                        goto out;
                }

                INIT_LIST_HEAD(&mapping->scan_node);
                mapping->object = etnaviv_obj;
        } else {
                list_del(&mapping->obj_node);
        }

        etnaviv_iommu_context_get(mmu_context);
        mapping->context = mmu_context;
        mapping->use = 1;

        ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
                                    mmu_context->global->memory_base,
                                    mapping, va);
        if (ret < 0) {
                etnaviv_iommu_context_put(mmu_context);
                kfree(mapping);
        } else {
                list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
        }

out:
        mutex_unlock(&etnaviv_obj->lock);

        if (ret)
                return ERR_PTR(ret);

        /* Take a reference on the object */
        drm_gem_object_get(obj);
        return mapping;
}

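/*
 * Return a kernel virtual mapping of the object, creating and caching it on
 * first use.
 */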
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        if (etnaviv_obj->vaddr)
                return etnaviv_obj->vaddr;

        mutex_lock(&etnaviv_obj->lock);
        /*
         * Need to check again, as we might have raced with another thread
         * while waiting for the mutex.
         */
        if (!etnaviv_obj->vaddr)
                etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
        mutex_unlock(&etnaviv_obj->lock);

        return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
        struct page **pages;

        lockdep_assert_held(&obj->lock);

        pages = etnaviv_gem_get_pages(obj);
        if (IS_ERR(pages))
                return NULL;

        return vmap(pages, obj->base.size >> PAGE_SHIFT,
                        VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
        if (op & ETNA_PREP_READ)
                return DMA_FROM_DEVICE;
        else if (op & ETNA_PREP_WRITE)
                return DMA_TO_DEVICE;
        else
                return DMA_BIDIRECTIONAL;
}

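/*
 * Prepare the object for CPU access: wait for (or, with ETNA_PREP_NOSYNC,
 * just check for) outstanding GPU work and, for cached BOs, sync the pages
 * for the CPU.  The op is recorded so etnaviv_gem_cpu_fini() can sync back
 * in the matching direction.
 */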
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
                struct drm_etnaviv_timespec *timeout)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct drm_device *dev = obj->dev;
        bool write = !!(op & ETNA_PREP_WRITE);
        int ret;

        if (!etnaviv_obj->sgt) {
                void *ret;

                mutex_lock(&etnaviv_obj->lock);
                ret = etnaviv_gem_get_pages(etnaviv_obj);
                mutex_unlock(&etnaviv_obj->lock);
                if (IS_ERR(ret))
                        return PTR_ERR(ret);
        }

        if (op & ETNA_PREP_NOSYNC) {
                if (!dma_resv_test_signaled(obj->resv, write))
                        return -EBUSY;
        } else {
                unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

                ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
                if (ret <= 0)
                        return ret == 0 ? -ETIMEDOUT : ret;
        }

        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
                                         etnaviv_op_to_dma_dir(op));
                etnaviv_obj->last_cpu_prep_op = op;
        }

        return 0;
}

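/*
 * End a CPU access window opened by etnaviv_gem_cpu_prep(): for cached BOs,
 * hand the pages back to the device using the direction of the last prep op.
 */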
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                /* fini without a prep is almost certainly a userspace error */
                WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
                dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
                        etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
                etnaviv_obj->last_cpu_prep_op = 0;
        }

        return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
        struct drm_etnaviv_timespec *timeout)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
        const char *type, struct seq_file *m)
{
        if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                seq_printf(m, "\t%9s: %s %s seq %llu\n",
                           type,
                           fence->ops->get_driver_name(fence),
                           fence->ops->get_timeline_name(fence),
                           fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct dma_resv *robj = obj->resv;
        struct dma_resv_list *fobj;
        struct dma_fence *fence;
        unsigned long off = drm_vma_node_start(&obj->vma_node);

        seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
                        etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
                        obj->name, kref_read(&obj->refcount),
                        off, etnaviv_obj->vaddr, obj->size);

        rcu_read_lock();
        fobj = dma_resv_shared_list(robj);
        if (fobj) {
                unsigned int i, shared_count = fobj->shared_count;

                for (i = 0; i < shared_count; i++) {
                        fence = rcu_dereference(fobj->shared[i]);
                        etnaviv_gem_describe_fence(fence, "Shared", m);
                }
        }

        fence = dma_resv_excl_fence(robj);
        if (fence)
                etnaviv_gem_describe_fence(fence, "Exclusive", m);
        rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
        struct seq_file *m)
{
        struct etnaviv_gem_object *etnaviv_obj;
        int count = 0;
        size_t size = 0;

        mutex_lock(&priv->gem_lock);
        list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
                struct drm_gem_object *obj = &etnaviv_obj->base;

                seq_puts(m, "   ");
                etnaviv_gem_describe(obj, m);
                count++;
                size += obj->size;
        }
        mutex_unlock(&priv->gem_lock);

        seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
        vunmap(etnaviv_obj->vaddr);
        put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
        .get_pages = etnaviv_gem_shmem_get_pages,
        .release = etnaviv_gem_shmem_release,
        .vmap = etnaviv_gem_vmap_impl,
        .mmap = etnaviv_gem_mmap_obj,
};

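/*
 * Called when the last reference to the object is dropped: remove it from
 * the global object list, tear down any remaining GPU VM mappings and
 * release the backing storage through the per-type release hook.
 */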
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct etnaviv_drm_private *priv = obj->dev->dev_private;
        struct etnaviv_vram_mapping *mapping, *tmp;

        /* object should not be active */
        WARN_ON(is_active(etnaviv_obj));

        mutex_lock(&priv->gem_lock);
        list_del(&etnaviv_obj->gem_node);
        mutex_unlock(&priv->gem_lock);

        list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
                                 obj_node) {
                struct etnaviv_iommu_context *context = mapping->context;

                WARN_ON(mapping->use);

                if (context) {
                        etnaviv_iommu_unmap_gem(context, mapping);
                        etnaviv_iommu_context_put(context);
                }

                list_del(&mapping->obj_node);
                kfree(mapping);
        }

        drm_gem_free_mmap_offset(obj);
        etnaviv_obj->ops->release(etnaviv_obj);
        drm_gem_object_release(obj);

        kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
        struct etnaviv_drm_private *priv = dev->dev_private;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        mutex_lock(&priv->gem_lock);
        list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
        mutex_unlock(&priv->gem_lock);
}

static const struct vm_operations_struct vm_ops = {
        .fault = etnaviv_gem_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
        .free = etnaviv_gem_free_object,
        .pin = etnaviv_gem_prime_pin,
        .unpin = etnaviv_gem_prime_unpin,
        .get_sg_table = etnaviv_gem_prime_get_sg_table,
        .vmap = etnaviv_gem_prime_vmap,
        .vm_ops = &vm_ops,
};

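/*
 * Common allocation path for all BO types: validate the cache mode flags
 * and set up the etnaviv_gem_object before the caller initializes the
 * underlying GEM object.
 */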
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
        const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
        struct etnaviv_gem_object *etnaviv_obj;
        unsigned sz = sizeof(*etnaviv_obj);
        bool valid = true;

        /* validate flags */
        switch (flags & ETNA_BO_CACHE_MASK) {
        case ETNA_BO_UNCACHED:
        case ETNA_BO_CACHED:
        case ETNA_BO_WC:
                break;
        default:
                valid = false;
        }

        if (!valid) {
                dev_err(dev->dev, "invalid cache flag: %x\n",
                        (flags & ETNA_BO_CACHE_MASK));
                return -EINVAL;
        }

        etnaviv_obj = kzalloc(sz, GFP_KERNEL);
        if (!etnaviv_obj)
                return -ENOMEM;

        etnaviv_obj->flags = flags;
        etnaviv_obj->ops = ops;

        mutex_init(&etnaviv_obj->lock);
        INIT_LIST_HEAD(&etnaviv_obj->vram_list);

        *obj = &etnaviv_obj->base;
        (*obj)->funcs = &etnaviv_gem_object_funcs;

        return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
        u32 size, u32 flags, u32 *handle)
{
        struct etnaviv_drm_private *priv = dev->dev_private;
        struct drm_gem_object *obj = NULL;
        int ret;

        size = PAGE_ALIGN(size);

        ret = etnaviv_gem_new_impl(dev, size, flags,
                                   &etnaviv_gem_shmem_ops, &obj);
        if (ret)
                goto fail;

        lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

        ret = drm_gem_object_init(dev, obj, size);
        if (ret)
                goto fail;

        /*
         * Our buffers are kept pinned, so allocating them from the MOVABLE
         * zone is a really bad idea, and conflicts with CMA. See comments
         * above new_inode() why this is required _and_ expected if you're
         * going to pin these pages.
         */
        mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

        etnaviv_gem_obj_add(dev, obj);

        ret = drm_gem_handle_create(file, obj, handle);

        /* drop reference from allocate - handle holds it now */
fail:
        drm_gem_object_put(obj);

        return ret;
}

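/*
 * Create a GEM object without shmem backing storage; the supplied ops
 * provide the backing pages (e.g. the userptr objects created below).
 */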
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
        const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
        struct drm_gem_object *obj;
        int ret;

        ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
        if (ret)
                return ret;

        drm_gem_private_object_init(dev, obj, size);

        *res = to_etnaviv_bo(obj);

        return 0;
}

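/*
 * Pin the user pages backing a userptr object with pin_user_pages_fast().
 * Only the mm that created the object is allowed to populate it.
 */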
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        struct page **pvec = NULL;
        struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
        int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

        might_lock_read(&current->mm->mmap_lock);

        if (userptr->mm != current->mm)
                return -EPERM;

        pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!pvec)
                return -ENOMEM;

        do {
                unsigned num_pages = npages - pinned;
                uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
                struct page **pages = pvec + pinned;

                ret = pin_user_pages_fast(ptr, num_pages,
                                          FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM,
                                          pages);
                if (ret < 0) {
                        unpin_user_pages(pvec, pinned);
                        kvfree(pvec);
                        return ret;
                }

                pinned += ret;

        } while (pinned < npages);

        etnaviv_obj->pages = pvec;

        return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
        if (etnaviv_obj->sgt) {
                etnaviv_gem_scatterlist_unmap(etnaviv_obj);
                sg_free_table(etnaviv_obj->sgt);
                kfree(etnaviv_obj->sgt);
        }
        if (etnaviv_obj->pages) {
                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

                unpin_user_pages(etnaviv_obj->pages, npages);
                kvfree(etnaviv_obj->pages);
        }
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
                struct vm_area_struct *vma)
{
        return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
        .get_pages = etnaviv_gem_userptr_get_pages,
        .release = etnaviv_gem_userptr_release,
        .vmap = etnaviv_gem_vmap_impl,
        .mmap = etnaviv_gem_userptr_mmap_obj,
};

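/*
 * Wrap a range of user memory in a cached GEM object and return a handle
 * for it.  The pages are pinned lazily on first use.
 */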
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
        uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
        struct etnaviv_gem_object *etnaviv_obj;
        int ret;

        ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
                                      &etnaviv_gem_userptr_ops, &etnaviv_obj);
        if (ret)
                return ret;

        lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

        etnaviv_obj->userptr.ptr = ptr;
        etnaviv_obj->userptr.mm = current->mm;
        etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

        etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

        ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(&etnaviv_obj->base);
        return ret;
}