linux/drivers/gpu/drm/etnaviv/etnaviv_gem.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct sg_table *sgt = etnaviv_obj->sgt;

        /*
         * For non-cached buffers, ensure the new pages are clean
         * because display controller, GPU, etc. are not coherent.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
                dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct sg_table *sgt = etnaviv_obj->sgt;

        /*
         * For non-cached buffers, ensure the new pages are clean
         * because display controller, GPU, etc. are not coherent:
         *
         * WARNING: The DMA API does not support concurrent CPU
         * and device access to the memory area.  With BIDIRECTIONAL,
         * we will clean the cache lines which overlap the region,
         * and invalidate all cache lines (partially) contained in
         * the region.
         *
         * If you have dirty data in the overlapping cache lines,
         * that will corrupt the GPU-written data.  If you have
         * written into the remainder of the region, this can
         * discard those writes.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
                dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

        if (IS_ERR(p)) {
                dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
                return PTR_ERR(p);
        }

        etnaviv_obj->pages = p;

        return 0;
}

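/*
 * Release the CPU-side backing of a shmem object: DMA-unmap and free the
 * scatter/gather table, if one was created, and hand the pages back to
 * shmem, marking them dirty but not accessed.
 */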
static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        if (etnaviv_obj->sgt) {
                etnaviv_gem_scatterlist_unmap(etnaviv_obj);
                sg_free_table(etnaviv_obj->sgt);
                kfree(etnaviv_obj->sgt);
                etnaviv_obj->sgt = NULL;
        }
        if (etnaviv_obj->pages) {
                drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
                                  true, false);

                etnaviv_obj->pages = NULL;
        }
}

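/*
 * Ensure the object has backing pages and a DMA-mapped scatter/gather
 * table, allocating both on first use. Returns the page array or an
 * ERR_PTR on failure. Must be called with etnaviv_obj->lock held.
 */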
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        int ret;

        lockdep_assert_held(&etnaviv_obj->lock);

        if (!etnaviv_obj->pages) {
                ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
                if (ret < 0)
                        return ERR_PTR(ret);
        }

        if (!etnaviv_obj->sgt) {
                struct drm_device *dev = etnaviv_obj->base.dev;
                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
                struct sg_table *sgt;

                sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
                                            etnaviv_obj->pages, npages);
                if (IS_ERR(sgt)) {
                        dev_err(dev->dev, "failed to allocate sgt: %ld\n",
                                PTR_ERR(sgt));
                        return ERR_CAST(sgt);
                }

                etnaviv_obj->sgt = sgt;

                etnaviv_gem_scatter_map(etnaviv_obj);
        }

        return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        lockdep_assert_held(&etnaviv_obj->lock);
        /* when we start tracking the pin count, then do something here */
}

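/*
 * Set up a userspace mapping of the object: pick the page protection from
 * the BO cache flags (write-combined, uncached or cached) and, for cached
 * objects, redirect the VMA to the backing shmem file so it gets its own
 * address_space.
 */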
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
                struct vm_area_struct *vma)
{
        pgprot_t vm_page_prot;

        vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;

        vm_page_prot = vm_get_page_prot(vma->vm_flags);

        if (etnaviv_obj->flags & ETNA_BO_WC) {
                vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
        } else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
                vma->vm_page_prot = pgprot_noncached(vm_page_prot);
        } else {
                /*
                 * Shunt off cached objs to shmem file so they have their own
                 * address_space (so unmap_mapping_range does what we want,
                 * in particular in the case of mmap'd dmabufs)
                 */
                vma->vm_pgoff = 0;
                vma_set_file(vma, etnaviv_obj->base.filp);

                vma->vm_page_prot = vm_page_prot;
        }

        return 0;
}

static int etnaviv_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        return etnaviv_obj->ops->mmap(etnaviv_obj, vma);
}

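/*
 * Page fault handler for userspace mappings: make sure the backing pages
 * exist, then insert the single page covering the faulting address into
 * the VMA.
 */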
static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct page **pages, *page;
        pgoff_t pgoff;
        int err;

        /*
         * Make sure we don't parallel update on a fault, nor move or remove
         * something from beneath our feet.  Note that vmf_insert_page() is
         * specifically coded to take care of this, so we don't have to.
         */
        err = mutex_lock_interruptible(&etnaviv_obj->lock);
        if (err)
                return VM_FAULT_NOPAGE;
        /* make sure we have pages attached now */
        pages = etnaviv_gem_get_pages(etnaviv_obj);
        mutex_unlock(&etnaviv_obj->lock);

        if (IS_ERR(pages)) {
                err = PTR_ERR(pages);
                return vmf_error(err);
        }

        /* We don't use vmf->pgoff since that has the fake offset: */
        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

        page = pages[pgoff];

        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
             page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

        return vmf_insert_page(vma, vmf->address, page);
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
        int ret;

        /* Make it mmapable */
        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                dev_err(obj->dev->dev, "could not allocate mmap offset\n");
        else
                *offset = drm_vma_node_offset_addr(&obj->vma_node);

        return ret;
}

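/*
 * Look up an existing mapping of the object in the given MMU context.
 * Callers pass a NULL context to find a reaped mapping that can be
 * re-used. Returns NULL if no matching mapping exists.
 */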
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
                             struct etnaviv_iommu_context *context)
{
        struct etnaviv_vram_mapping *mapping;

        list_for_each_entry(mapping, &obj->vram_list, obj_node) {
                if (mapping->context == context)
                        return mapping;
        }

        return NULL;
}

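/*
 * Drop one use of a GPU VA mapping and release the object reference taken
 * by etnaviv_gem_mapping_get(). The mapping itself stays on the object's
 * vram_list so it can be re-used or reaped later.
 */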
void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        mutex_lock(&etnaviv_obj->lock);
        WARN_ON(mapping->use == 0);
        mapping->use -= 1;
        mutex_unlock(&etnaviv_obj->lock);

        drm_gem_object_put(&etnaviv_obj->base);
}

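/*
 * Get a GPU VA mapping of the object in the given MMU context, bumping its
 * use count. An existing mapping is re-used where possible (including a
 * previously reaped one); otherwise a new mapping is allocated and the
 * object is mapped through the IOMMU. On success a reference on the GEM
 * object is held on behalf of the mapping; drop it with
 * etnaviv_gem_mapping_unreference().
 */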
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
        struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
        u64 va)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct etnaviv_vram_mapping *mapping;
        struct page **pages;
        int ret = 0;

        mutex_lock(&etnaviv_obj->lock);
        mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
        if (mapping) {
                /*
                 * Holding the object lock prevents the use count changing
                 * beneath us.  If the use count is zero, the MMU might be
                 * reaping this object, so take the lock and re-check that
                 * the MMU owns this mapping to close this race.
                 */
                if (mapping->use == 0) {
                        mutex_lock(&mmu_context->lock);
                        if (mapping->context == mmu_context)
                                mapping->use += 1;
                        else
                                mapping = NULL;
                        mutex_unlock(&mmu_context->lock);
                        if (mapping)
                                goto out;
                } else {
                        mapping->use += 1;
                        goto out;
                }
        }

        pages = etnaviv_gem_get_pages(etnaviv_obj);
        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                goto out;
        }

        /*
         * See if we have a reaped vram mapping we can re-use before
         * allocating a fresh mapping.
         */
        mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
        if (!mapping) {
                mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
                if (!mapping) {
                        ret = -ENOMEM;
                        goto out;
                }

                INIT_LIST_HEAD(&mapping->scan_node);
                mapping->object = etnaviv_obj;
        } else {
                list_del(&mapping->obj_node);
        }

        mapping->context = etnaviv_iommu_context_get(mmu_context);
        mapping->use = 1;

        ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
                                    mmu_context->global->memory_base,
                                    mapping, va);
        if (ret < 0) {
                etnaviv_iommu_context_put(mmu_context);
                kfree(mapping);
        } else {
                list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
        }

out:
        mutex_unlock(&etnaviv_obj->lock);

        if (ret)
                return ERR_PTR(ret);

        /* Take a reference on the object */
        drm_gem_object_get(obj);
        return mapping;
}

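/*
 * Return a kernel virtual mapping of the object, creating it on first use
 * and caching it in etnaviv_obj->vaddr for subsequent callers.
 */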
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        if (etnaviv_obj->vaddr)
                return etnaviv_obj->vaddr;

        mutex_lock(&etnaviv_obj->lock);
        /*
         * Need to check again, as we might have raced with another thread
         * while waiting for the mutex.
         */
        if (!etnaviv_obj->vaddr)
                etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
        mutex_unlock(&etnaviv_obj->lock);

        return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
        struct page **pages;

        lockdep_assert_held(&obj->lock);

        pages = etnaviv_gem_get_pages(obj);
        if (IS_ERR(pages))
                return NULL;

        return vmap(pages, obj->base.size >> PAGE_SHIFT,
                        VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
        if (op & ETNA_PREP_READ)
                return DMA_FROM_DEVICE;
        else if (op & ETNA_PREP_WRITE)
                return DMA_TO_DEVICE;
        else
                return DMA_BIDIRECTIONAL;
}

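/*
 * Prepare the object for CPU access: make sure backing pages exist, wait
 * for (or, with ETNA_PREP_NOSYNC, just test) the reservation object's
 * fences, and for cached BOs sync the SG table for CPU access in the
 * direction implied by the prep op.
 */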
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
                struct drm_etnaviv_timespec *timeout)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct drm_device *dev = obj->dev;
        bool write = !!(op & ETNA_PREP_WRITE);
        int ret;

        if (!etnaviv_obj->sgt) {
                void *ret;

                mutex_lock(&etnaviv_obj->lock);
                ret = etnaviv_gem_get_pages(etnaviv_obj);
                mutex_unlock(&etnaviv_obj->lock);
                if (IS_ERR(ret))
                        return PTR_ERR(ret);
        }

        if (op & ETNA_PREP_NOSYNC) {
                if (!dma_resv_test_signaled(obj->resv, write))
                        return -EBUSY;
        } else {
                unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

                ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
                if (ret <= 0)
                        return ret == 0 ? -ETIMEDOUT : ret;
        }

        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
                                         etnaviv_op_to_dma_dir(op));
                etnaviv_obj->last_cpu_prep_op = op;
        }

        return 0;
}

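/*
 * Finish a CPU access window opened by etnaviv_gem_cpu_prep(): for cached
 * BOs, hand ownership of the buffer back to the device by syncing the SG
 * table in the direction recorded by the matching prep call.
 */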
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                /* fini without a prep is almost certainly a userspace error */
                WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
                dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
                        etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
                etnaviv_obj->last_cpu_prep_op = 0;
        }

        return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
        struct drm_etnaviv_timespec *timeout)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
        const char *type, struct seq_file *m)
{
        if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                seq_printf(m, "\t%9s: %s %s seq %llu\n",
                           type,
                           fence->ops->get_driver_name(fence),
                           fence->ops->get_timeline_name(fence),
                           fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct dma_resv *robj = obj->resv;
        struct dma_resv_list *fobj;
        struct dma_fence *fence;
        unsigned long off = drm_vma_node_start(&obj->vma_node);

        seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
                        etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
                        obj->name, kref_read(&obj->refcount),
                        off, etnaviv_obj->vaddr, obj->size);

        rcu_read_lock();
        fobj = dma_resv_shared_list(robj);
        if (fobj) {
                unsigned int i, shared_count = fobj->shared_count;

                for (i = 0; i < shared_count; i++) {
                        fence = rcu_dereference(fobj->shared[i]);
                        etnaviv_gem_describe_fence(fence, "Shared", m);
                }
        }

        fence = dma_resv_excl_fence(robj);
        if (fence)
                etnaviv_gem_describe_fence(fence, "Exclusive", m);
        rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
        struct seq_file *m)
{
        struct etnaviv_gem_object *etnaviv_obj;
        int count = 0;
        size_t size = 0;

        mutex_lock(&priv->gem_lock);
        list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
                struct drm_gem_object *obj = &etnaviv_obj->base;

                seq_puts(m, "   ");
                etnaviv_gem_describe(obj, m);
                count++;
                size += obj->size;
        }
        mutex_unlock(&priv->gem_lock);

        seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
        vunmap(etnaviv_obj->vaddr);
        put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
        .get_pages = etnaviv_gem_shmem_get_pages,
        .release = etnaviv_gem_shmem_release,
        .vmap = etnaviv_gem_vmap_impl,
        .mmap = etnaviv_gem_mmap_obj,
};

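/*
 * Final GEM object cleanup: remove the object from the per-device GEM
 * list, tear down any remaining GPU VA mappings, release the backing
 * pages through the per-type release hook and free the object itself.
 */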
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct etnaviv_drm_private *priv = obj->dev->dev_private;
        struct etnaviv_vram_mapping *mapping, *tmp;

        /* object should not be active */
        WARN_ON(is_active(etnaviv_obj));

        mutex_lock(&priv->gem_lock);
        list_del(&etnaviv_obj->gem_node);
        mutex_unlock(&priv->gem_lock);

        list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
                                 obj_node) {
                struct etnaviv_iommu_context *context = mapping->context;

                WARN_ON(mapping->use);

                if (context) {
                        etnaviv_iommu_unmap_gem(context, mapping);
                        etnaviv_iommu_context_put(context);
                }

                list_del(&mapping->obj_node);
                kfree(mapping);
        }

        drm_gem_free_mmap_offset(obj);
        etnaviv_obj->ops->release(etnaviv_obj);
        drm_gem_object_release(obj);

        kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
        struct etnaviv_drm_private *priv = dev->dev_private;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        mutex_lock(&priv->gem_lock);
        list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
        mutex_unlock(&priv->gem_lock);
}

static const struct vm_operations_struct vm_ops = {
        .fault = etnaviv_gem_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
        .free = etnaviv_gem_free_object,
        .pin = etnaviv_gem_prime_pin,
        .unpin = etnaviv_gem_prime_unpin,
        .get_sg_table = etnaviv_gem_prime_get_sg_table,
        .vmap = etnaviv_gem_prime_vmap,
        .mmap = etnaviv_gem_mmap,
        .vm_ops = &vm_ops,
};

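/*
 * Common part of GEM object creation: validate the cache flags, allocate
 * the etnaviv object and initialize its lock, mapping list and object
 * funcs. The caller is responsible for the drm_gem_object_init() /
 * drm_gem_private_object_init() step.
 */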
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
        const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
        struct etnaviv_gem_object *etnaviv_obj;
        unsigned sz = sizeof(*etnaviv_obj);
        bool valid = true;

        /* validate flags */
        switch (flags & ETNA_BO_CACHE_MASK) {
        case ETNA_BO_UNCACHED:
        case ETNA_BO_CACHED:
        case ETNA_BO_WC:
                break;
        default:
                valid = false;
        }

        if (!valid) {
                dev_err(dev->dev, "invalid cache flag: %x\n",
                        (flags & ETNA_BO_CACHE_MASK));
                return -EINVAL;
        }

        etnaviv_obj = kzalloc(sz, GFP_KERNEL);
        if (!etnaviv_obj)
                return -ENOMEM;

        etnaviv_obj->flags = flags;
        etnaviv_obj->ops = ops;

        mutex_init(&etnaviv_obj->lock);
        INIT_LIST_HEAD(&etnaviv_obj->vram_list);

        *obj = &etnaviv_obj->base;
        (*obj)->funcs = &etnaviv_gem_object_funcs;

        return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
        u32 size, u32 flags, u32 *handle)
{
        struct etnaviv_drm_private *priv = dev->dev_private;
        struct drm_gem_object *obj = NULL;
        int ret;

        size = PAGE_ALIGN(size);

        ret = etnaviv_gem_new_impl(dev, size, flags,
                                   &etnaviv_gem_shmem_ops, &obj);
        if (ret)
                goto fail;

        lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

        ret = drm_gem_object_init(dev, obj, size);
        if (ret)
                goto fail;

        /*
         * Our buffers are kept pinned, so allocating them from the MOVABLE
         * zone is a really bad idea, and conflicts with CMA. See comments
         * above new_inode() why this is required _and_ expected if you're
         * going to pin these pages.
         */
        mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

        etnaviv_gem_obj_add(dev, obj);

        ret = drm_gem_handle_create(file, obj, handle);

        /* drop reference from allocate - handle holds it now */
fail:
        drm_gem_object_put(obj);

        return ret;
}

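/*
 * Create an etnaviv GEM object without shmem backing or a userspace
 * handle, using drm_gem_private_object_init(). Used for objects whose
 * pages come from elsewhere (such as userptr objects), with the supplied
 * ops providing the page handling.
 */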
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
        const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
        struct drm_gem_object *obj;
        int ret;

        ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
        if (ret)
                return ret;

        drm_gem_private_object_init(dev, obj, size);

        *res = to_etnaviv_bo(obj);

        return 0;
}

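/*
 * Pin the user pages backing a userptr object with pin_user_pages_fast(),
 * looping until the whole range is pinned. Only the mm that created the
 * object may populate it; any partial pin is undone on error.
 */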
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        struct page **pvec = NULL;
        struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
        int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

        might_lock_read(&current->mm->mmap_lock);

        if (userptr->mm != current->mm)
                return -EPERM;

        pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!pvec)
                return -ENOMEM;

        do {
                unsigned num_pages = npages - pinned;
                uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
                struct page **pages = pvec + pinned;

                ret = pin_user_pages_fast(ptr, num_pages,
                                          FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM,
                                          pages);
                if (ret < 0) {
                        unpin_user_pages(pvec, pinned);
                        kvfree(pvec);
                        return ret;
                }

                pinned += ret;

        } while (pinned < npages);

        etnaviv_obj->pages = pvec;

        return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
        if (etnaviv_obj->sgt) {
                etnaviv_gem_scatterlist_unmap(etnaviv_obj);
                sg_free_table(etnaviv_obj->sgt);
                kfree(etnaviv_obj->sgt);
        }
        if (etnaviv_obj->pages) {
                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

                unpin_user_pages(etnaviv_obj->pages, npages);
                kvfree(etnaviv_obj->pages);
        }
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
                struct vm_area_struct *vma)
{
        return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
        .get_pages = etnaviv_gem_userptr_get_pages,
        .release = etnaviv_gem_userptr_release,
        .vmap = etnaviv_gem_vmap_impl,
        .mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
        uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
        struct etnaviv_gem_object *etnaviv_obj;
        int ret;

        ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
                                      &etnaviv_gem_userptr_ops, &etnaviv_obj);
        if (ret)
                return ret;

        lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

        etnaviv_obj->userptr.ptr = ptr;
        etnaviv_obj->userptr.mm = current->mm;
        etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

        etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

        ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(&etnaviv_obj->base);
        return ret;
}