linux/drivers/gpu/drm/etnaviv/etnaviv_gem.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2015-2018 Etnaviv Project
   4 */
   5
   6#include <linux/spinlock.h>
   7#include <linux/shmem_fs.h>
   8#include <linux/sched/mm.h>
   9#include <linux/sched/task.h>
  10
  11#include "etnaviv_drv.h"
  12#include "etnaviv_gem.h"
  13#include "etnaviv_gpu.h"
  14#include "etnaviv_mmu.h"
  15
  16static struct lock_class_key etnaviv_shm_lock_class;
  17static struct lock_class_key etnaviv_userptr_lock_class;
  18
  19static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
  20{
  21        struct drm_device *dev = etnaviv_obj->base.dev;
  22        struct sg_table *sgt = etnaviv_obj->sgt;
  23
  24        /*
  25         * For non-cached buffers, ensure the new pages are clean
  26         * because display controller, GPU, etc. are not coherent.
  27         */
  28        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
  29                dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
  30}
  31
  32static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
  33{
  34        struct drm_device *dev = etnaviv_obj->base.dev;
  35        struct sg_table *sgt = etnaviv_obj->sgt;
  36
  37        /*
  38         * For non-cached buffers, ensure the new pages are clean
  39         * because display controller, GPU, etc. are not coherent:
  40         *
  41         * WARNING: The DMA API does not support concurrent CPU
  42         * and device access to the memory area.  With BIDIRECTIONAL,
  43         * we will clean the cache lines which overlap the region,
  44         * and invalidate all cache lines (partially) contained in
  45         * the region.
  46         *
  47         * If you have dirty data in the overlapping cache lines,
  48         * that will corrupt the GPU-written data.  If you have
  49         * written into the remainder of the region, this can
  50         * discard those writes.
  51         */
  52        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
  53                dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
  54}
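
/*
 * The streaming DMA mapping created in etnaviv_gem_scatter_map() is what
 * etnaviv_gem_cpu_prep()/etnaviv_gem_cpu_fini() below sync for ETNA_BO_CACHED
 * objects.  Illustrative flow for a cached BO filled by the CPU (sketch only;
 * vaddr, data, size and timeout are placeholders):
 *
 *	etnaviv_gem_cpu_prep(obj, ETNA_PREP_WRITE, &timeout);
 *	  -> dma_sync_sg_for_cpu(dev->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	memcpy(vaddr, data, size);	// CPU fills the buffer
 *	etnaviv_gem_cpu_fini(obj);
 *	  -> dma_sync_sg_for_device(dev->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 */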
  55
  56/* called with etnaviv_obj->lock held */
  57static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
  58{
  59        struct drm_device *dev = etnaviv_obj->base.dev;
  60        struct page **p = drm_gem_get_pages(&etnaviv_obj->base);
  61
  62        if (IS_ERR(p)) {
  63                dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
  64                return PTR_ERR(p);
  65        }
  66
  67        etnaviv_obj->pages = p;
  68
  69        return 0;
  70}
  71
  72static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
  73{
  74        if (etnaviv_obj->sgt) {
  75                etnaviv_gem_scatterlist_unmap(etnaviv_obj);
  76                sg_free_table(etnaviv_obj->sgt);
  77                kfree(etnaviv_obj->sgt);
  78                etnaviv_obj->sgt = NULL;
  79        }
  80        if (etnaviv_obj->pages) {
  81                drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
  82                                  true, false);
  83
  84                etnaviv_obj->pages = NULL;
  85        }
  86}
  87
  88struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
  89{
  90        int ret;
  91
  92        lockdep_assert_held(&etnaviv_obj->lock);
  93
  94        if (!etnaviv_obj->pages) {
  95                ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
  96                if (ret < 0)
  97                        return ERR_PTR(ret);
  98        }
  99
 100        if (!etnaviv_obj->sgt) {
 101                struct drm_device *dev = etnaviv_obj->base.dev;
 102                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
 103                struct sg_table *sgt;
 104
 105                sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
 106                if (IS_ERR(sgt)) {
 107                        dev_err(dev->dev, "failed to allocate sgt: %ld\n",
 108                                PTR_ERR(sgt));
 109                        return ERR_CAST(sgt);
 110                }
 111
 112                etnaviv_obj->sgt = sgt;
 113
 114                etnaviv_gem_scatter_map(etnaviv_obj);
 115        }
 116
 117        return etnaviv_obj->pages;
 118}
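
/*
 * etnaviv_gem_get_pages() must be called under etnaviv_obj->lock; the usual
 * caller pattern (as in etnaviv_gem_cpu_prep() below) is:
 *
 *	mutex_lock(&etnaviv_obj->lock);
 *	pages = etnaviv_gem_get_pages(etnaviv_obj);
 *	mutex_unlock(&etnaviv_obj->lock);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 */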
 119
 120void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
 121{
 122        lockdep_assert_held(&etnaviv_obj->lock);
 123        /* when we start tracking the pin count, then do something here */
 124}
 125
 126static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
 127                struct vm_area_struct *vma)
 128{
 129        pgprot_t vm_page_prot;
 130
 131        vma->vm_flags &= ~VM_PFNMAP;
 132        vma->vm_flags |= VM_MIXEDMAP;
 133
 134        vm_page_prot = vm_get_page_prot(vma->vm_flags);
 135
 136        if (etnaviv_obj->flags & ETNA_BO_WC) {
 137                vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
 138        } else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
 139                vma->vm_page_prot = pgprot_noncached(vm_page_prot);
 140        } else {
 141                /*
 142                 * Shunt off cached objs to shmem file so they have their own
 143                 * address_space (so unmap_mapping_range does what we want,
 144                 * in particular in the case of mmap'd dmabufs)
 145                 */
 146                fput(vma->vm_file);
 147                get_file(etnaviv_obj->base.filp);
 148                vma->vm_pgoff = 0;
 149                vma->vm_file  = etnaviv_obj->base.filp;
 150
 151                vma->vm_page_prot = vm_page_prot;
 152        }
 153
 154        return 0;
 155}
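
/*
 * Summary of the vma attributes chosen above:
 *
 *	ETNA_BO_WC       -> pgprot_writecombine()
 *	ETNA_BO_UNCACHED -> pgprot_noncached()
 *	ETNA_BO_CACHED   -> default page protection, with the vma rewired to
 *			    the object's shmem file for its own address_space
 */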
 156
 157int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 158{
 159        struct etnaviv_gem_object *obj;
 160        int ret;
 161
 162        ret = drm_gem_mmap(filp, vma);
 163        if (ret) {
 164                DBG("mmap failed: %d", ret);
 165                return ret;
 166        }
 167
 168        obj = to_etnaviv_bo(vma->vm_private_data);
 169        return obj->ops->mmap(obj, vma);
 170}
 171
 172vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
 173{
 174        struct vm_area_struct *vma = vmf->vma;
 175        struct drm_gem_object *obj = vma->vm_private_data;
 176        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 177        struct page **pages, *page;
 178        pgoff_t pgoff;
 179        int err;
 180
  181        /*
  182         * Make sure we don't race a parallel update on the fault, and that
  183         * nothing moves or disappears from beneath our feet.  Note that
  184         * vmf_insert_page() is specifically coded to handle this for us.
  185         */
 186        err = mutex_lock_interruptible(&etnaviv_obj->lock);
 187        if (err)
 188                return VM_FAULT_NOPAGE;
 189        /* make sure we have pages attached now */
 190        pages = etnaviv_gem_get_pages(etnaviv_obj);
 191        mutex_unlock(&etnaviv_obj->lock);
 192
 193        if (IS_ERR(pages)) {
 194                err = PTR_ERR(pages);
 195                return vmf_error(err);
 196        }
 197
 198        /* We don't use vmf->pgoff since that has the fake offset: */
 199        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 200
 201        page = pages[pgoff];
 202
 203        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 204             page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);
 205
 206        return vmf_insert_page(vma, vmf->address, page);
 207}
 208
 209int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
 210{
 211        int ret;
 212
 213        /* Make it mmapable */
 214        ret = drm_gem_create_mmap_offset(obj);
 215        if (ret)
 216                dev_err(obj->dev->dev, "could not allocate mmap offset\n");
 217        else
 218                *offset = drm_vma_node_offset_addr(&obj->vma_node);
 219
 220        return ret;
 221}
 222
 223static struct etnaviv_vram_mapping *
 224etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
 225                             struct etnaviv_iommu *mmu)
 226{
 227        struct etnaviv_vram_mapping *mapping;
 228
 229        list_for_each_entry(mapping, &obj->vram_list, obj_node) {
 230                if (mapping->mmu == mmu)
 231                        return mapping;
 232        }
 233
 234        return NULL;
 235}
 236
 237void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
 238{
 239        struct etnaviv_gem_object *etnaviv_obj = mapping->object;
 240
 241        drm_gem_object_get(&etnaviv_obj->base);
 242
 243        mutex_lock(&etnaviv_obj->lock);
 244        WARN_ON(mapping->use == 0);
 245        mapping->use += 1;
 246        mutex_unlock(&etnaviv_obj->lock);
 247}
 248
 249void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
 250{
 251        struct etnaviv_gem_object *etnaviv_obj = mapping->object;
 252
 253        mutex_lock(&etnaviv_obj->lock);
 254        WARN_ON(mapping->use == 0);
 255        mapping->use -= 1;
 256        mutex_unlock(&etnaviv_obj->lock);
 257
 258        drm_gem_object_put_unlocked(&etnaviv_obj->base);
 259}
 260
 261struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
 262        struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
 263{
 264        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 265        struct etnaviv_vram_mapping *mapping;
 266        struct page **pages;
 267        int ret = 0;
 268
 269        mutex_lock(&etnaviv_obj->lock);
 270        mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
 271        if (mapping) {
 272                /*
 273                 * Holding the object lock prevents the use count changing
 274                 * beneath us.  If the use count is zero, the MMU might be
 275                 * reaping this object, so take the lock and re-check that
 276                 * the MMU owns this mapping to close this race.
 277                 */
 278                if (mapping->use == 0) {
 279                        mutex_lock(&gpu->mmu->lock);
 280                        if (mapping->mmu == gpu->mmu)
 281                                mapping->use += 1;
 282                        else
 283                                mapping = NULL;
 284                        mutex_unlock(&gpu->mmu->lock);
 285                        if (mapping)
 286                                goto out;
 287                } else {
 288                        mapping->use += 1;
 289                        goto out;
 290                }
 291        }
 292
 293        pages = etnaviv_gem_get_pages(etnaviv_obj);
 294        if (IS_ERR(pages)) {
 295                ret = PTR_ERR(pages);
 296                goto out;
 297        }
 298
 299        /*
 300         * See if we have a reaped vram mapping we can re-use before
 301         * allocating a fresh mapping.
 302         */
 303        mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
 304        if (!mapping) {
 305                mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
 306                if (!mapping) {
 307                        ret = -ENOMEM;
 308                        goto out;
 309                }
 310
 311                INIT_LIST_HEAD(&mapping->scan_node);
 312                mapping->object = etnaviv_obj;
 313        } else {
 314                list_del(&mapping->obj_node);
 315        }
 316
 317        mapping->mmu = gpu->mmu;
 318        mapping->use = 1;
 319
 320        ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
 321                                    mapping);
 322        if (ret < 0)
 323                kfree(mapping);
 324        else
 325                list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
 326
 327out:
 328        mutex_unlock(&etnaviv_obj->lock);
 329
 330        if (ret)
 331                return ERR_PTR(ret);
 332
 333        /* Take a reference on the object */
 334        drm_gem_object_get(obj);
 335        return mapping;
 336}
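
/*
 * Sketch of how a submission-style caller uses the mapping API above (error
 * handling trimmed; the real user lives in etnaviv_gem_submit.c):
 *
 *	mapping = etnaviv_gem_mapping_get(obj, gpu);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	... hand mapping->iova to the GPU ...
 *	etnaviv_gem_mapping_unreference(mapping);
 */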
 337
 338void *etnaviv_gem_vmap(struct drm_gem_object *obj)
 339{
 340        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 341
 342        if (etnaviv_obj->vaddr)
 343                return etnaviv_obj->vaddr;
 344
 345        mutex_lock(&etnaviv_obj->lock);
 346        /*
 347         * Need to check again, as we might have raced with another thread
 348         * while waiting for the mutex.
 349         */
 350        if (!etnaviv_obj->vaddr)
 351                etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
 352        mutex_unlock(&etnaviv_obj->lock);
 353
 354        return etnaviv_obj->vaddr;
 355}
 356
 357static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
 358{
 359        struct page **pages;
 360
 361        lockdep_assert_held(&obj->lock);
 362
 363        pages = etnaviv_gem_get_pages(obj);
 364        if (IS_ERR(pages))
 365                return NULL;
 366
 367        return vmap(pages, obj->base.size >> PAGE_SHIFT,
 368                        VM_MAP, pgprot_writecombine(PAGE_KERNEL));
 369}
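
/*
 * The kernel mapping is created once, cached in etnaviv_obj->vaddr for the
 * lifetime of the object, and torn down from the per-object ops->release hook
 * (e.g. the vunmap() in etnaviv_gem_shmem_release() below).
 */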
 370
 371static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
 372{
 373        if (op & ETNA_PREP_READ)
 374                return DMA_FROM_DEVICE;
 375        else if (op & ETNA_PREP_WRITE)
 376                return DMA_TO_DEVICE;
 377        else
 378                return DMA_BIDIRECTIONAL;
 379}
 380
 381int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
 382                struct timespec *timeout)
 383{
 384        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 385        struct drm_device *dev = obj->dev;
 386        bool write = !!(op & ETNA_PREP_WRITE);
 387        int ret;
 388
 389        if (!etnaviv_obj->sgt) {
 390                void *ret;
 391
 392                mutex_lock(&etnaviv_obj->lock);
 393                ret = etnaviv_gem_get_pages(etnaviv_obj);
 394                mutex_unlock(&etnaviv_obj->lock);
 395                if (IS_ERR(ret))
 396                        return PTR_ERR(ret);
 397        }
 398
 399        if (op & ETNA_PREP_NOSYNC) {
 400                if (!reservation_object_test_signaled_rcu(obj->resv,
 401                                                          write))
 402                        return -EBUSY;
 403        } else {
 404                unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
 405
 406                ret = reservation_object_wait_timeout_rcu(obj->resv,
 407                                                          write, true, remain);
 408                if (ret <= 0)
 409                        return ret == 0 ? -ETIMEDOUT : ret;
 410        }
 411
 412        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
 413                dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
 414                                    etnaviv_obj->sgt->nents,
 415                                    etnaviv_op_to_dma_dir(op));
 416                etnaviv_obj->last_cpu_prep_op = op;
 417        }
 418
 419        return 0;
 420}
 421
 422int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
 423{
 424        struct drm_device *dev = obj->dev;
 425        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 426
 427        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
 428                /* fini without a prep is almost certainly a userspace error */
 429                WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
 430                dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
 431                        etnaviv_obj->sgt->nents,
 432                        etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
 433                etnaviv_obj->last_cpu_prep_op = 0;
 434        }
 435
 436        return 0;
 437}
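
/*
 * Userspace reaches the two functions above through the ETNAVIV_GEM_CPU_PREP
 * and ETNAVIV_GEM_CPU_FINI ioctls, roughly (sketch, error handling omitted;
 * fd, bo_handle and timeout are placeholders):
 *
 *	struct drm_etnaviv_gem_cpu_prep prep = {
 *		.handle = bo_handle,
 *		.op = ETNA_PREP_WRITE,
 *		.timeout = timeout,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_CPU_PREP, &prep);
 *	... CPU access to the mmap'ed BO ...
 *	struct drm_etnaviv_gem_cpu_fini fini = { .handle = bo_handle };
 *	drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_CPU_FINI, &fini);
 */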
 438
 439int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
 440        struct timespec *timeout)
 441{
 442        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 443
 444        return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
 445}
 446
 447#ifdef CONFIG_DEBUG_FS
 448static void etnaviv_gem_describe_fence(struct dma_fence *fence,
 449        const char *type, struct seq_file *m)
 450{
 451        if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 452                seq_printf(m, "\t%9s: %s %s seq %llu\n",
 453                           type,
 454                           fence->ops->get_driver_name(fence),
 455                           fence->ops->get_timeline_name(fence),
 456                           fence->seqno);
 457}
 458
 459static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 460{
 461        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 462        struct reservation_object *robj = obj->resv;
 463        struct reservation_object_list *fobj;
 464        struct dma_fence *fence;
 465        unsigned long off = drm_vma_node_start(&obj->vma_node);
 466
 467        seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
 468                        etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
 469                        obj->name, kref_read(&obj->refcount),
 470                        off, etnaviv_obj->vaddr, obj->size);
 471
 472        rcu_read_lock();
 473        fobj = rcu_dereference(robj->fence);
 474        if (fobj) {
 475                unsigned int i, shared_count = fobj->shared_count;
 476
 477                for (i = 0; i < shared_count; i++) {
 478                        fence = rcu_dereference(fobj->shared[i]);
 479                        etnaviv_gem_describe_fence(fence, "Shared", m);
 480                }
 481        }
 482
 483        fence = rcu_dereference(robj->fence_excl);
 484        if (fence)
 485                etnaviv_gem_describe_fence(fence, "Exclusive", m);
 486        rcu_read_unlock();
 487}
 488
 489void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
 490        struct seq_file *m)
 491{
 492        struct etnaviv_gem_object *etnaviv_obj;
 493        int count = 0;
 494        size_t size = 0;
 495
 496        mutex_lock(&priv->gem_lock);
 497        list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
 498                struct drm_gem_object *obj = &etnaviv_obj->base;
 499
 500                seq_puts(m, "   ");
 501                etnaviv_gem_describe(obj, m);
 502                count++;
 503                size += obj->size;
 504        }
 505        mutex_unlock(&priv->gem_lock);
 506
 507        seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
 508}
 509#endif
 510
 511static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
 512{
 513        vunmap(etnaviv_obj->vaddr);
 514        put_pages(etnaviv_obj);
 515}
 516
 517static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
 518        .get_pages = etnaviv_gem_shmem_get_pages,
 519        .release = etnaviv_gem_shmem_release,
 520        .vmap = etnaviv_gem_vmap_impl,
 521        .mmap = etnaviv_gem_mmap_obj,
 522};
 523
 524void etnaviv_gem_free_object(struct drm_gem_object *obj)
 525{
 526        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 527        struct etnaviv_drm_private *priv = obj->dev->dev_private;
 528        struct etnaviv_vram_mapping *mapping, *tmp;
 529
 530        /* object should not be active */
 531        WARN_ON(is_active(etnaviv_obj));
 532
 533        mutex_lock(&priv->gem_lock);
 534        list_del(&etnaviv_obj->gem_node);
 535        mutex_unlock(&priv->gem_lock);
 536
 537        list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
 538                                 obj_node) {
 539                struct etnaviv_iommu *mmu = mapping->mmu;
 540
 541                WARN_ON(mapping->use);
 542
 543                if (mmu)
 544                        etnaviv_iommu_unmap_gem(mmu, mapping);
 545
 546                list_del(&mapping->obj_node);
 547                kfree(mapping);
 548        }
 549
 550        drm_gem_free_mmap_offset(obj);
 551        etnaviv_obj->ops->release(etnaviv_obj);
 552        drm_gem_object_release(obj);
 553
 554        kfree(etnaviv_obj);
 555}
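
/*
 * Teardown above proceeds in three steps: unlink the object from the global
 * GEM list, unmap and free every per-MMU mapping, then release the backing
 * pages through ops->release() before freeing the object itself.
 */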
 556
 557void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
 558{
 559        struct etnaviv_drm_private *priv = dev->dev_private;
 560        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 561
 562        mutex_lock(&priv->gem_lock);
 563        list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
 564        mutex_unlock(&priv->gem_lock);
 565}
 566
 567static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
 568        struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
 569        struct drm_gem_object **obj)
 570{
 571        struct etnaviv_gem_object *etnaviv_obj;
 572        unsigned sz = sizeof(*etnaviv_obj);
 573        bool valid = true;
 574
 575        /* validate flags */
 576        switch (flags & ETNA_BO_CACHE_MASK) {
 577        case ETNA_BO_UNCACHED:
 578        case ETNA_BO_CACHED:
 579        case ETNA_BO_WC:
 580                break;
 581        default:
 582                valid = false;
 583        }
 584
 585        if (!valid) {
 586                dev_err(dev->dev, "invalid cache flag: %x\n",
 587                        (flags & ETNA_BO_CACHE_MASK));
 588                return -EINVAL;
 589        }
 590
 591        etnaviv_obj = kzalloc(sz, GFP_KERNEL);
 592        if (!etnaviv_obj)
 593                return -ENOMEM;
 594
 595        etnaviv_obj->flags = flags;
 596        etnaviv_obj->ops = ops;
 597        if (robj)
 598                etnaviv_obj->base.resv = robj;
 599
 600        mutex_init(&etnaviv_obj->lock);
 601        INIT_LIST_HEAD(&etnaviv_obj->vram_list);
 602
 603        *obj = &etnaviv_obj->base;
 604
 605        return 0;
 606}
 607
 608/* convenience method to construct a GEM buffer object, and userspace handle */
 609int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 610        u32 size, u32 flags, u32 *handle)
 611{
 612        struct drm_gem_object *obj = NULL;
 613        int ret;
 614
 615        size = PAGE_ALIGN(size);
 616
 617        ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
 618                                   &etnaviv_gem_shmem_ops, &obj);
 619        if (ret)
 620                goto fail;
 621
 622        lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);
 623
 624        ret = drm_gem_object_init(dev, obj, size);
 625        if (ret)
 626                goto fail;
 627
  628        /*
  629         * Our buffers are kept pinned, so allocating them from the MOVABLE
  630         * zone is a really bad idea, and conflicts with CMA. See the
  631         * comments above new_inode() for why this is required _and_
  632         * expected if you're going to pin these pages.
  633         */
 634        mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
 635                             __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 636
 637        etnaviv_gem_obj_add(dev, obj);
 638
 639        ret = drm_gem_handle_create(file, obj, handle);
 640
 641        /* drop reference from allocate - handle holds it now */
 642fail:
 643        drm_gem_object_put_unlocked(obj);
 644
 645        return ret;
 646}
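
/*
 * This backs the ETNAVIV_GEM_NEW ioctl; from userspace, roughly (sketch,
 * error handling omitted, fd is a placeholder):
 *
 *	struct drm_etnaviv_gem_new req = {
 *		.size = 4096,
 *		.flags = ETNA_BO_WC,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_NEW, &req);
 *	// req.handle now refers to the new BO
 */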
 647
 648int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
 649        struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
 650        struct etnaviv_gem_object **res)
 651{
 652        struct drm_gem_object *obj;
 653        int ret;
 654
 655        ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
 656        if (ret)
 657                return ret;
 658
 659        drm_gem_private_object_init(dev, obj, size);
 660
 661        *res = to_etnaviv_bo(obj);
 662
 663        return 0;
 664}
 665
 666static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
 667{
 668        struct page **pvec = NULL;
 669        struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
 670        int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
 671
 672        might_lock_read(&current->mm->mmap_sem);
 673
 674        if (userptr->mm != current->mm)
 675                return -EPERM;
 676
 677        pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
 678        if (!pvec)
 679                return -ENOMEM;
 680
 681        do {
 682                unsigned num_pages = npages - pinned;
 683                uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
 684                struct page **pages = pvec + pinned;
 685
 686                ret = get_user_pages_fast(ptr, num_pages,
 687                                          !userptr->ro ? FOLL_WRITE : 0, pages);
 688                if (ret < 0) {
 689                        release_pages(pvec, pinned);
 690                        kvfree(pvec);
 691                        return ret;
 692                }
 693
 694                pinned += ret;
 695
 696        } while (pinned < npages);
 697
 698        etnaviv_obj->pages = pvec;
 699
 700        return 0;
 701}
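
/*
 * get_user_pages_fast() may pin fewer pages than requested, hence the loop
 * above; on error the pages pinned so far are released again.  FOLL_WRITE is
 * only requested for userptr BOs that were not created read-only.
 */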
 702
 703static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
 704{
 705        if (etnaviv_obj->sgt) {
 706                etnaviv_gem_scatterlist_unmap(etnaviv_obj);
 707                sg_free_table(etnaviv_obj->sgt);
 708                kfree(etnaviv_obj->sgt);
 709        }
 710        if (etnaviv_obj->pages) {
 711                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
 712
 713                release_pages(etnaviv_obj->pages, npages);
 714                kvfree(etnaviv_obj->pages);
 715        }
 716}
 717
 718static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
 719                struct vm_area_struct *vma)
 720{
 721        return -EINVAL;
 722}
 723
 724static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
 725        .get_pages = etnaviv_gem_userptr_get_pages,
 726        .release = etnaviv_gem_userptr_release,
 727        .vmap = etnaviv_gem_vmap_impl,
 728        .mmap = etnaviv_gem_userptr_mmap_obj,
 729};
 730
 731int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
 732        uintptr_t ptr, u32 size, u32 flags, u32 *handle)
 733{
 734        struct etnaviv_gem_object *etnaviv_obj;
 735        int ret;
 736
 737        ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
 738                                      &etnaviv_gem_userptr_ops, &etnaviv_obj);
 739        if (ret)
 740                return ret;
 741
 742        lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);
 743
 744        etnaviv_obj->userptr.ptr = ptr;
 745        etnaviv_obj->userptr.mm = current->mm;
 746        etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
 747
 748        etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
 749
 750        ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
 751
 752        /* drop reference from allocate - handle holds it now */
 753        drm_gem_object_put_unlocked(&etnaviv_obj->base);
 754        return ret;
 755}
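
/*
 * This backs the ETNAVIV_GEM_USERPTR ioctl.  Userptr BOs are always treated
 * as ETNA_BO_CACHED, cannot be mmap'ed through the GEM offset (the mmap hook
 * returns -EINVAL), and their pages can only be populated from the mm that
 * created them (see the -EPERM check in etnaviv_gem_userptr_get_pages()).
 */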
 756