linux/drivers/gpu/drm/etnaviv/etnaviv_gem.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct sg_table *sgt = etnaviv_obj->sgt;

        /*
         * For non-cached buffers, ensure the new pages are clean
         * because display controller, GPU, etc. are not coherent.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
                dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct sg_table *sgt = etnaviv_obj->sgt;

        /*
         * For non-cached buffers, ensure the new pages are clean
         * because display controller, GPU, etc. are not coherent:
         *
         * WARNING: The DMA API does not support concurrent CPU
         * and device access to the memory area.  With BIDIRECTIONAL,
         * we will clean the cache lines which overlap the region,
         * and invalidate all cache lines (partially) contained in
         * the region.
         *
         * If you have dirty data in the overlapping cache lines,
         * that will corrupt the GPU-written data.  If you have
         * written into the remainder of the region, this can
         * discard those writes.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
                dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

        if (IS_ERR(p)) {
                dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
                return PTR_ERR(p);
        }

        etnaviv_obj->pages = p;

        return 0;
}

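/*
 * Drop the DMA mapping and scatter/gather table, then return the backing
 * pages to shmem, marking them dirty.
 */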
static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        if (etnaviv_obj->sgt) {
                etnaviv_gem_scatterlist_unmap(etnaviv_obj);
                sg_free_table(etnaviv_obj->sgt);
                kfree(etnaviv_obj->sgt);
                etnaviv_obj->sgt = NULL;
        }
        if (etnaviv_obj->pages) {
                drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
                                  true, false);

                etnaviv_obj->pages = NULL;
        }
}

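/*
 * Get the backing pages of the object and, on first use, build and map the
 * scatter/gather table for device access.  Must be called with
 * etnaviv_obj->lock held; returns the page array or an ERR_PTR on failure.
 */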
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        int ret;

        lockdep_assert_held(&etnaviv_obj->lock);

        if (!etnaviv_obj->pages) {
                ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
                if (ret < 0)
                        return ERR_PTR(ret);
        }

        if (!etnaviv_obj->sgt) {
                struct drm_device *dev = etnaviv_obj->base.dev;
                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
                struct sg_table *sgt;

                sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
                if (IS_ERR(sgt)) {
                        dev_err(dev->dev, "failed to allocate sgt: %ld\n",
                                PTR_ERR(sgt));
                        return ERR_CAST(sgt);
                }

                etnaviv_obj->sgt = sgt;

                etnaviv_gem_scatter_map(etnaviv_obj);
        }

        return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        lockdep_assert_held(&etnaviv_obj->lock);
        /* when we start tracking the pin count, then do something here */
}

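/*
 * Set up a userspace mapping: clear VM_PFNMAP, use VM_MIXEDMAP so pages can
 * be inserted on fault, and pick the page protection from the BO cache
 * flags.  Fully cached objects are redirected to the shmem file so they use
 * its own address_space.
 */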
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
                struct vm_area_struct *vma)
{
        pgprot_t vm_page_prot;

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        vm_page_prot = vm_get_page_prot(vma->vm_flags);

        if (etnaviv_obj->flags & ETNA_BO_WC) {
                vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
        } else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
                vma->vm_page_prot = pgprot_noncached(vm_page_prot);
        } else {
                /*
                 * Shunt off cached objs to shmem file so they have their own
                 * address_space (so unmap_mapping_range does what we want,
                 * in particular in the case of mmap'd dmabufs)
                 */
                fput(vma->vm_file);
                get_file(etnaviv_obj->base.filp);
                vma->vm_pgoff = 0;
                vma->vm_file  = etnaviv_obj->base.filp;

                vma->vm_page_prot = vm_page_prot;
        }

        return 0;
}

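/*
 * mmap() entry point: let the DRM core look up the object behind the fake
 * offset and set up the vma, then hand off to the per-object mmap hook.
 */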
int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct etnaviv_gem_object *obj;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret) {
                DBG("mmap failed: %d", ret);
                return ret;
        }

        obj = to_etnaviv_bo(vma->vm_private_data);
        return obj->ops->mmap(obj, vma);
}

vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct page **pages, *page;
        pgoff_t pgoff;
        int err;

        /*
         * Make sure we don't parallel update on a fault, nor move or remove
         * something from beneath our feet.  Note that vmf_insert_page() is
         * specifically coded to take care of this, so we don't have to.
         */
        err = mutex_lock_interruptible(&etnaviv_obj->lock);
        if (err)
                return VM_FAULT_NOPAGE;
        /* make sure we have pages attached now */
        pages = etnaviv_gem_get_pages(etnaviv_obj);
        mutex_unlock(&etnaviv_obj->lock);

        if (IS_ERR(pages)) {
                err = PTR_ERR(pages);
                return vmf_error(err);
        }

        /* We don't use vmf->pgoff since that has the fake offset: */
        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

        page = pages[pgoff];

        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
             page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

        return vmf_insert_page(vma, vmf->address, page);
}

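/* Create the fake mmap offset for the object and report it to the caller. */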
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
        int ret;

        /* Make it mmapable */
        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                dev_err(obj->dev->dev, "could not allocate mmap offset\n");
        else
                *offset = drm_vma_node_offset_addr(&obj->vma_node);

        return ret;
}

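/*
 * Look up an existing vram mapping of the object for the given MMU context;
 * returns NULL if the object is not mapped into that context.
 */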
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
                             struct etnaviv_iommu *mmu)
{
        struct etnaviv_vram_mapping *mapping;

        list_for_each_entry(mapping, &obj->vram_list, obj_node) {
                if (mapping->mmu == mmu)
                        return mapping;
        }

        return NULL;
}

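/*
 * Take an extra use reference on a vram mapping; also grabs a reference on
 * the backing GEM object.
 */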
void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        drm_gem_object_get(&etnaviv_obj->base);

        mutex_lock(&etnaviv_obj->lock);
        WARN_ON(mapping->use == 0);
        mapping->use += 1;
        mutex_unlock(&etnaviv_obj->lock);
}

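/*
 * Drop a vram mapping use reference and the GEM object reference taken
 * along with it.
 */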
void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        mutex_lock(&etnaviv_obj->lock);
        WARN_ON(mapping->use == 0);
        mapping->use -= 1;
        mutex_unlock(&etnaviv_obj->lock);

        drm_gem_object_put_unlocked(&etnaviv_obj->base);
}

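/*
 * Get (and reference) a GPU mapping of the object, reusing an existing or
 * reaped mapping when possible and creating a fresh IOMMU mapping
 * otherwise.  On success a reference is taken on the GEM object; it is
 * dropped again by etnaviv_gem_mapping_unreference().
 */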
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
        struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct etnaviv_vram_mapping *mapping;
        struct page **pages;
        int ret = 0;

        mutex_lock(&etnaviv_obj->lock);
        mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
        if (mapping) {
                /*
                 * Holding the object lock prevents the use count changing
                 * beneath us.  If the use count is zero, the MMU might be
                 * reaping this object, so take the lock and re-check that
                 * the MMU owns this mapping to close this race.
                 */
                if (mapping->use == 0) {
                        mutex_lock(&gpu->mmu->lock);
                        if (mapping->mmu == gpu->mmu)
                                mapping->use += 1;
                        else
                                mapping = NULL;
                        mutex_unlock(&gpu->mmu->lock);
                        if (mapping)
                                goto out;
                } else {
                        mapping->use += 1;
                        goto out;
                }
        }

        pages = etnaviv_gem_get_pages(etnaviv_obj);
        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                goto out;
        }

        /*
         * See if we have a reaped vram mapping we can re-use before
         * allocating a fresh mapping.
         */
        mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
        if (!mapping) {
                mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
                if (!mapping) {
                        ret = -ENOMEM;
                        goto out;
                }

                INIT_LIST_HEAD(&mapping->scan_node);
                mapping->object = etnaviv_obj;
        } else {
                list_del(&mapping->obj_node);
        }

        mapping->mmu = gpu->mmu;
        mapping->use = 1;

        ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
                                    mapping);
        if (ret < 0)
                kfree(mapping);
        else
                list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
        mutex_unlock(&etnaviv_obj->lock);

        if (ret)
                return ERR_PTR(ret);

        /* Take a reference on the object */
        drm_gem_object_get(obj);
        return mapping;
}

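/*
 * Return a kernel virtual address for the object, creating the vmap lazily
 * on first use.
 */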
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        if (etnaviv_obj->vaddr)
                return etnaviv_obj->vaddr;

        mutex_lock(&etnaviv_obj->lock);
        /*
         * Need to check again, as we might have raced with another thread
         * while waiting for the mutex.
         */
        if (!etnaviv_obj->vaddr)
                etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
        mutex_unlock(&etnaviv_obj->lock);

        return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
        struct page **pages;

        lockdep_assert_held(&obj->lock);

        pages = etnaviv_gem_get_pages(obj);
        if (IS_ERR(pages))
                return NULL;

        return vmap(pages, obj->base.size >> PAGE_SHIFT,
                        VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
        if (op & ETNA_PREP_READ)
                return DMA_FROM_DEVICE;
        else if (op & ETNA_PREP_WRITE)
                return DMA_TO_DEVICE;
        else
                return DMA_BIDIRECTIONAL;
}

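/*
 * Prepare the object for CPU access: wait for (or, with ETNA_PREP_NOSYNC,
 * just test) outstanding GPU work and, for cached buffers, sync the pages
 * back to the CPU in the requested direction.
 */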
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
                struct timespec *timeout)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct drm_device *dev = obj->dev;
        bool write = !!(op & ETNA_PREP_WRITE);
        int ret;

        if (!etnaviv_obj->sgt) {
                void *ret;

                mutex_lock(&etnaviv_obj->lock);
                ret = etnaviv_gem_get_pages(etnaviv_obj);
                mutex_unlock(&etnaviv_obj->lock);
                if (IS_ERR(ret))
                        return PTR_ERR(ret);
        }

        if (op & ETNA_PREP_NOSYNC) {
                if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
                                                          write))
                        return -EBUSY;
        } else {
                unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

                ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
                                                          write, true, remain);
                if (ret <= 0)
                        return ret == 0 ? -ETIMEDOUT : ret;
        }

        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
                                    etnaviv_obj->sgt->nents,
                                    etnaviv_op_to_dma_dir(op));
                etnaviv_obj->last_cpu_prep_op = op;
        }

        return 0;
}

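/*
 * Finish a CPU access window: for cached buffers, hand the pages back to
 * the device by syncing in the direction recorded by the matching prep
 * call.
 */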
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                /* fini without a prep is almost certainly a userspace error */
                WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
                dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
                        etnaviv_obj->sgt->nents,
                        etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
                etnaviv_obj->last_cpu_prep_op = 0;
        }

        return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
        struct timespec *timeout)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
        const char *type, struct seq_file *m)
{
        if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                seq_printf(m, "\t%9s: %s %s seq %u\n",
                           type,
                           fence->ops->get_driver_name(fence),
                           fence->ops->get_timeline_name(fence),
                           fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct reservation_object *robj = etnaviv_obj->resv;
        struct reservation_object_list *fobj;
        struct dma_fence *fence;
        unsigned long off = drm_vma_node_start(&obj->vma_node);

        seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
                        etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
                        obj->name, kref_read(&obj->refcount),
                        off, etnaviv_obj->vaddr, obj->size);

        rcu_read_lock();
        fobj = rcu_dereference(robj->fence);
        if (fobj) {
                unsigned int i, shared_count = fobj->shared_count;

                for (i = 0; i < shared_count; i++) {
                        fence = rcu_dereference(fobj->shared[i]);
                        etnaviv_gem_describe_fence(fence, "Shared", m);
                }
        }

        fence = rcu_dereference(robj->fence_excl);
        if (fence)
                etnaviv_gem_describe_fence(fence, "Exclusive", m);
        rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
        struct seq_file *m)
{
        struct etnaviv_gem_object *etnaviv_obj;
        int count = 0;
        size_t size = 0;

        mutex_lock(&priv->gem_lock);
        list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
                struct drm_gem_object *obj = &etnaviv_obj->base;

                seq_puts(m, "   ");
                etnaviv_gem_describe(obj, m);
                count++;
                size += obj->size;
        }
        mutex_unlock(&priv->gem_lock);

        seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
        vunmap(etnaviv_obj->vaddr);
        put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
        .get_pages = etnaviv_gem_shmem_get_pages,
        .release = etnaviv_gem_shmem_release,
        .vmap = etnaviv_gem_vmap_impl,
        .mmap = etnaviv_gem_mmap_obj,
};

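/*
 * Final GEM object cleanup: remove the object from the global list, tear
 * down any remaining MMU mappings and release the backing storage.
 */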
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct etnaviv_drm_private *priv = obj->dev->dev_private;
        struct etnaviv_vram_mapping *mapping, *tmp;

        /* object should not be active */
        WARN_ON(is_active(etnaviv_obj));

        mutex_lock(&priv->gem_lock);
        list_del(&etnaviv_obj->gem_node);
        mutex_unlock(&priv->gem_lock);

        list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
                                 obj_node) {
                struct etnaviv_iommu *mmu = mapping->mmu;

                WARN_ON(mapping->use);

                if (mmu)
                        etnaviv_iommu_unmap_gem(mmu, mapping);

                list_del(&mapping->obj_node);
                kfree(mapping);
        }

        drm_gem_free_mmap_offset(obj);
        etnaviv_obj->ops->release(etnaviv_obj);
        if (etnaviv_obj->resv == &etnaviv_obj->_resv)
                reservation_object_fini(&etnaviv_obj->_resv);
        drm_gem_object_release(obj);

        kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
        struct etnaviv_drm_private *priv = dev->dev_private;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        mutex_lock(&priv->gem_lock);
        list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
        mutex_unlock(&priv->gem_lock);
}

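/*
 * Common allocation path: validate the cache flags and set up the generic
 * parts of an etnaviv GEM object before the caller initialises the backing
 * storage.
 */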
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
        struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
        struct drm_gem_object **obj)
{
        struct etnaviv_gem_object *etnaviv_obj;
        unsigned sz = sizeof(*etnaviv_obj);
        bool valid = true;

        /* validate flags */
        switch (flags & ETNA_BO_CACHE_MASK) {
        case ETNA_BO_UNCACHED:
        case ETNA_BO_CACHED:
        case ETNA_BO_WC:
                break;
        default:
                valid = false;
        }

        if (!valid) {
                dev_err(dev->dev, "invalid cache flag: %x\n",
                        (flags & ETNA_BO_CACHE_MASK));
                return -EINVAL;
        }

        etnaviv_obj = kzalloc(sz, GFP_KERNEL);
        if (!etnaviv_obj)
                return -ENOMEM;

        etnaviv_obj->flags = flags;
        etnaviv_obj->ops = ops;
        if (robj) {
                etnaviv_obj->resv = robj;
        } else {
                etnaviv_obj->resv = &etnaviv_obj->_resv;
                reservation_object_init(&etnaviv_obj->_resv);
        }

        mutex_init(&etnaviv_obj->lock);
        INIT_LIST_HEAD(&etnaviv_obj->vram_list);

        *obj = &etnaviv_obj->base;

        return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
        u32 size, u32 flags, u32 *handle)
{
        struct drm_gem_object *obj = NULL;
        int ret;

        size = PAGE_ALIGN(size);

        ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
                                   &etnaviv_gem_shmem_ops, &obj);
        if (ret)
                goto fail;

        lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

        ret = drm_gem_object_init(dev, obj, size);
        if (ret == 0) {
                struct address_space *mapping;

                /*
                 * Our buffers are kept pinned, so allocating them
                 * from the MOVABLE zone is a really bad idea, and
                 * conflicts with CMA. See comments above new_inode()
                 * why this is required _and_ expected if you're
                 * going to pin these pages.
                 */
                mapping = obj->filp->f_mapping;
                mapping_set_gfp_mask(mapping, GFP_HIGHUSER |
                                     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
        }

        if (ret)
                goto fail;

        etnaviv_gem_obj_add(dev, obj);

        ret = drm_gem_handle_create(file, obj, handle);

        /* drop reference from allocate - handle holds it now */
fail:
        drm_gem_object_put_unlocked(obj);

        return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
        struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
        struct etnaviv_gem_object **res)
{
        struct drm_gem_object *obj;
        int ret;

        ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
        if (ret)
                return ret;

        drm_gem_private_object_init(dev, obj, size);

        *res = to_etnaviv_bo(obj);

        return 0;
}

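/*
 * Pin the pages of a userptr object with get_user_pages_fast(), looping
 * until the whole range is resident.  Only the mm that created the object
 * may populate it.
 */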
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        struct page **pvec = NULL;
        struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
        int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

        might_lock_read(&current->mm->mmap_sem);

        if (userptr->mm != current->mm)
                return -EPERM;

        pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!pvec)
                return -ENOMEM;

        do {
                unsigned num_pages = npages - pinned;
                uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
                struct page **pages = pvec + pinned;

                ret = get_user_pages_fast(ptr, num_pages,
                                          !userptr->ro ? FOLL_WRITE : 0, pages);
                if (ret < 0) {
                        release_pages(pvec, pinned);
                        kvfree(pvec);
                        return ret;
                }

                pinned += ret;

        } while (pinned < npages);

        etnaviv_obj->pages = pvec;

        return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
        if (etnaviv_obj->sgt) {
                etnaviv_gem_scatterlist_unmap(etnaviv_obj);
                sg_free_table(etnaviv_obj->sgt);
                kfree(etnaviv_obj->sgt);
        }
        if (etnaviv_obj->pages) {
                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

                release_pages(etnaviv_obj->pages, npages);
                kvfree(etnaviv_obj->pages);
        }
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
                struct vm_area_struct *vma)
{
        return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
        .get_pages = etnaviv_gem_userptr_get_pages,
        .release = etnaviv_gem_userptr_release,
        .vmap = etnaviv_gem_vmap_impl,
        .mmap = etnaviv_gem_userptr_mmap_obj,
};

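/*
 * Create a GEM object backed by anonymous userspace memory and return a
 * handle for it.  Userptr objects are always treated as cached BOs.
 */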
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
        uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
        struct etnaviv_gem_object *etnaviv_obj;
        int ret;

        ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
                                      &etnaviv_gem_userptr_ops, &etnaviv_obj);
        if (ret)
                return ret;

        lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

        etnaviv_obj->userptr.ptr = ptr;
        etnaviv_obj->userptr.mm = current->mm;
        etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

        etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

        ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put_unlocked(&etnaviv_obj->base);
        return ret;
}