linux/drivers/gpu/drm/msm/msm_gem.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2013 Red Hat
   4 * Author: Rob Clark <robdclark@gmail.com>
   5 */
   6
   7#include <linux/dma-map-ops.h>
   8#include <linux/spinlock.h>
   9#include <linux/shmem_fs.h>
  10#include <linux/dma-buf.h>
  11#include <linux/pfn_t.h>
  12
  13#include <drm/drm_prime.h>
  14
  15#include "msm_drv.h"
  16#include "msm_fence.h"
  17#include "msm_gem.h"
  18#include "msm_gpu.h"
  19#include "msm_mmu.h"
  20
  21static void update_inactive(struct msm_gem_object *msm_obj);
  22
  23static dma_addr_t physaddr(struct drm_gem_object *obj)
  24{
  25        struct msm_gem_object *msm_obj = to_msm_bo(obj);
  26        struct msm_drm_private *priv = obj->dev->dev_private;
  27        return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
  28                        priv->vram.paddr;
  29}
  30
  31static bool use_pages(struct drm_gem_object *obj)
  32{
  33        struct msm_gem_object *msm_obj = to_msm_bo(obj);
  34        return !msm_obj->vram_node;
  35}
  36
  37/*
  38 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
  39 * API.  Really GPU cache is out of scope here (handled on cmdstream)
  40 * and all we need to do is invalidate newly allocated pages before
  41 * mapping to CPU as uncached/writecombine.
  42 *
   43 * On top of this, we have the added headache that, depending on
  44 * display generation, the display's iommu may be wired up to either
  45 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
  46 * that here we either have dma-direct or iommu ops.
  47 *
   48 * Let this be a cautionary tale of abstraction gone wrong.
  49 */
  50
  51static void sync_for_device(struct msm_gem_object *msm_obj)
  52{
  53        struct device *dev = msm_obj->base.dev->dev;
  54
  55        dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
  56}
  57
  58static void sync_for_cpu(struct msm_gem_object *msm_obj)
  59{
  60        struct device *dev = msm_obj->base.dev->dev;
  61
  62        dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
  63}
  64
  65/* allocate pages from VRAM carveout, used when no IOMMU: */
  66static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
  67{
  68        struct msm_gem_object *msm_obj = to_msm_bo(obj);
  69        struct msm_drm_private *priv = obj->dev->dev_private;
  70        dma_addr_t paddr;
  71        struct page **p;
  72        int ret, i;
  73
  74        p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
  75        if (!p)
  76                return ERR_PTR(-ENOMEM);
  77
  78        spin_lock(&priv->vram.lock);
  79        ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
  80        spin_unlock(&priv->vram.lock);
  81        if (ret) {
  82                kvfree(p);
  83                return ERR_PTR(ret);
  84        }
  85
  86        paddr = physaddr(obj);
  87        for (i = 0; i < npages; i++) {
  88                p[i] = phys_to_page(paddr);
  89                paddr += PAGE_SIZE;
  90        }
  91
  92        return p;
  93}
  94
  95static struct page **get_pages(struct drm_gem_object *obj)
  96{
  97        struct msm_gem_object *msm_obj = to_msm_bo(obj);
  98
  99        GEM_WARN_ON(!msm_gem_is_locked(obj));
 100
 101        if (!msm_obj->pages) {
 102                struct drm_device *dev = obj->dev;
 103                struct page **p;
 104                int npages = obj->size >> PAGE_SHIFT;
 105
 106                if (use_pages(obj))
 107                        p = drm_gem_get_pages(obj);
 108                else
 109                        p = get_pages_vram(obj, npages);
 110
 111                if (IS_ERR(p)) {
 112                        DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
 113                                        PTR_ERR(p));
 114                        return p;
 115                }
 116
 117                msm_obj->pages = p;
 118
 119                msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
 120                if (IS_ERR(msm_obj->sgt)) {
 121                        void *ptr = ERR_CAST(msm_obj->sgt);
 122
 123                        DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
 124                        msm_obj->sgt = NULL;
 125                        return ptr;
 126                }
 127
 128                /* For non-cached buffers, ensure the new pages are clean
  129                 * because the display controller, GPU, etc. are not coherent:
 130                 */
 131                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
 132                        sync_for_device(msm_obj);
 133
 134                update_inactive(msm_obj);
 135        }
 136
 137        return msm_obj->pages;
 138}
 139
 140static void put_pages_vram(struct drm_gem_object *obj)
 141{
 142        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 143        struct msm_drm_private *priv = obj->dev->dev_private;
 144
 145        spin_lock(&priv->vram.lock);
 146        drm_mm_remove_node(msm_obj->vram_node);
 147        spin_unlock(&priv->vram.lock);
 148
 149        kvfree(msm_obj->pages);
 150}
 151
 152static void put_pages(struct drm_gem_object *obj)
 153{
 154        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 155
 156        if (msm_obj->pages) {
 157                if (msm_obj->sgt) {
 158                        /* For non-cached buffers, ensure the new
  159                         * pages are clean because the display controller,
 160                         * GPU, etc. are not coherent:
 161                         */
 162                        if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
 163                                sync_for_cpu(msm_obj);
 164
 165                        sg_free_table(msm_obj->sgt);
 166                        kfree(msm_obj->sgt);
 167                        msm_obj->sgt = NULL;
 168                }
 169
 170                if (use_pages(obj))
 171                        drm_gem_put_pages(obj, msm_obj->pages, true, false);
 172                else
 173                        put_pages_vram(obj);
 174
 175                msm_obj->pages = NULL;
 176        }
 177}
 178
 179struct page **msm_gem_get_pages(struct drm_gem_object *obj)
 180{
 181        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 182        struct page **p;
 183
 184        msm_gem_lock(obj);
 185
 186        if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
 187                msm_gem_unlock(obj);
 188                return ERR_PTR(-EBUSY);
 189        }
 190
 191        p = get_pages(obj);
 192
 193        if (!IS_ERR(p)) {
 194                msm_obj->pin_count++;
 195                update_inactive(msm_obj);
 196        }
 197
 198        msm_gem_unlock(obj);
 199        return p;
 200}
 201
 202void msm_gem_put_pages(struct drm_gem_object *obj)
 203{
 204        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 205
 206        msm_gem_lock(obj);
 207        msm_obj->pin_count--;
 208        GEM_WARN_ON(msm_obj->pin_count < 0);
 209        update_inactive(msm_obj);
 210        msm_gem_unlock(obj);
 211}
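
/*
 * Illustrative usage sketch for the get/put pair above.  The calling
 * context is an assumption; only the msm_gem_* helpers are taken from
 * this file.  Pages must stay pinned for as long as the caller touches
 * them:
 *
 *      struct page **pages = msm_gem_get_pages(obj);
 *
 *      if (IS_ERR(pages))
 *              return PTR_ERR(pages);
 *      ... walk or map the pages while they are pinned ...
 *      msm_gem_put_pages(obj);
 */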
 212
 213static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
 214{
 215        if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
 216                return pgprot_writecombine(prot);
 217        return prot;
 218}
 219
 220static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
 221{
 222        struct vm_area_struct *vma = vmf->vma;
 223        struct drm_gem_object *obj = vma->vm_private_data;
 224        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 225        struct page **pages;
 226        unsigned long pfn;
 227        pgoff_t pgoff;
 228        int err;
 229        vm_fault_t ret;
 230
 231        /*
 232         * vm_ops.open/drm_gem_mmap_obj and close get and put
  233         * a reference on obj. So, we don't need to hold one here.
 234         */
 235        err = msm_gem_lock_interruptible(obj);
 236        if (err) {
 237                ret = VM_FAULT_NOPAGE;
 238                goto out;
 239        }
 240
 241        if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
 242                msm_gem_unlock(obj);
 243                return VM_FAULT_SIGBUS;
 244        }
 245
 246        /* make sure we have pages attached now */
 247        pages = get_pages(obj);
 248        if (IS_ERR(pages)) {
 249                ret = vmf_error(PTR_ERR(pages));
 250                goto out_unlock;
 251        }
 252
 253        /* We don't use vmf->pgoff since that has the fake offset: */
 254        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 255
 256        pfn = page_to_pfn(pages[pgoff]);
 257
 258        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 259                        pfn, pfn << PAGE_SHIFT);
 260
 261        ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 262out_unlock:
 263        msm_gem_unlock(obj);
 264out:
 265        return ret;
 266}
 267
 268/** get mmap offset */
 269static uint64_t mmap_offset(struct drm_gem_object *obj)
 270{
 271        struct drm_device *dev = obj->dev;
 272        int ret;
 273
 274        GEM_WARN_ON(!msm_gem_is_locked(obj));
 275
 276        /* Make it mmapable */
 277        ret = drm_gem_create_mmap_offset(obj);
 278
 279        if (ret) {
 280                DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
 281                return 0;
 282        }
 283
 284        return drm_vma_node_offset_addr(&obj->vma_node);
 285}
 286
 287uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
 288{
 289        uint64_t offset;
 290
 291        msm_gem_lock(obj);
 292        offset = mmap_offset(obj);
 293        msm_gem_unlock(obj);
 294        return offset;
 295}
 296
 297static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
 298                struct msm_gem_address_space *aspace)
 299{
 300        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 301        struct msm_gem_vma *vma;
 302
 303        GEM_WARN_ON(!msm_gem_is_locked(obj));
 304
 305        vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 306        if (!vma)
 307                return ERR_PTR(-ENOMEM);
 308
 309        vma->aspace = aspace;
 310
 311        list_add_tail(&vma->list, &msm_obj->vmas);
 312
 313        return vma;
 314}
 315
 316static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
 317                struct msm_gem_address_space *aspace)
 318{
 319        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 320        struct msm_gem_vma *vma;
 321
 322        GEM_WARN_ON(!msm_gem_is_locked(obj));
 323
 324        list_for_each_entry(vma, &msm_obj->vmas, list) {
 325                if (vma->aspace == aspace)
 326                        return vma;
 327        }
 328
 329        return NULL;
 330}
 331
 332static void del_vma(struct msm_gem_vma *vma)
 333{
 334        if (!vma)
 335                return;
 336
 337        list_del(&vma->list);
 338        kfree(vma);
 339}
 340
 341/*
 342 * If close is true, this also closes the VMA (releasing the allocated
 343 * iova range) in addition to removing the iommu mapping.  In the eviction
 344 * case (!close), we keep the iova allocated, but only remove the iommu
 345 * mapping.
 346 */
 347static void
 348put_iova_spaces(struct drm_gem_object *obj, bool close)
 349{
 350        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 351        struct msm_gem_vma *vma;
 352
 353        GEM_WARN_ON(!msm_gem_is_locked(obj));
 354
 355        list_for_each_entry(vma, &msm_obj->vmas, list) {
 356                if (vma->aspace) {
 357                        msm_gem_purge_vma(vma->aspace, vma);
 358                        if (close)
 359                                msm_gem_close_vma(vma->aspace, vma);
 360                }
 361        }
 362}
 363
 364/* Called with msm_obj locked */
 365static void
 366put_iova_vmas(struct drm_gem_object *obj)
 367{
 368        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 369        struct msm_gem_vma *vma, *tmp;
 370
 371        GEM_WARN_ON(!msm_gem_is_locked(obj));
 372
 373        list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
 374                del_vma(vma);
 375        }
 376}
 377
 378static int get_iova_locked(struct drm_gem_object *obj,
 379                struct msm_gem_address_space *aspace, uint64_t *iova,
 380                u64 range_start, u64 range_end)
 381{
 382        struct msm_gem_vma *vma;
 383        int ret = 0;
 384
 385        GEM_WARN_ON(!msm_gem_is_locked(obj));
 386
 387        vma = lookup_vma(obj, aspace);
 388
 389        if (!vma) {
 390                vma = add_vma(obj, aspace);
 391                if (IS_ERR(vma))
 392                        return PTR_ERR(vma);
 393
 394                ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
 395                        range_start, range_end);
 396                if (ret) {
 397                        del_vma(vma);
 398                        return ret;
 399                }
 400        }
 401
 402        *iova = vma->iova;
 403        return 0;
 404}
 405
 406static int msm_gem_pin_iova(struct drm_gem_object *obj,
 407                struct msm_gem_address_space *aspace)
 408{
 409        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 410        struct msm_gem_vma *vma;
 411        struct page **pages;
 412        int ret, prot = IOMMU_READ;
 413
 414        if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
 415                prot |= IOMMU_WRITE;
 416
 417        if (msm_obj->flags & MSM_BO_MAP_PRIV)
 418                prot |= IOMMU_PRIV;
 419
 420        if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
 421                prot |= IOMMU_CACHE;
 422
 423        GEM_WARN_ON(!msm_gem_is_locked(obj));
 424
 425        if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
 426                return -EBUSY;
 427
 428        vma = lookup_vma(obj, aspace);
 429        if (GEM_WARN_ON(!vma))
 430                return -EINVAL;
 431
 432        pages = get_pages(obj);
 433        if (IS_ERR(pages))
 434                return PTR_ERR(pages);
 435
 436        ret = msm_gem_map_vma(aspace, vma, prot,
 437                        msm_obj->sgt, obj->size >> PAGE_SHIFT);
 438
 439        if (!ret)
 440                msm_obj->pin_count++;
 441
 442        return ret;
 443}
 444
 445static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
 446                struct msm_gem_address_space *aspace, uint64_t *iova,
 447                u64 range_start, u64 range_end)
 448{
 449        u64 local;
 450        int ret;
 451
 452        GEM_WARN_ON(!msm_gem_is_locked(obj));
 453
 454        ret = get_iova_locked(obj, aspace, &local,
 455                range_start, range_end);
 456
 457        if (!ret)
 458                ret = msm_gem_pin_iova(obj, aspace);
 459
 460        if (!ret)
 461                *iova = local;
 462
 463        return ret;
 464}
 465
 466/*
 467 * get iova and pin it. Should have a matching put
 468 * limits iova to specified range (in pages)
 469 */
 470int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
 471                struct msm_gem_address_space *aspace, uint64_t *iova,
 472                u64 range_start, u64 range_end)
 473{
 474        int ret;
 475
 476        msm_gem_lock(obj);
 477        ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
 478        msm_gem_unlock(obj);
 479
 480        return ret;
 481}
 482
 483int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
 484                struct msm_gem_address_space *aspace, uint64_t *iova)
 485{
 486        return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
 487}
 488
 489/* get iova and pin it. Should have a matching put */
 490int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
 491                struct msm_gem_address_space *aspace, uint64_t *iova)
 492{
 493        return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
 494}
 495
 496/*
 497 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 498 * valid for the life of the object
 499 */
 500int msm_gem_get_iova(struct drm_gem_object *obj,
 501                struct msm_gem_address_space *aspace, uint64_t *iova)
 502{
 503        int ret;
 504
 505        msm_gem_lock(obj);
 506        ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
 507        msm_gem_unlock(obj);
 508
 509        return ret;
 510}
 511
 512/* get iova without taking a reference, used in places where you have
 513 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 514 */
 515uint64_t msm_gem_iova(struct drm_gem_object *obj,
 516                struct msm_gem_address_space *aspace)
 517{
 518        struct msm_gem_vma *vma;
 519
 520        msm_gem_lock(obj);
 521        vma = lookup_vma(obj, aspace);
 522        msm_gem_unlock(obj);
 523        GEM_WARN_ON(!vma);
 524
 525        return vma ? vma->iova : 0;
 526}
 527
 528/*
 529 * Locked variant of msm_gem_unpin_iova()
 530 */
 531void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
 532                struct msm_gem_address_space *aspace)
 533{
 534        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 535        struct msm_gem_vma *vma;
 536
 537        GEM_WARN_ON(!msm_gem_is_locked(obj));
 538
 539        vma = lookup_vma(obj, aspace);
 540
 541        if (!GEM_WARN_ON(!vma)) {
 542                msm_gem_unmap_vma(aspace, vma);
 543
 544                msm_obj->pin_count--;
 545                GEM_WARN_ON(msm_obj->pin_count < 0);
 546
 547                update_inactive(msm_obj);
 548        }
 549}
 550
 551/*
  552 * Unpin an iova by updating the reference counts. The memory isn't actually
 553 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 554 * to get rid of it
 555 */
 556void msm_gem_unpin_iova(struct drm_gem_object *obj,
 557                struct msm_gem_address_space *aspace)
 558{
 559        msm_gem_lock(obj);
 560        msm_gem_unpin_iova_locked(obj, aspace);
 561        msm_gem_unlock(obj);
 562}
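
/*
 * Illustrative sketch of the expected pairing for the pin/unpin API
 * above.  The surrounding context (obj, aspace, the GPU hand-off) is
 * assumed; only the msm_gem_* calls exist in this file:
 *
 *      uint64_t iova;
 *      int ret;
 *
 *      ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *      if (ret)
 *              return ret;
 *      ... hand iova to the GPU, wait for the GPU to finish with it ...
 *      msm_gem_unpin_iova(obj, aspace);
 */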
 563
 564int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 565                struct drm_mode_create_dumb *args)
 566{
 567        args->pitch = align_pitch(args->width, args->bpp);
 568        args->size  = PAGE_ALIGN(args->pitch * args->height);
 569        return msm_gem_new_handle(dev, file, args->size,
 570                        MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
 571}
 572
 573int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 574                uint32_t handle, uint64_t *offset)
 575{
 576        struct drm_gem_object *obj;
 577        int ret = 0;
 578
 579        /* GEM does all our handle to object mapping */
 580        obj = drm_gem_object_lookup(file, handle);
 581        if (obj == NULL) {
 582                ret = -ENOENT;
 583                goto fail;
 584        }
 585
 586        *offset = msm_gem_mmap_offset(obj);
 587
 588        drm_gem_object_put(obj);
 589
 590fail:
 591        return ret;
 592}
 593
 594static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
 595{
 596        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 597        int ret = 0;
 598
 599        GEM_WARN_ON(!msm_gem_is_locked(obj));
 600
 601        if (obj->import_attach)
 602                return ERR_PTR(-ENODEV);
 603
 604        if (GEM_WARN_ON(msm_obj->madv > madv)) {
 605                DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
 606                        msm_obj->madv, madv);
 607                return ERR_PTR(-EBUSY);
 608        }
 609
 610        /* increment vmap_count *before* vmap() call, so shrinker can
 611         * check vmap_count (is_vunmapable()) outside of msm_obj lock.
 612         * This guarantees that we won't try to msm_gem_vunmap() this
 613         * same object from within the vmap() call (while we already
 614         * hold msm_obj lock)
 615         */
 616        msm_obj->vmap_count++;
 617
 618        if (!msm_obj->vaddr) {
 619                struct page **pages = get_pages(obj);
 620                if (IS_ERR(pages)) {
 621                        ret = PTR_ERR(pages);
 622                        goto fail;
 623                }
 624                msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 625                                VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
 626                if (msm_obj->vaddr == NULL) {
 627                        ret = -ENOMEM;
 628                        goto fail;
 629                }
 630
 631                update_inactive(msm_obj);
 632        }
 633
 634        return msm_obj->vaddr;
 635
 636fail:
 637        msm_obj->vmap_count--;
 638        return ERR_PTR(ret);
 639}
 640
 641void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
 642{
 643        return get_vaddr(obj, MSM_MADV_WILLNEED);
 644}
 645
 646void *msm_gem_get_vaddr(struct drm_gem_object *obj)
 647{
 648        void *ret;
 649
 650        msm_gem_lock(obj);
 651        ret = msm_gem_get_vaddr_locked(obj);
 652        msm_gem_unlock(obj);
 653
 654        return ret;
 655}
 656
 657/*
 658 * Don't use this!  It is for the very special case of dumping
  659 * submits from GPU hangs or faults, where the bo may already
 660 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 661 * active list.
 662 */
 663void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
 664{
 665        return get_vaddr(obj, __MSM_MADV_PURGED);
 666}
 667
 668void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
 669{
 670        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 671
 672        GEM_WARN_ON(!msm_gem_is_locked(obj));
 673        GEM_WARN_ON(msm_obj->vmap_count < 1);
 674
 675        msm_obj->vmap_count--;
 676}
 677
 678void msm_gem_put_vaddr(struct drm_gem_object *obj)
 679{
 680        msm_gem_lock(obj);
 681        msm_gem_put_vaddr_locked(obj);
 682        msm_gem_unlock(obj);
 683}
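
/*
 * Illustrative sketch of CPU access through the kernel mapping.  The
 * calling context (obj, src, len) is assumed; only the msm_gem_* calls
 * exist in this file.  Every successful get must be balanced by a put
 * so the shrinker can eventually vunmap the object:
 *
 *      void *ptr = msm_gem_get_vaddr(obj);
 *
 *      if (IS_ERR(ptr))
 *              return PTR_ERR(ptr);
 *      memcpy(ptr, src, len);
 *      msm_gem_put_vaddr(obj);
 */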
 684
 685/* Update madvise status, returns true if not purged, else
  686 * false.
 687 */
 688int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
 689{
 690        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 691
 692        msm_gem_lock(obj);
 693
 694        if (msm_obj->madv != __MSM_MADV_PURGED)
 695                msm_obj->madv = madv;
 696
 697        madv = msm_obj->madv;
 698
 699        /* If the obj is inactive, we might need to move it
 700         * between inactive lists
 701         */
 702        if (msm_obj->active_count == 0)
 703                update_inactive(msm_obj);
 704
 705        msm_gem_unlock(obj);
 706
 707        return (madv != __MSM_MADV_PURGED);
 708}
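
/*
 * Illustrative sketch of how a caller reacts to the return value when
 * re-claiming a buffer from a userspace BO cache.  The cache itself is
 * an assumption; msm_gem_madvise() and the MSM_MADV_* values are real:
 *
 *      retained = msm_gem_madvise(obj, MSM_MADV_WILLNEED);
 *      if (!retained) {
 *              ... the backing pages were purged: previous contents are
 *              gone and the buffer must be reinitialized before use ...
 *      }
 */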
 709
 710void msm_gem_purge(struct drm_gem_object *obj)
 711{
 712        struct drm_device *dev = obj->dev;
 713        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 714
 715        GEM_WARN_ON(!msm_gem_is_locked(obj));
 716        GEM_WARN_ON(!is_purgeable(msm_obj));
 717
 718        /* Get rid of any iommu mapping(s): */
 719        put_iova_spaces(obj, true);
 720
 721        msm_gem_vunmap(obj);
 722
 723        drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
 724
 725        put_pages(obj);
 726
 727        put_iova_vmas(obj);
 728
 729        msm_obj->madv = __MSM_MADV_PURGED;
 730        update_inactive(msm_obj);
 731
 732        drm_gem_free_mmap_offset(obj);
 733
  734        /* Our goal here is to return as much of the memory as
  735         * possible back to the system, since we are called from OOM.
 736         * To do this we must instruct the shmfs to drop all of its
 737         * backing pages, *now*.
 738         */
 739        shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
 740
 741        invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
 742                        0, (loff_t)-1);
 743}
 744
 745/*
 746 * Unpin the backing pages and make them available to be swapped out.
 747 */
 748void msm_gem_evict(struct drm_gem_object *obj)
 749{
 750        struct drm_device *dev = obj->dev;
 751        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 752
 753        GEM_WARN_ON(!msm_gem_is_locked(obj));
 754        GEM_WARN_ON(is_unevictable(msm_obj));
 755        GEM_WARN_ON(!msm_obj->evictable);
 756        GEM_WARN_ON(msm_obj->active_count);
 757
 758        /* Get rid of any iommu mapping(s): */
 759        put_iova_spaces(obj, false);
 760
 761        drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
 762
 763        put_pages(obj);
 764
 765        update_inactive(msm_obj);
 766}
 767
 768void msm_gem_vunmap(struct drm_gem_object *obj)
 769{
 770        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 771
 772        GEM_WARN_ON(!msm_gem_is_locked(obj));
 773
 774        if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
 775                return;
 776
 777        vunmap(msm_obj->vaddr);
 778        msm_obj->vaddr = NULL;
 779}
 780
 781void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
 782{
 783        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 784        struct msm_drm_private *priv = obj->dev->dev_private;
 785
 786        might_sleep();
 787        GEM_WARN_ON(!msm_gem_is_locked(obj));
 788        GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
 789        GEM_WARN_ON(msm_obj->dontneed);
 790
 791        if (msm_obj->active_count++ == 0) {
 792                mutex_lock(&priv->mm_lock);
 793                if (msm_obj->evictable)
 794                        mark_unevictable(msm_obj);
 795                list_move_tail(&msm_obj->mm_list, &gpu->active_list);
 796                mutex_unlock(&priv->mm_lock);
 797        }
 798}
 799
 800void msm_gem_active_put(struct drm_gem_object *obj)
 801{
 802        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 803
 804        might_sleep();
 805        GEM_WARN_ON(!msm_gem_is_locked(obj));
 806
 807        if (--msm_obj->active_count == 0) {
 808                update_inactive(msm_obj);
 809        }
 810}
 811
 812static void update_inactive(struct msm_gem_object *msm_obj)
 813{
 814        struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
 815
 816        GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
 817
 818        if (msm_obj->active_count != 0)
 819                return;
 820
 821        mutex_lock(&priv->mm_lock);
 822
 823        if (msm_obj->dontneed)
 824                mark_unpurgeable(msm_obj);
 825        if (msm_obj->evictable)
 826                mark_unevictable(msm_obj);
 827
 828        list_del(&msm_obj->mm_list);
 829        if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
 830                list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
 831                mark_evictable(msm_obj);
 832        } else if (msm_obj->madv == MSM_MADV_DONTNEED) {
 833                list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
 834                mark_purgeable(msm_obj);
 835        } else {
 836                GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
 837                list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
 838        }
 839
 840        mutex_unlock(&priv->mm_lock);
 841}
 842
 843int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 844{
 845        bool write = !!(op & MSM_PREP_WRITE);
 846        unsigned long remain =
 847                op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
 848        long ret;
 849
 850        ret = dma_resv_wait_timeout(obj->resv, write, true,  remain);
 851        if (ret == 0)
 852                return remain == 0 ? -EBUSY : -ETIMEDOUT;
 853        else if (ret < 0)
 854                return ret;
 855
 856        /* TODO cache maintenance */
 857
 858        return 0;
 859}
 860
 861int msm_gem_cpu_fini(struct drm_gem_object *obj)
 862{
 863        /* TODO cache maintenance */
 864        return 0;
 865}
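
/*
 * Illustrative sketch of bracketing CPU access with the prep/fini calls
 * above.  The timeout value is an assumption; MSM_PREP_READ comes from
 * the uapi header rather than this file:
 *
 *      ktime_t timeout = ...;
 *      int ret;
 *
 *      ret = msm_gem_cpu_prep(obj, MSM_PREP_READ, &timeout);
 *      if (ret)
 *              return ret;
 *      ... CPU reads the buffer contents ...
 *      ret = msm_gem_cpu_fini(obj);
 */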
 866
 867#ifdef CONFIG_DEBUG_FS
 868static void describe_fence(struct dma_fence *fence, const char *type,
 869                struct seq_file *m)
 870{
 871        if (!dma_fence_is_signaled(fence))
 872                seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
 873                                fence->ops->get_driver_name(fence),
 874                                fence->ops->get_timeline_name(fence),
 875                                fence->seqno);
 876}
 877
 878void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
 879                struct msm_gem_stats *stats)
 880{
 881        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 882        struct dma_resv *robj = obj->resv;
 883        struct dma_resv_list *fobj;
 884        struct dma_fence *fence;
 885        struct msm_gem_vma *vma;
 886        uint64_t off = drm_vma_node_start(&obj->vma_node);
 887        const char *madv;
 888
 889        msm_gem_lock(obj);
 890
 891        stats->all.count++;
 892        stats->all.size += obj->size;
 893
 894        if (is_active(msm_obj)) {
 895                stats->active.count++;
 896                stats->active.size += obj->size;
 897        }
 898
 899        if (msm_obj->pages) {
 900                stats->resident.count++;
 901                stats->resident.size += obj->size;
 902        }
 903
 904        switch (msm_obj->madv) {
 905        case __MSM_MADV_PURGED:
 906                stats->purged.count++;
 907                stats->purged.size += obj->size;
 908                madv = " purged";
 909                break;
 910        case MSM_MADV_DONTNEED:
 911                stats->purgeable.count++;
 912                stats->purgeable.size += obj->size;
 913                madv = " purgeable";
 914                break;
 915        case MSM_MADV_WILLNEED:
 916        default:
 917                madv = "";
 918                break;
 919        }
 920
 921        seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
 922                        msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
 923                        obj->name, kref_read(&obj->refcount),
 924                        off, msm_obj->vaddr);
 925
 926        seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
 927
 928        if (!list_empty(&msm_obj->vmas)) {
 929
 930                seq_puts(m, "      vmas:");
 931
 932                list_for_each_entry(vma, &msm_obj->vmas, list) {
 933                        const char *name, *comm;
 934                        if (vma->aspace) {
 935                                struct msm_gem_address_space *aspace = vma->aspace;
 936                                struct task_struct *task =
 937                                        get_pid_task(aspace->pid, PIDTYPE_PID);
 938                                if (task) {
 939                                        comm = kstrdup(task->comm, GFP_KERNEL);
 940                                } else {
 941                                        comm = NULL;
 942                                }
 943                                name = aspace->name;
 944                        } else {
 945                                name = comm = NULL;
 946                        }
 947                        seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
 948                                name, comm ? ":" : "", comm ? comm : "",
 949                                vma->aspace, vma->iova,
 950                                vma->mapped ? "mapped" : "unmapped",
 951                                vma->inuse);
 952                        kfree(comm);
 953                }
 954
 955                seq_puts(m, "\n");
 956        }
 957
 958        rcu_read_lock();
 959        fobj = dma_resv_shared_list(robj);
 960        if (fobj) {
 961                unsigned int i, shared_count = fobj->shared_count;
 962
 963                for (i = 0; i < shared_count; i++) {
 964                        fence = rcu_dereference(fobj->shared[i]);
 965                        describe_fence(fence, "Shared", m);
 966                }
 967        }
 968
 969        fence = dma_resv_excl_fence(robj);
 970        if (fence)
 971                describe_fence(fence, "Exclusive", m);
 972        rcu_read_unlock();
 973
 974        msm_gem_unlock(obj);
 975}
 976
 977void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
 978{
 979        struct msm_gem_stats stats = {};
 980        struct msm_gem_object *msm_obj;
 981
 982        seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
 983        list_for_each_entry(msm_obj, list, node) {
 984                struct drm_gem_object *obj = &msm_obj->base;
 985                seq_puts(m, "   ");
 986                msm_gem_describe(obj, m, &stats);
 987        }
 988
 989        seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
 990                        stats.all.count, stats.all.size);
 991        seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
 992                        stats.active.count, stats.active.size);
 993        seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
 994                        stats.resident.count, stats.resident.size);
 995        seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
 996                        stats.purgeable.count, stats.purgeable.size);
 997        seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
 998                        stats.purged.count, stats.purged.size);
 999}
1000#endif
1001
1002/* don't call directly!  Use drm_gem_object_put() */
1003void msm_gem_free_object(struct drm_gem_object *obj)
1004{
1005        struct msm_gem_object *msm_obj = to_msm_bo(obj);
1006        struct drm_device *dev = obj->dev;
1007        struct msm_drm_private *priv = dev->dev_private;
1008
1009        mutex_lock(&priv->obj_lock);
1010        list_del(&msm_obj->node);
1011        mutex_unlock(&priv->obj_lock);
1012
1013        mutex_lock(&priv->mm_lock);
1014        if (msm_obj->dontneed)
1015                mark_unpurgeable(msm_obj);
1016        list_del(&msm_obj->mm_list);
1017        mutex_unlock(&priv->mm_lock);
1018
1019        msm_gem_lock(obj);
1020
1021        /* object should not be on active list: */
1022        GEM_WARN_ON(is_active(msm_obj));
1023
1024        put_iova_spaces(obj, true);
1025
1026        if (obj->import_attach) {
1027                GEM_WARN_ON(msm_obj->vaddr);
1028
1029                /* Don't drop the pages for imported dmabuf, as they are not
1030                 * ours, just free the array we allocated:
1031                 */
1032                kvfree(msm_obj->pages);
1033
1034                put_iova_vmas(obj);
1035
1036                /* dma_buf_detach() grabs resv lock, so we need to unlock
1037                 * prior to drm_prime_gem_destroy
1038                 */
1039                msm_gem_unlock(obj);
1040
1041                drm_prime_gem_destroy(obj, msm_obj->sgt);
1042        } else {
1043                msm_gem_vunmap(obj);
1044                put_pages(obj);
1045                put_iova_vmas(obj);
1046                msm_gem_unlock(obj);
1047        }
1048
1049        drm_gem_object_release(obj);
1050
1051        kfree(msm_obj);
1052}
1053
1054static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1055{
1056        struct msm_gem_object *msm_obj = to_msm_bo(obj);
1057
1058        vma->vm_flags &= ~VM_PFNMAP;
1059        vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
1060        vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
1061
1062        return 0;
1063}
1064
1065/* convenience method to construct a GEM buffer object, and userspace handle */
1066int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1067                uint32_t size, uint32_t flags, uint32_t *handle,
1068                char *name)
1069{
1070        struct drm_gem_object *obj;
1071        int ret;
1072
1073        obj = msm_gem_new(dev, size, flags);
1074
1075        if (IS_ERR(obj))
1076                return PTR_ERR(obj);
1077
1078        if (name)
1079                msm_gem_object_set_name(obj, "%s", name);
1080
1081        ret = drm_gem_handle_create(file, obj, handle);
1082
1083        /* drop reference from allocate - handle holds it now */
1084        drm_gem_object_put(obj);
1085
1086        return ret;
1087}
1088
1089static const struct vm_operations_struct vm_ops = {
1090        .fault = msm_gem_fault,
1091        .open = drm_gem_vm_open,
1092        .close = drm_gem_vm_close,
1093};
1094
1095static const struct drm_gem_object_funcs msm_gem_object_funcs = {
1096        .free = msm_gem_free_object,
1097        .pin = msm_gem_prime_pin,
1098        .unpin = msm_gem_prime_unpin,
1099        .get_sg_table = msm_gem_prime_get_sg_table,
1100        .vmap = msm_gem_prime_vmap,
1101        .vunmap = msm_gem_prime_vunmap,
1102        .mmap = msm_gem_object_mmap,
1103        .vm_ops = &vm_ops,
1104};
1105
1106static int msm_gem_new_impl(struct drm_device *dev,
1107                uint32_t size, uint32_t flags,
1108                struct drm_gem_object **obj)
1109{
1110        struct msm_drm_private *priv = dev->dev_private;
1111        struct msm_gem_object *msm_obj;
1112
1113        switch (flags & MSM_BO_CACHE_MASK) {
1114        case MSM_BO_UNCACHED:
1115        case MSM_BO_CACHED:
1116        case MSM_BO_WC:
1117                break;
1118        case MSM_BO_CACHED_COHERENT:
1119                if (priv->has_cached_coherent)
1120                        break;
1121                fallthrough;
1122        default:
1123                DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
1124                                (flags & MSM_BO_CACHE_MASK));
1125                return -EINVAL;
1126        }
1127
1128        msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
1129        if (!msm_obj)
1130                return -ENOMEM;
1131
1132        msm_obj->flags = flags;
1133        msm_obj->madv = MSM_MADV_WILLNEED;
1134
1135        INIT_LIST_HEAD(&msm_obj->vmas);
1136
1137        *obj = &msm_obj->base;
1138        (*obj)->funcs = &msm_gem_object_funcs;
1139
1140        return 0;
1141}
1142
1143struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
1144{
1145        struct msm_drm_private *priv = dev->dev_private;
1146        struct msm_gem_object *msm_obj;
1147        struct drm_gem_object *obj = NULL;
1148        bool use_vram = false;
1149        int ret;
1150
1151        size = PAGE_ALIGN(size);
1152
1153        if (!msm_use_mmu(dev))
1154                use_vram = true;
1155        else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
1156                use_vram = true;
1157
1158        if (GEM_WARN_ON(use_vram && !priv->vram.size))
1159                return ERR_PTR(-EINVAL);
1160
1161        /* Disallow zero sized objects as they make the underlying
1162         * infrastructure grumpy
1163         */
1164        if (size == 0)
1165                return ERR_PTR(-EINVAL);
1166
1167        ret = msm_gem_new_impl(dev, size, flags, &obj);
1168        if (ret)
1169                goto fail;
1170
1171        msm_obj = to_msm_bo(obj);
1172
1173        if (use_vram) {
1174                struct msm_gem_vma *vma;
1175                struct page **pages;
1176
1177                drm_gem_private_object_init(dev, obj, size);
1178
1179                msm_gem_lock(obj);
1180
1181                vma = add_vma(obj, NULL);
1182                msm_gem_unlock(obj);
1183                if (IS_ERR(vma)) {
1184                        ret = PTR_ERR(vma);
1185                        goto fail;
1186                }
1187
1188                to_msm_bo(obj)->vram_node = &vma->node;
1189
1190                /* Call chain get_pages() -> update_inactive() tries to
1191                 * access msm_obj->mm_list, but it is not initialized yet.
1192                 * To avoid NULL pointer dereference error, initialize
1193                 * mm_list to be empty.
1194                 */
1195                INIT_LIST_HEAD(&msm_obj->mm_list);
1196
1197                msm_gem_lock(obj);
1198                pages = get_pages(obj);
1199                msm_gem_unlock(obj);
1200                if (IS_ERR(pages)) {
1201                        ret = PTR_ERR(pages);
1202                        goto fail;
1203                }
1204
1205                vma->iova = physaddr(obj);
1206        } else {
1207                ret = drm_gem_object_init(dev, obj, size);
1208                if (ret)
1209                        goto fail;
1210                /*
1211                 * Our buffers are kept pinned, so allocating them from the
1212                 * MOVABLE zone is a really bad idea, and conflicts with CMA.
 1213                 * See comments above new_inode() for why this is required _and_
1214                 * expected if you're going to pin these pages.
1215                 */
1216                mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1217        }
1218
1219        mutex_lock(&priv->mm_lock);
1220        list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
1221        mutex_unlock(&priv->mm_lock);
1222
1223        mutex_lock(&priv->obj_lock);
1224        list_add_tail(&msm_obj->node, &priv->objects);
1225        mutex_unlock(&priv->obj_lock);
1226
1227        return obj;
1228
1229fail:
1230        drm_gem_object_put(obj);
1231        return ERR_PTR(ret);
1232}
1233
1234struct drm_gem_object *msm_gem_import(struct drm_device *dev,
1235                struct dma_buf *dmabuf, struct sg_table *sgt)
1236{
1237        struct msm_drm_private *priv = dev->dev_private;
1238        struct msm_gem_object *msm_obj;
1239        struct drm_gem_object *obj;
1240        uint32_t size;
1241        int ret, npages;
1242
1243        /* if we don't have IOMMU, don't bother pretending we can import: */
1244        if (!msm_use_mmu(dev)) {
1245                DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
1246                return ERR_PTR(-EINVAL);
1247        }
1248
1249        size = PAGE_ALIGN(dmabuf->size);
1250
1251        ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
1252        if (ret)
1253                goto fail;
1254
1255        drm_gem_private_object_init(dev, obj, size);
1256
1257        npages = size / PAGE_SIZE;
1258
1259        msm_obj = to_msm_bo(obj);
1260        msm_gem_lock(obj);
1261        msm_obj->sgt = sgt;
1262        msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1263        if (!msm_obj->pages) {
1264                msm_gem_unlock(obj);
1265                ret = -ENOMEM;
1266                goto fail;
1267        }
1268
1269        ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
1270        if (ret) {
1271                msm_gem_unlock(obj);
1272                goto fail;
1273        }
1274
1275        msm_gem_unlock(obj);
1276
1277        mutex_lock(&priv->mm_lock);
1278        list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
1279        mutex_unlock(&priv->mm_lock);
1280
1281        mutex_lock(&priv->obj_lock);
1282        list_add_tail(&msm_obj->node, &priv->objects);
1283        mutex_unlock(&priv->obj_lock);
1284
1285        return obj;
1286
1287fail:
1288        drm_gem_object_put(obj);
1289        return ERR_PTR(ret);
1290}
1291
1292void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1293                uint32_t flags, struct msm_gem_address_space *aspace,
1294                struct drm_gem_object **bo, uint64_t *iova)
1295{
1296        void *vaddr;
1297        struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
1298        int ret;
1299
1300        if (IS_ERR(obj))
1301                return ERR_CAST(obj);
1302
1303        if (iova) {
1304                ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
1305                if (ret)
1306                        goto err;
1307        }
1308
1309        vaddr = msm_gem_get_vaddr(obj);
1310        if (IS_ERR(vaddr)) {
1311                msm_gem_unpin_iova(obj, aspace);
1312                ret = PTR_ERR(vaddr);
1313                goto err;
1314        }
1315
1316        if (bo)
1317                *bo = obj;
1318
1319        return vaddr;
1320err:
1321        drm_gem_object_put(obj);
1322
1323        return ERR_PTR(ret);
1324
1325}
1326
1327void msm_gem_kernel_put(struct drm_gem_object *bo,
1328                struct msm_gem_address_space *aspace)
1329{
1330        if (IS_ERR_OR_NULL(bo))
1331                return;
1332
1333        msm_gem_put_vaddr(bo);
1334        msm_gem_unpin_iova(bo, aspace);
1335        drm_gem_object_put(bo);
1336}
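
/*
 * Illustrative sketch of the kernel-internal allocation helpers above.
 * The size, flags and surrounding context are assumptions; the
 * msm_gem_kernel_* calls and MSM_BO_WC come from this code base:
 *
 *      struct drm_gem_object *bo;
 *      uint64_t iova;
 *      void *vaddr;
 *
 *      vaddr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, aspace, &bo, &iova);
 *      if (IS_ERR(vaddr))
 *              return PTR_ERR(vaddr);
 *      ... fill vaddr, point the GPU at iova ...
 *      msm_gem_kernel_put(bo, aspace);
 */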
1337
1338void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
1339{
1340        struct msm_gem_object *msm_obj = to_msm_bo(bo);
1341        va_list ap;
1342
1343        if (!fmt)
1344                return;
1345
1346        va_start(ap, fmt);
1347        vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1348        va_end(ap);
1349}
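
/*
 * Illustrative sketch: callers typically tag an allocation with a short
 * printf-style name right after creating it so it shows up in the
 * debugfs listing above (the "ringbuffer" name and id are assumptions):
 *
 *      msm_gem_object_set_name(bo, "ringbuffer%d", id);
 */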
1350