linux/drivers/gpu/drm/msm/msm_gem.c
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_drm_private *priv = obj->dev->dev_private;
        return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
                        priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        return !msm_obj->vram_node;
}
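
/*
 * A buffer object is backed either by shmem pages (the normal case, when
 * an IOMMU is present) or by a node in the contiguous VRAM carveout
 * (msm_obj->vram_node, set up in msm_gem_new_impl() below).  use_pages()
 * is the switch the rest of this file keys off of.
 */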

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
                int npages)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_drm_private *priv = obj->dev->dev_private;
        dma_addr_t paddr;
        struct page **p;
        int ret, i;

        p = drm_malloc_ab(npages, sizeof(struct page *));
        if (!p)
                return ERR_PTR(-ENOMEM);

        ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
                        npages, 0, DRM_MM_SEARCH_DEFAULT);
        if (ret) {
                drm_free_large(p);
                return ERR_PTR(ret);
        }

        paddr = physaddr(obj);
        for (i = 0; i < npages; i++) {
                p[i] = phys_to_page(paddr);
                paddr += PAGE_SIZE;
        }

        return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (!msm_obj->pages) {
                struct drm_device *dev = obj->dev;
                struct page **p;
                int npages = obj->size >> PAGE_SHIFT;

                if (use_pages(obj))
                        p = drm_gem_get_pages(obj);
                else
                        p = get_pages_vram(obj, npages);

                if (IS_ERR(p)) {
                        dev_err(dev->dev, "could not get pages: %ld\n",
                                        PTR_ERR(p));
                        return p;
                }

                msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
                if (IS_ERR(msm_obj->sgt)) {
                        dev_err(dev->dev, "failed to allocate sgt\n");
                        return ERR_CAST(msm_obj->sgt);
                }

                msm_obj->pages = p;

                /* For non-cached buffers, ensure the new pages are clean
                 * because display controller, GPU, etc. are not coherent:
                 */
                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                        dma_map_sg(dev->dev, msm_obj->sgt->sgl,
                                        msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
        }

        return msm_obj->pages;
}
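
/*
 * Illustrative sketch (hypothetical caller, not part of this file): code
 * that needs the backing pages goes through the locked wrapper below and
 * checks for an ERR_PTR before touching the array:
 *
 *      struct page **pages = msm_gem_get_pages(obj);
 *
 *      if (IS_ERR(pages))
 *              return PTR_ERR(pages);
 *      ... use pages[] ...
 *      msm_gem_put_pages(obj);
 */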

static void put_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (msm_obj->pages) {
                /* For non-cached buffers, tear down the mapping set up in
                 * get_pages() before the pages are released, because display
                 * controller, GPU, etc. are not coherent:
                 */
                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                        dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
                                        msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
                sg_free_table(msm_obj->sgt);
                kfree(msm_obj->sgt);

                if (use_pages(obj))
                        drm_gem_put_pages(obj, msm_obj->pages, true, false);
                else {
                        drm_mm_remove_node(msm_obj->vram_node);
                        drm_free_large(msm_obj->pages);
                }

                msm_obj->pages = NULL;
        }
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct page **p;
        mutex_lock(&dev->struct_mutex);
        p = get_pages(obj);
        mutex_unlock(&dev->struct_mutex);
        return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
        /* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
                struct vm_area_struct *vma)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        if (msm_obj->flags & MSM_BO_WC) {
                vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        } else if (msm_obj->flags & MSM_BO_UNCACHED) {
                vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
        } else {
                /*
                 * Shunt off cached objs to shmem file so they have their own
                 * address_space (so unmap_mapping_range does what we want,
                 * in particular in the case of mmap'd dmabufs)
                 */
                fput(vma->vm_file);
                get_file(obj->filp);
                vma->vm_pgoff = 0;
                vma->vm_file  = obj->filp;

                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        }

        return 0;
}
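
/*
 * Illustrative sketch of the userspace side (hypothetical, not part of
 * this file): userspace obtains the fake mmap offset for a handle (e.g.
 * via the driver's GEM_INFO ioctl or MODE_MAP_DUMB for dumb buffers) and
 * then mmaps the DRM fd at that offset:
 *
 *      void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, drm_fd, mmap_offset);
 *
 * The resulting VMA is set up by msm_gem_mmap()/msm_gem_mmap_obj() above,
 * and pages are populated lazily by msm_gem_fault() below.
 */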

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret) {
                DBG("mmap failed: %d", ret);
                return ret;
        }

        return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct page **pages;
        unsigned long pfn;
        pgoff_t pgoff;
        int ret;

        /* This should only happen if userspace tries to pass a mmap'd
         * but unfaulted gem bo vaddr into submit ioctl, triggering
         * a page fault while struct_mutex is already held.  This is
         * not a valid use-case so just bail.
         */
        if (priv->struct_mutex_task == current)
                return VM_FAULT_SIGBUS;

        /* Make sure we don't parallel update on a fault, nor move or remove
         * something from beneath our feet
         */
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                goto out;

        /* make sure we have pages attached now */
        pages = get_pages(obj);
        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                goto out_unlock;
        }

        /* We don't use vmf->pgoff since that has the fake offset: */
        pgoff = ((unsigned long)vmf->virtual_address -
                        vma->vm_start) >> PAGE_SHIFT;

        pfn = page_to_pfn(pages[pgoff]);

        VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
                        pfn, pfn << PAGE_SHIFT);

        ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
                        __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
        mutex_unlock(&dev->struct_mutex);
out:
        switch (ret) {
        case -EAGAIN:
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
        case -EBUSY:
                /*
                 * EBUSY is ok: this just means that another thread
                 * already did the job.
                 */
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;
        }
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        /* Make it mmapable */
        ret = drm_gem_create_mmap_offset(obj);

        if (ret) {
                dev_err(dev->dev, "could not allocate mmap offset\n");
                return 0;
        }

        return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
        uint64_t offset;
        mutex_lock(&obj->dev->struct_mutex);
        offset = mmap_offset(obj);
        mutex_unlock(&obj->dev->struct_mutex);
        return offset;
}

static void
put_iova(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_drm_private *priv = obj->dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int id;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
                struct msm_mmu *mmu = priv->mmus[id];
                if (mmu && msm_obj->domain[id].iova) {
                        uint32_t offset = msm_obj->domain[id].iova;
                        mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
                        msm_obj->domain[id].iova = 0;
                }
        }
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
                uint32_t *iova)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret = 0;

        if (!msm_obj->domain[id].iova) {
                struct msm_drm_private *priv = obj->dev->dev_private;
                struct page **pages = get_pages(obj);

                if (IS_ERR(pages))
                        return PTR_ERR(pages);

                if (iommu_present(&platform_bus_type)) {
                        struct msm_mmu *mmu = priv->mmus[id];
                        uint32_t offset;

                        if (WARN_ON(!mmu))
                                return -EINVAL;

                        offset = (uint32_t)mmap_offset(obj);
                        ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
                                        obj->size, IOMMU_READ | IOMMU_WRITE);
                        msm_obj->domain[id].iova = offset;
                } else {
                        msm_obj->domain[id].iova = physaddr(obj);
                }
        }

        if (!ret)
                *iova = msm_obj->domain[id].iova;

        return ret;
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret;

        /* this is safe right now because we don't unmap until the
         * bo is deleted:
         */
        if (msm_obj->domain[id].iova) {
                *iova = msm_obj->domain[id].iova;
                return 0;
        }

        mutex_lock(&obj->dev->struct_mutex);
        ret = msm_gem_get_iova_locked(obj, id, iova);
        mutex_unlock(&obj->dev->struct_mutex);
        return ret;
}
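
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a GPU
 * or display path that needs the device address of a BO in MMU domain
 * 'id' would do:
 *
 *      uint32_t iova;
 *      int ret = msm_gem_get_iova(obj, id, &iova);
 *
 *      if (ret)
 *              return ret;
 *      ... program iova into the hardware ...
 *      msm_gem_put_iova(obj, id);      (currently a no-op, see below)
 */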

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        WARN_ON(!msm_obj->domain[id].iova);
        return msm_obj->domain[id].iova;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
        // XXX TODO ..
        // NOTE: probably don't need a _locked() version.. we wouldn't
        // normally unmap here, but instead just mark that it could be
        // unmapped (if the iova refcnt drops to zero), but then later
        // if another _get_iova_locked() fails we can start unmapping
        // things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                struct drm_mode_create_dumb *args)
{
        args->pitch = align_pitch(args->width, args->bpp);
        args->size  = PAGE_ALIGN(args->pitch * args->height);
        return msm_gem_new_handle(dev, file, args->size,
                        MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                uint32_t handle, uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret = 0;

        /* GEM does all our handle to object mapping */
        obj = drm_gem_object_lookup(file, handle);
        if (obj == NULL) {
                ret = -ENOENT;
                goto fail;
        }

        *offset = msm_gem_mmap_offset(obj);

        drm_gem_object_unreference_unlocked(obj);

fail:
        return ret;
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
        if (!msm_obj->vaddr) {
                struct page **pages = get_pages(obj);
                if (IS_ERR(pages))
                        return ERR_CAST(pages);
                msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
                                VM_MAP, pgprot_writecombine(PAGE_KERNEL));
                if (msm_obj->vaddr == NULL)
                        return ERR_PTR(-ENOMEM);
        }
        msm_obj->vmap_count++;
        return msm_obj->vaddr;
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
        void *ret;
        mutex_lock(&obj->dev->struct_mutex);
        ret = msm_gem_get_vaddr_locked(obj);
        mutex_unlock(&obj->dev->struct_mutex);
        return ret;
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
        WARN_ON(msm_obj->vmap_count < 1);
        msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
        mutex_lock(&obj->dev->struct_mutex);
        msm_gem_put_vaddr_locked(obj);
        mutex_unlock(&obj->dev->struct_mutex);
}
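
/*
 * Illustrative sketch (hypothetical caller, not part of this file): CPU
 * access to a BO from kernel code brackets the vmap'd pointer with
 * get/put so vmap_count stays balanced:
 *
 *      void *vaddr = msm_gem_get_vaddr(obj);
 *
 *      if (IS_ERR(vaddr))
 *              return PTR_ERR(vaddr);
 *      memcpy(vaddr, cmds, size);
 *      msm_gem_put_vaddr(obj);
 */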

/* Update madvise status; returns true if the object has not been purged,
 * else false (or -errno on error).
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

        if (msm_obj->madv != __MSM_MADV_PURGED)
                msm_obj->madv = madv;

        return (msm_obj->madv != __MSM_MADV_PURGED);
}
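
/*
 * Illustrative sketch (hypothetical, not part of this file): a shrinker
 * style caller walks inactive objects that userspace marked
 * MSM_MADV_DONTNEED and reclaims them through msm_gem_purge() below:
 *
 *      if (is_purgeable(msm_obj))
 *              msm_gem_purge(&msm_obj->base);
 *
 * is_purgeable() is assumed to be the helper declared alongside this code
 * (msm_gem.h); after purging, the object's madv state reads back as
 * __MSM_MADV_PURGED.
 */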

void msm_gem_purge(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
        WARN_ON(!is_purgeable(msm_obj));
        WARN_ON(obj->import_attach);

        put_iova(obj);

        msm_gem_vunmap(obj);

        put_pages(obj);

        msm_obj->madv = __MSM_MADV_PURGED;

        drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
        drm_gem_free_mmap_offset(obj);

        /* Our goal here is to return as much of the memory as
         * is possible back to the system as we are called from OOM.
         * To do this we must instruct the shmfs to drop all of its
         * backing pages, *now*.
         */
        shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

        invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
                        0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
                return;

        vunmap(msm_obj->vaddr);
        msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
                struct msm_fence_context *fctx, bool exclusive)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct reservation_object_list *fobj;
        struct fence *fence;
        int i, ret;

        if (!exclusive) {
                /* NOTE: _reserve_shared() must happen before _add_shared_fence(),
                 * which makes this a slightly strange place to call it.  OTOH this
                 * is a convenient can-fail point to hook it in.  (And similar to
                 * how etnaviv and nouveau handle this.)
                 */
                ret = reservation_object_reserve_shared(msm_obj->resv);
                if (ret)
                        return ret;
        }

        fobj = reservation_object_get_list(msm_obj->resv);
        if (!fobj || (fobj->shared_count == 0)) {
                fence = reservation_object_get_excl(msm_obj->resv);
                /* don't need to wait on our own fences, since ring is fifo */
                if (fence && (fence->context != fctx->context)) {
                        ret = fence_wait(fence, true);
                        if (ret)
                                return ret;
                }
        }

        if (!exclusive || !fobj)
                return 0;

        for (i = 0; i < fobj->shared_count; i++) {
                fence = rcu_dereference_protected(fobj->shared[i],
                                                reservation_object_held(msm_obj->resv));
                if (fence->context != fctx->context) {
                        ret = fence_wait(fence, true);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}
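
/*
 * Illustrative sketch of the submit path (hypothetical, not part of this
 * file): for each BO referenced by a submit, the driver first waits on
 * conflicting fences and then records the BO as active against the new
 * fence:
 *
 *      ret = msm_gem_sync_object(obj, gpu->fctx, write);
 *      if (ret)
 *              return ret;
 *      ... queue the command stream ...
 *      msm_gem_move_to_active(obj, gpu, write, submit_fence);
 *
 * 'gpu->fctx' and 'submit_fence' are assumed names for the GPU's fence
 * context and the fence signalled on completion.
 */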

void msm_gem_move_to_active(struct drm_gem_object *obj,
                struct msm_gpu *gpu, bool exclusive, struct fence *fence)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
        msm_obj->gpu = gpu;
        if (exclusive)
                reservation_object_add_excl_fence(msm_obj->resv, fence);
        else
                reservation_object_add_shared_fence(msm_obj->resv, fence);
        list_del_init(&msm_obj->mm_list);
        list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        msm_obj->gpu = NULL;
        list_del_init(&msm_obj->mm_list);
        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        bool write = !!(op & MSM_PREP_WRITE);
        unsigned long remain =
                op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
        long ret;

        ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
                                                  true,  remain);
        if (ret == 0)
                return remain == 0 ? -EBUSY : -ETIMEDOUT;
        else if (ret < 0)
                return ret;

        /* TODO cache maintenance */

        return 0;
}
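
/*
 * Illustrative sketch (hypothetical, not part of this file): the
 * CPU_PREP/CPU_FINI ioctls bracket direct CPU access to a BO so the
 * kernel can wait for outstanding GPU work first:
 *
 *      ret = msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_WRITE, &timeout);
 *      if (ret)
 *              return ret;
 *      ... CPU reads/writes the mapping ...
 *      msm_gem_cpu_fini(obj);
 */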

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
        /* TODO cache maintenance */
        return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct fence *fence, const char *type,
                struct seq_file *m)
{
        if (!fence_is_signaled(fence))
                seq_printf(m, "\t%9s: %s %s seq %u\n", type,
                                fence->ops->get_driver_name(fence),
                                fence->ops->get_timeline_name(fence),
                                fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct reservation_object *robj = msm_obj->resv;
        struct reservation_object_list *fobj;
        struct fence *fence;
        uint64_t off = drm_vma_node_start(&obj->vma_node);
        const char *madv;

        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

        switch (msm_obj->madv) {
        case __MSM_MADV_PURGED:
                madv = " purged";
                break;
        case MSM_MADV_DONTNEED:
                madv = " purgeable";
                break;
        case MSM_MADV_WILLNEED:
        default:
                madv = "";
                break;
        }

        seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
                        msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
                        obj->name, obj->refcount.refcount.counter,
                        off, msm_obj->vaddr, obj->size, madv);

        rcu_read_lock();
        fobj = rcu_dereference(robj->fence);
        if (fobj) {
                unsigned int i, shared_count = fobj->shared_count;

                for (i = 0; i < shared_count; i++) {
                        fence = rcu_dereference(fobj->shared[i]);
                        describe_fence(fence, "Shared", m);
                }
        }

        fence = rcu_dereference(robj->fence_excl);
        if (fence)
                describe_fence(fence, "Exclusive", m);
        rcu_read_unlock();
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
        struct msm_gem_object *msm_obj;
        int count = 0;
        size_t size = 0;

        list_for_each_entry(msm_obj, list, mm_list) {
                struct drm_gem_object *obj = &msm_obj->base;
                seq_printf(m, "   ");
                msm_gem_describe(obj, m);
                count++;
                size += obj->size;
        }

        seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        /* object should not be on active list: */
        WARN_ON(is_active(msm_obj));

        list_del(&msm_obj->mm_list);

        put_iova(obj);

        if (obj->import_attach) {
                if (msm_obj->vaddr)
                        dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

                /* Don't drop the pages for imported dmabuf, as they are not
                 * ours, just free the array we allocated:
                 */
                if (msm_obj->pages)
                        drm_free_large(msm_obj->pages);

                drm_prime_gem_destroy(obj, msm_obj->sgt);
        } else {
                msm_gem_vunmap(obj);
                put_pages(obj);
        }

        if (msm_obj->resv == &msm_obj->_resv)
                reservation_object_fini(msm_obj->resv);

        drm_gem_object_release(obj);

        kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
                uint32_t size, uint32_t flags, uint32_t *handle)
{
        struct drm_gem_object *obj;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        obj = msm_gem_new(dev, size, flags);

        mutex_unlock(&dev->struct_mutex);

        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = drm_gem_handle_create(file, obj, handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(obj);

        return ret;
}
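
/*
 * Illustrative sketch (hypothetical ioctl-style caller, not part of this
 * file): creating a handle for userspace mirrors what
 * msm_gem_dumb_create() above does:
 *
 *      uint32_t handle;
 *      int ret = msm_gem_new_handle(dev, file, args->size,
 *                      MSM_BO_WC, &handle);
 *
 *      if (ret)
 *              return ret;
 *      args->handle = handle;
 */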

static int msm_gem_new_impl(struct drm_device *dev,
                uint32_t size, uint32_t flags,
                struct reservation_object *resv,
                struct drm_gem_object **obj)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj;
        unsigned sz;
        bool use_vram = false;

        switch (flags & MSM_BO_CACHE_MASK) {
        case MSM_BO_UNCACHED:
        case MSM_BO_CACHED:
        case MSM_BO_WC:
                break;
        default:
                dev_err(dev->dev, "invalid cache flag: %x\n",
                                (flags & MSM_BO_CACHE_MASK));
                return -EINVAL;
        }

        if (!iommu_present(&platform_bus_type))
                use_vram = true;
        else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
                use_vram = true;

        if (WARN_ON(use_vram && !priv->vram.size))
                return -EINVAL;

        sz = sizeof(*msm_obj);
        if (use_vram)
                sz += sizeof(struct drm_mm_node);

        msm_obj = kzalloc(sz, GFP_KERNEL);
        if (!msm_obj)
                return -ENOMEM;

        if (use_vram)
                msm_obj->vram_node = (void *)&msm_obj[1];

        msm_obj->flags = flags;
        msm_obj->madv = MSM_MADV_WILLNEED;

        if (resv) {
                msm_obj->resv = resv;
        } else {
                msm_obj->resv = &msm_obj->_resv;
                reservation_object_init(msm_obj->resv);
        }

        INIT_LIST_HEAD(&msm_obj->submit_entry);
        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

        *obj = &msm_obj->base;

        return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
                uint32_t size, uint32_t flags)
{
        struct drm_gem_object *obj = NULL;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        size = PAGE_ALIGN(size);

        ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
        if (ret)
                goto fail;

        if (use_pages(obj)) {
                ret = drm_gem_object_init(dev, obj, size);
                if (ret)
                        goto fail;
        } else {
                drm_gem_private_object_init(dev, obj, size);
        }

        return obj;

fail:
        drm_gem_object_unreference(obj);
        return ERR_PTR(ret);
}
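
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * kernel-internal allocation, e.g. for a scanout or ringbuffer BO, holds
 * struct_mutex around msm_gem_new() and then pins an iova:
 *
 *      struct drm_gem_object *bo;
 *      uint32_t iova;
 *      int ret;
 *
 *      mutex_lock(&dev->struct_mutex);
 *      bo = msm_gem_new(dev, size, MSM_BO_WC);
 *      mutex_unlock(&dev->struct_mutex);
 *      if (IS_ERR(bo))
 *              return PTR_ERR(bo);
 *      ret = msm_gem_get_iova(bo, id, &iova);
 */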

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
                struct dma_buf *dmabuf, struct sg_table *sgt)
{
        struct msm_gem_object *msm_obj;
        struct drm_gem_object *obj;
        uint32_t size;
        int ret, npages;

        /* if we don't have IOMMU, don't bother pretending we can import: */
        if (!iommu_present(&platform_bus_type)) {
                dev_err(dev->dev, "cannot import without IOMMU\n");
                return ERR_PTR(-EINVAL);
        }

        size = PAGE_ALIGN(dmabuf->size);

        ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
        if (ret)
                goto fail;

        drm_gem_private_object_init(dev, obj, size);

        npages = size / PAGE_SIZE;

        msm_obj = to_msm_bo(obj);
        msm_obj->sgt = sgt;
        msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
        if (!msm_obj->pages) {
                ret = -ENOMEM;
                goto fail;
        }

        ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
        if (ret)
                goto fail;

        return obj;

fail:
        drm_gem_object_unreference_unlocked(obj);
        return ERR_PTR(ret);
}
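
/*
 * Illustrative sketch of the PRIME import path (hypothetical, not part of
 * this file): the DRM core calls the driver's .gem_prime_import_sg_table
 * hook, which is expected to forward here:
 *
 *      return msm_gem_import(dev, attach->dmabuf, sgt);
 *
 * The imported object shares the exporter's reservation_object
 * (dmabuf->resv) and never owns its pages, which is why
 * msm_gem_free_object() above only frees the page array for imports.
 */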