linux/drivers/gpu/drm/msm/msm_gem.c
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, unmap and sync the pages for the
		 * CPU before releasing them, because display controller,
		 * GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

int msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj, so we don't need to hold one here.
	 */
	ret = mutex_lock_interruptible(&msm_obj->lock);
	if (ret)
		goto out;

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
		del_vma(vma);
	}
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return -EBUSY;
	}

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		struct page **pages;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto unlock;
		}

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
				obj->size >> PAGE_SHIFT);
		if (ret)
			goto fail;
	}

	*iova = vma->iova;

	mutex_unlock(&msm_obj->lock);
	return 0;

fail:
	del_vma(vma);
unlock:
	mutex_unlock(&msm_obj->lock);
	return ret;
}

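/* Illustrative sketch (hypothetical caller, not an entry point of this
 * driver): pin a buffer's iova for a GPU submission and drop it again
 * afterwards, pairing msm_gem_get_iova() with msm_gem_put_iova() as the
 * comment above asks.  The function name and the aspace argument are
 * assumptions for the example only.
 */
static int __maybe_unused example_pin_for_submit(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	uint64_t iova;
	int ret;

	ret = msm_gem_get_iova(obj, aspace, &iova);	/* takes a reference */
	if (ret)
		return ret;

	/* ... emit 'iova' into the command stream here ... */

	msm_gem_put_iova(obj, aspace);			/* matching put */
	return 0;
}
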
/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

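/* Minimal sketch (hypothetical helper): map a buffer into the kernel with
 * msm_gem_get_vaddr(), touch it, then drop the vmap_count reference with
 * msm_gem_put_vaddr().  Only the two _vaddr() calls above are real; the
 * helper name and the single word store are assumptions for illustration.
 */
static int __maybe_unused example_fill_first_word(struct drm_gem_object *obj,
		uint32_t value)
{
	uint32_t *vaddr = msm_gem_get_vaddr(obj);

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	vaddr[0] = value;	/* CPU access is valid while the vmap is held */

	msm_gem_put_vaddr(obj);
	return 0;
}
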
/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

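/* Sketch of the madvise contract (hypothetical caller, struct_mutex held):
 * re-marking a buffer WILLNEED reports whether the shrinker purged its
 * backing store while it was marked DONTNEED.  The helper name is an
 * assumption; msm_gem_madvise() is the real entry point above.
 */
static int __maybe_unused example_retain(struct drm_gem_object *obj)
{
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	/* returns 0 if the pages were purged in the meantime */
	return msm_gem_madvise(obj, MSM_MADV_WILLNEED);
}
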
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as possible
	 * back to the system, since we are called from OOM.  To do this
	 * we must instruct the shmfs to drop all of its backing pages,
	 * *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
						true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	/* FIXME: we need to print the address space here too */
	list_for_each_entry(vma, &msm_obj->vmas, list)
		seq_printf(m, " %08llx", vma->iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_iova(obj, aspace, iova);
		if (ret) {
			drm_gem_object_unreference(obj);
			return ERR_PTR(ret);
		}
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_put_iova(obj, aspace);
		drm_gem_object_unreference(obj);
		return ERR_CAST(vaddr);
	}

	if (bo)
		*bo = obj;

	return vaddr;
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

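/* Sketch (hypothetical caller): allocate a kernel-mapped, GPU-visible
 * buffer in one step via msm_gem_kernel_new().  The PAGE_SIZE size, the
 * MSM_BO_WC flag and the helper name are assumptions for the example.
 */
static void * __maybe_unused example_alloc_scratch(struct drm_device *dev,
		struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	/* on success: *bo holds the object, *iova its GPU address, and the
	 * returned pointer is a writecombine CPU mapping (ERR_PTR on error)
	 */
	return msm_gem_kernel_new(dev, PAGE_SIZE, MSM_BO_WC, aspace, bo, iova);
}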