linux/drivers/gpu/drm/omapdrm/omap_gem.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
   4 * Author: Rob Clark <rob.clark@linaro.org>
   5 */
   6
   7#include <linux/dma-mapping.h>
   8#include <linux/seq_file.h>
   9#include <linux/shmem_fs.h>
  10#include <linux/spinlock.h>
  11#include <linux/pfn_t.h>
  12
  13#include <drm/drm_prime.h>
  14#include <drm/drm_vma_manager.h>
  15
  16#include "omap_drv.h"
  17#include "omap_dmm_tiler.h"
  18
  19/*
  20 * GEM buffer object implementation.
  21 */
  22
  23/* note: we use upper 8 bits of flags for driver-internal flags: */
  24#define OMAP_BO_MEM_DMA_API     0x01000000      /* memory allocated with the dma_alloc_* API */
  25#define OMAP_BO_MEM_SHMEM       0x02000000      /* memory allocated through shmem backing */
  26#define OMAP_BO_MEM_DMABUF      0x08000000      /* memory imported from a dmabuf */
  27
  28struct omap_gem_object {
  29        struct drm_gem_object base;
  30
  31        struct list_head mm_list;
  32
  33        u32 flags;
  34
  35        /** width/height for tiled formats (rounded up to slot boundaries) */
  36        u16 width, height;
  37
  38        /** roll applied when mapping to DMM */
  39        u32 roll;
  40
  41        /** protects dma_addr_cnt, block, pages, dma_addrs and vaddr */
  42        struct mutex lock;
  43
  44        /**
  45         * dma_addr contains the buffer DMA address. It is valid for
  46         *
  47         * - buffers allocated through the DMA mapping API (with the
  48         *   OMAP_BO_MEM_DMA_API flag set)
  49         *
  50         * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
  51         *   if they are physically contiguous (when sgt->orig_nents == 1)
  52         *
  53         * - buffers mapped through the TILER when dma_addr_cnt is not zero, in
  54         *   which case the DMA address points to the TILER aperture
  55         *
  56         * Physically contiguous buffers have their DMA address equal to the
  57         * physical address as we don't remap those buffers through the TILER.
  58         *
  59         * Buffers mapped to the TILER have their DMA address pointing to the
  60         * TILER aperture. As TILER mappings are refcounted (through
  61         * dma_addr_cnt) the DMA address must be accessed through omap_gem_pin()
  62         * to ensure that the mapping won't disappear unexpectedly. References
  63         * must be released with omap_gem_unpin().
  64         */
  65        dma_addr_t dma_addr;
  66
  67        /**
  68         * # of users of dma_addr
  69         */
  70        refcount_t dma_addr_cnt;
  71
  72        /**
   73         * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
   74         * flag is set and the sgt field is valid.
  75         */
  76        struct sg_table *sgt;
  77
  78        /**
  79         * tiler block used when buffer is remapped in DMM/TILER.
  80         */
  81        struct tiler_block *block;
  82
  83        /**
  84         * Array of backing pages, if allocated.  Note that pages are never
  85         * allocated for buffers originally allocated from contiguous memory
  86         */
  87        struct page **pages;
  88
  89        /** addresses corresponding to pages in above array */
  90        dma_addr_t *dma_addrs;
  91
  92        /**
  93         * Virtual address, if mapped.
  94         */
  95        void *vaddr;
  96};
  97
  98#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
  99
 100/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 101 * not necessarily pinned in TILER all the time, and (b) when they are
 102 * they are not necessarily page aligned, we reserve one or more small
 103 * regions in each of the 2d containers to use as a user-GART where we
 104 * can create a second page-aligned mapping of parts of the buffer
 105 * being accessed from userspace.
 106 *
 107 * Note that we could optimize slightly when we know that multiple
 108 * tiler containers are backed by the same PAT.. but I'll leave that
 109 * for later..
 110 */
 111#define NUM_USERGART_ENTRIES 2
 112struct omap_drm_usergart_entry {
 113        struct tiler_block *block;      /* the reserved tiler block */
 114        dma_addr_t dma_addr;
 115        struct drm_gem_object *obj;     /* the current pinned obj */
 116        pgoff_t obj_pgoff;              /* page offset of obj currently
 117                                           mapped in */
 118};
 119
 120struct omap_drm_usergart {
 121        struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
 122        int height;                             /* height in rows */
 123        int height_shift;               /* ilog2(height in rows) */
 124        int slot_shift;                 /* ilog2(width per slot) */
 125        int stride_pfn;                 /* stride in pages */
 126        int last;                               /* index of last used entry */
 127};
 128
 129/* -----------------------------------------------------------------------------
 130 * Helpers
 131 */
 132
 133/** get mmap offset */
 134u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
 135{
 136        struct drm_device *dev = obj->dev;
 137        int ret;
 138        size_t size;
 139
 140        /* Make it mmapable */
 141        size = omap_gem_mmap_size(obj);
 142        ret = drm_gem_create_mmap_offset_size(obj, size);
 143        if (ret) {
 144                dev_err(dev->dev, "could not allocate mmap offset\n");
 145                return 0;
 146        }
 147
 148        return drm_vma_node_offset_addr(&obj->vma_node);
 149}
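
     /*
      * Usage sketch (hypothetical userspace caller, not part of this file):
      * the value returned here is the fake offset userspace obtains through
      * DRM_IOCTL_MODE_MAP_DUMB (see omap_gem_dumb_map_offset() below) and
      * then passes to mmap() on the DRM device fd:
      *
      *     map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
      *                drm_fd, offset);
      */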
 150
 151static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
 152{
 153        if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
 154                return true;
 155
 156        if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
 157                return true;
 158
 159        return false;
 160}
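
     /*
      * In practice (summary of the checks above): buffers allocated with
      * OMAP_BO_MEM_DMA_API (dma_alloc_wc()-backed) and single-entry dmabuf
      * imports can be handed to hardware directly via omap_obj->dma_addr;
      * everything else has to be remapped through DMM/TILER by omap_gem_pin().
      */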
 161
 162/* -----------------------------------------------------------------------------
 163 * Eviction
 164 */
 165
 166static void omap_gem_evict_entry(struct drm_gem_object *obj,
 167                enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
 168{
 169        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 170        struct omap_drm_private *priv = obj->dev->dev_private;
 171        int n = priv->usergart[fmt].height;
 172        size_t size = PAGE_SIZE * n;
 173        loff_t off = omap_gem_mmap_offset(obj) +
 174                        (entry->obj_pgoff << PAGE_SHIFT);
 175        const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
 176
 177        if (m > 1) {
 178                int i;
  179                /* if stride is larger than PAGE_SIZE then sparse mapping: */
 180                for (i = n; i > 0; i--) {
 181                        unmap_mapping_range(obj->dev->anon_inode->i_mapping,
 182                                            off, PAGE_SIZE, 1);
 183                        off += PAGE_SIZE * m;
 184                }
 185        } else {
 186                unmap_mapping_range(obj->dev->anon_inode->i_mapping,
 187                                    off, size, 1);
 188        }
 189
 190        entry->obj = NULL;
 191}
 192
 193/* Evict a buffer from usergart, if it is mapped there */
 194static void omap_gem_evict(struct drm_gem_object *obj)
 195{
 196        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 197        struct omap_drm_private *priv = obj->dev->dev_private;
 198
 199        if (omap_obj->flags & OMAP_BO_TILED_MASK) {
 200                enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
 201                int i;
 202
 203                for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
 204                        struct omap_drm_usergart_entry *entry =
 205                                &priv->usergart[fmt].entry[i];
 206
 207                        if (entry->obj == obj)
 208                                omap_gem_evict_entry(obj, fmt, entry);
 209                }
 210        }
 211}
 212
 213/* -----------------------------------------------------------------------------
 214 * Page Management
 215 */
 216
 217/*
 218 * Ensure backing pages are allocated. Must be called with the omap_obj.lock
 219 * held.
 220 */
 221static int omap_gem_attach_pages(struct drm_gem_object *obj)
 222{
 223        struct drm_device *dev = obj->dev;
 224        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 225        struct page **pages;
 226        int npages = obj->size >> PAGE_SHIFT;
 227        int i, ret;
 228        dma_addr_t *addrs;
 229
 230        lockdep_assert_held(&omap_obj->lock);
 231
 232        /*
 233         * If not using shmem (in which case backing pages don't need to be
 234         * allocated) or if pages are already allocated we're done.
 235         */
 236        if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
 237                return 0;
 238
 239        pages = drm_gem_get_pages(obj);
 240        if (IS_ERR(pages)) {
 241                dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
 242                return PTR_ERR(pages);
 243        }
 244
 245        /* for non-cached buffers, ensure the new pages are clean because
 246         * DSS, GPU, etc. are not cache coherent:
 247         */
 248        if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
 249                addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
 250                if (!addrs) {
 251                        ret = -ENOMEM;
 252                        goto free_pages;
 253                }
 254
 255                for (i = 0; i < npages; i++) {
 256                        addrs[i] = dma_map_page(dev->dev, pages[i],
 257                                        0, PAGE_SIZE, DMA_TO_DEVICE);
 258
 259                        if (dma_mapping_error(dev->dev, addrs[i])) {
 260                                dev_warn(dev->dev,
 261                                        "%s: failed to map page\n", __func__);
 262
 263                                for (i = i - 1; i >= 0; --i) {
 264                                        dma_unmap_page(dev->dev, addrs[i],
 265                                                PAGE_SIZE, DMA_TO_DEVICE);
 266                                }
 267
 268                                ret = -ENOMEM;
 269                                goto free_addrs;
 270                        }
 271                }
 272        } else {
 273                addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
 274                if (!addrs) {
 275                        ret = -ENOMEM;
 276                        goto free_pages;
 277                }
 278        }
 279
 280        omap_obj->dma_addrs = addrs;
 281        omap_obj->pages = pages;
 282
 283        return 0;
 284
 285free_addrs:
 286        kfree(addrs);
 287free_pages:
 288        drm_gem_put_pages(obj, pages, true, false);
 289
 290        return ret;
 291}
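
     /*
      * Note (descriptive summary of the above): for OMAP_BO_WC and
      * OMAP_BO_UNCACHED buffers every page is dma_map_page()'d once here,
      * which also cleans the CPU cache; for cached buffers dma_addrs[] starts
      * out zeroed and is filled lazily by omap_gem_dma_sync_buffer() as pages
      * are handed over to the device.
      */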
 292
 293/* Release backing pages. Must be called with the omap_obj.lock held. */
 294static void omap_gem_detach_pages(struct drm_gem_object *obj)
 295{
 296        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 297        unsigned int npages = obj->size >> PAGE_SHIFT;
 298        unsigned int i;
 299
 300        lockdep_assert_held(&omap_obj->lock);
 301
 302        for (i = 0; i < npages; i++) {
 303                if (omap_obj->dma_addrs[i])
 304                        dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
 305                                       PAGE_SIZE, DMA_TO_DEVICE);
 306        }
 307
 308        kfree(omap_obj->dma_addrs);
 309        omap_obj->dma_addrs = NULL;
 310
 311        drm_gem_put_pages(obj, omap_obj->pages, true, false);
 312        omap_obj->pages = NULL;
 313}
 314
 315/* get buffer flags */
 316u32 omap_gem_flags(struct drm_gem_object *obj)
 317{
 318        return to_omap_bo(obj)->flags;
 319}
 320
 321/** get mmap size */
 322size_t omap_gem_mmap_size(struct drm_gem_object *obj)
 323{
 324        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 325        size_t size = obj->size;
 326
 327        if (omap_obj->flags & OMAP_BO_TILED_MASK) {
 328                /* for tiled buffers, the virtual size has stride rounded up
 329                 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
 330                 * 32kb later!).  But we don't back the entire buffer with
 331                 * pages, only the valid picture part.. so need to adjust for
 332                 * this in the size used to mmap and generate mmap offset
 333                 */
 334                size = tiler_vsize(gem2fmt(omap_obj->flags),
 335                                omap_obj->width, omap_obj->height);
 336        }
 337
 338        return size;
 339}
 340
 341/* -----------------------------------------------------------------------------
 342 * Fault Handling
 343 */
 344
 345/* Normal handling for the case of faulting in non-tiled buffers */
 346static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
 347                struct vm_area_struct *vma, struct vm_fault *vmf)
 348{
 349        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 350        unsigned long pfn;
 351        pgoff_t pgoff;
 352
 353        /* We don't use vmf->pgoff since that has the fake offset: */
 354        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 355
 356        if (omap_obj->pages) {
 357                omap_gem_cpu_sync_page(obj, pgoff);
 358                pfn = page_to_pfn(omap_obj->pages[pgoff]);
 359        } else {
 360                BUG_ON(!omap_gem_is_contiguous(omap_obj));
 361                pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
 362        }
 363
 364        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 365                        pfn, pfn << PAGE_SHIFT);
 366
 367        return vmf_insert_mixed(vma, vmf->address,
 368                        __pfn_to_pfn_t(pfn, PFN_DEV));
 369}
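
     /*
      * Worked example for omap_gem_fault_1d() above (illustrative numbers):
      * a fault at vmf->address == vma->vm_start + 3 * PAGE_SIZE gives
      * pgoff == 3, so either pages[3] (shmem-backed case) or the page at
      * dma_addr + 3 * PAGE_SIZE (physically contiguous case) is inserted at
      * the faulting address.
      */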
 370
 371/* Special handling for the case of faulting in 2d tiled buffers */
 372static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
 373                struct vm_area_struct *vma, struct vm_fault *vmf)
 374{
 375        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 376        struct omap_drm_private *priv = obj->dev->dev_private;
 377        struct omap_drm_usergart_entry *entry;
 378        enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
 379        struct page *pages[64];  /* XXX is this too much to have on stack? */
 380        unsigned long pfn;
 381        pgoff_t pgoff, base_pgoff;
 382        unsigned long vaddr;
 383        int i, err, slots;
 384        vm_fault_t ret = VM_FAULT_NOPAGE;
 385
 386        /*
 387         * Note the height of the slot is also equal to the number of pages
  388         * that need to be mapped in to fill a 4kb wide CPU page.  If the slot
 389         * height is 64, then 64 pages fill a 4kb wide by 64 row region.
 390         */
 391        const int n = priv->usergart[fmt].height;
 392        const int n_shift = priv->usergart[fmt].height_shift;
 393
 394        /*
 395         * If buffer width in bytes > PAGE_SIZE then the virtual stride is
  396         * rounded up to the next multiple of PAGE_SIZE.. this needs to be taken
 397         * into account in some of the math, so figure out virtual stride
 398         * in pages
 399         */
 400        const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
 401
 402        /* We don't use vmf->pgoff since that has the fake offset: */
 403        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 404
 405        /*
 406         * Actual address we start mapping at is rounded down to previous slot
 407         * boundary in the y direction:
 408         */
 409        base_pgoff = round_down(pgoff, m << n_shift);
 410
 411        /* figure out buffer width in slots */
 412        slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
 413
 414        vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
 415
 416        entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
 417
 418        /* evict previous buffer using this usergart entry, if any: */
 419        if (entry->obj)
 420                omap_gem_evict_entry(entry->obj, fmt, entry);
 421
 422        entry->obj = obj;
 423        entry->obj_pgoff = base_pgoff;
 424
 425        /* now convert base_pgoff to phys offset from virt offset: */
 426        base_pgoff = (base_pgoff >> n_shift) * slots;
 427
 428        /* for wider-than 4k.. figure out which part of the slot-row we want: */
 429        if (m > 1) {
 430                int off = pgoff % m;
 431                entry->obj_pgoff += off;
 432                base_pgoff /= m;
 433                slots = min(slots - (off << n_shift), n);
 434                base_pgoff += off << n_shift;
 435                vaddr += off << PAGE_SHIFT;
 436        }
 437
 438        /*
 439         * Map in pages. Beyond the valid pixel part of the buffer, we set
 440         * pages[i] to NULL to get a dummy page mapped in.. if someone
 441         * reads/writes it they will get random/undefined content, but at
 442         * least it won't be corrupting whatever other random page used to
 443         * be mapped in, or other undefined behavior.
 444         */
 445        memcpy(pages, &omap_obj->pages[base_pgoff],
 446                        sizeof(struct page *) * slots);
 447        memset(pages + slots, 0,
 448                        sizeof(struct page *) * (n - slots));
 449
 450        err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
 451        if (err) {
 452                ret = vmf_error(err);
 453                dev_err(obj->dev->dev, "failed to pin: %d\n", err);
 454                return ret;
 455        }
 456
 457        pfn = entry->dma_addr >> PAGE_SHIFT;
 458
 459        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 460                        pfn, pfn << PAGE_SHIFT);
 461
 462        for (i = n; i > 0; i--) {
 463                ret = vmf_insert_mixed(vma,
 464                        vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
 465                if (ret & VM_FAULT_ERROR)
 466                        break;
 467                pfn += priv->usergart[fmt].stride_pfn;
 468                vaddr += PAGE_SIZE * m;
 469        }
 470
 471        /* simple round-robin: */
 472        priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
 473                                 % NUM_USERGART_ENTRIES;
 474
 475        return ret;
 476}
 477
 478/**
 479 * omap_gem_fault               -       pagefault handler for GEM objects
 480 * @vmf: fault detail
 481 *
 482 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 483 * does most of the work for us including the actual map/unmap calls
 484 * but we need to do the actual page work.
 485 *
 486 * The VMA was set up by GEM. In doing so it also ensured that the
 487 * vma->vm_private_data points to the GEM object that is backing this
 488 * mapping.
 489 */
 490static vm_fault_t omap_gem_fault(struct vm_fault *vmf)
 491{
 492        struct vm_area_struct *vma = vmf->vma;
 493        struct drm_gem_object *obj = vma->vm_private_data;
 494        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 495        int err;
 496        vm_fault_t ret;
 497
  498        /* Make sure we don't race with a parallel update on a fault, nor
  499         * move or remove something from beneath our feet.
 500         */
 501        mutex_lock(&omap_obj->lock);
 502
 503        /* if a shmem backed object, make sure we have pages attached now */
 504        err = omap_gem_attach_pages(obj);
 505        if (err) {
 506                ret = vmf_error(err);
 507                goto fail;
 508        }
 509
 510        /* where should we do corresponding put_pages().. we are mapping
 511         * the original page, rather than thru a GART, so we can't rely
  512         * on eviction to trigger this.  But munmap() of all mappings should
 513         * probably trigger put_pages()?
 514         */
 515
 516        if (omap_obj->flags & OMAP_BO_TILED_MASK)
 517                ret = omap_gem_fault_2d(obj, vma, vmf);
 518        else
 519                ret = omap_gem_fault_1d(obj, vma, vmf);
 520
 521
 522fail:
 523        mutex_unlock(&omap_obj->lock);
 524        return ret;
 525}
 526
 527/** We override mainly to fix up some of the vm mapping flags.. */
 528int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 529{
 530        int ret;
 531
 532        ret = drm_gem_mmap(filp, vma);
 533        if (ret) {
 534                DBG("mmap failed: %d", ret);
 535                return ret;
 536        }
 537
 538        return omap_gem_mmap_obj(vma->vm_private_data, vma);
 539}
 540
 541int omap_gem_mmap_obj(struct drm_gem_object *obj,
 542                struct vm_area_struct *vma)
 543{
 544        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 545
 546        vma->vm_flags &= ~VM_PFNMAP;
 547        vma->vm_flags |= VM_MIXEDMAP;
 548
 549        if (omap_obj->flags & OMAP_BO_WC) {
 550                vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 551        } else if (omap_obj->flags & OMAP_BO_UNCACHED) {
 552                vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
 553        } else {
 554                /*
 555                 * We do have some private objects, at least for scanout buffers
 556                 * on hardware without DMM/TILER.  But these are allocated write-
 557                 * combine
 558                 */
 559                if (WARN_ON(!obj->filp))
 560                        return -EINVAL;
 561
 562                /*
 563                 * Shunt off cached objs to shmem file so they have their own
 564                 * address_space (so unmap_mapping_range does what we want,
 565                 * in particular in the case of mmap'd dmabufs)
 566                 */
 567                vma->vm_pgoff = 0;
 568                vma_set_file(vma, obj->filp);
 569
 570                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 571        }
 572
 573        return 0;
 574}
 575
 576/* -----------------------------------------------------------------------------
 577 * Dumb Buffers
 578 */
 579
 580/**
 581 * omap_gem_dumb_create -       create a dumb buffer
 582 * @file: our client file
 583 * @dev: our device
 584 * @args: the requested arguments copied from userspace
 585 *
 586 * Allocate a buffer suitable for use for a frame buffer of the
 587 * form described by user space. Give userspace a handle by which
 588 * to reference it.
 589 */
 590int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 591                struct drm_mode_create_dumb *args)
 592{
 593        union omap_gem_size gsize;
 594
 595        args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 596
 597        args->size = PAGE_ALIGN(args->pitch * args->height);
 598
 599        gsize = (union omap_gem_size){
 600                .bytes = args->size,
 601        };
 602
 603        return omap_gem_new_handle(dev, file, gsize,
 604                        OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
 605}
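
     /*
      * Worked example for omap_gem_dumb_create() above (illustrative values,
      * assuming 4 KiB pages): width = 1920, height = 1080, bpp = 32 gives
      * pitch = 1920 * 32 / 8 = 7680 bytes and
      * size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes (2025 pages), allocated
      * with OMAP_BO_SCANOUT | OMAP_BO_WC.
      */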
 606
 607/**
  608 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 609 * @file: our drm client file
 610 * @dev: drm device
 611 * @handle: GEM handle to the object (from dumb_create)
 612 * @offset: memory map offset placeholder
 613 *
 614 * Do the necessary setup to allow the mapping of the frame buffer
 615 * into user memory. We don't have to do much here at the moment.
 616 */
 617int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 618                u32 handle, u64 *offset)
 619{
 620        struct drm_gem_object *obj;
 621        int ret = 0;
 622
 623        /* GEM does all our handle to object mapping */
 624        obj = drm_gem_object_lookup(file, handle);
 625        if (obj == NULL) {
 626                ret = -ENOENT;
 627                goto fail;
 628        }
 629
 630        *offset = omap_gem_mmap_offset(obj);
 631
 632        drm_gem_object_put(obj);
 633
 634fail:
 635        return ret;
 636}
 637
 638#ifdef CONFIG_DRM_FBDEV_EMULATION
 639/* Set scrolling position.  This allows us to implement fast scrolling
 640 * for console.
 641 *
 642 * Call only from non-atomic contexts.
 643 */
 644int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
 645{
 646        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 647        u32 npages = obj->size >> PAGE_SHIFT;
 648        int ret = 0;
 649
 650        if (roll > npages) {
 651                dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
 652                return -EINVAL;
 653        }
 654
 655        omap_obj->roll = roll;
 656
 657        mutex_lock(&omap_obj->lock);
 658
 659        /* if we aren't mapped yet, we don't need to do anything */
 660        if (omap_obj->block) {
 661                ret = omap_gem_attach_pages(obj);
 662                if (ret)
 663                        goto fail;
 664
 665                ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
 666                                roll, true);
 667                if (ret)
 668                        dev_err(obj->dev->dev, "could not repin: %d\n", ret);
 669        }
 670
 671fail:
 672        mutex_unlock(&omap_obj->lock);
 673
 674        return ret;
 675}
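
     /*
      * Illustrative example (assumes 4 KiB pages, not taken from a real
      * user): scrolling an XR24 (32 bpp) 1920x1080 console framebuffer by
      * one full screen corresponds to
      * roll = (1920 * 4 * 1080) / PAGE_SIZE = 2025 pages; the pages stay
      * attached and are simply re-pinned at the new offset within the TILER
      * block.
      */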
 676#endif
 677
 678/* -----------------------------------------------------------------------------
 679 * Memory Management & DMA Sync
 680 */
 681
 682/*
 683 * shmem buffers that are mapped cached are not coherent.
 684 *
 685 * We keep track of dirty pages using page faulting to perform cache management.
 686 * When a page is mapped to the CPU in read/write mode the device can't access
 687 * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
 688 * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
 689 * unmapped from the CPU.
 690 */
 691static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
 692{
 693        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 694
 695        return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
 696                ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
 697}
 698
 699/* Sync the buffer for CPU access.. note pages should already be
  700 * attached, i.e. via omap_gem_get_pages()
 701 */
 702void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
 703{
 704        struct drm_device *dev = obj->dev;
 705        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 706
 707        if (omap_gem_is_cached_coherent(obj))
 708                return;
 709
 710        if (omap_obj->dma_addrs[pgoff]) {
 711                dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
 712                                PAGE_SIZE, DMA_TO_DEVICE);
 713                omap_obj->dma_addrs[pgoff] = 0;
 714        }
 715}
 716
 717/* sync the buffer for DMA access */
 718void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
 719                enum dma_data_direction dir)
 720{
 721        struct drm_device *dev = obj->dev;
 722        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 723        int i, npages = obj->size >> PAGE_SHIFT;
 724        struct page **pages = omap_obj->pages;
 725        bool dirty = false;
 726
 727        if (omap_gem_is_cached_coherent(obj))
 728                return;
 729
 730        for (i = 0; i < npages; i++) {
 731                if (!omap_obj->dma_addrs[i]) {
 732                        dma_addr_t addr;
 733
 734                        addr = dma_map_page(dev->dev, pages[i], 0,
 735                                            PAGE_SIZE, dir);
 736                        if (dma_mapping_error(dev->dev, addr)) {
 737                                dev_warn(dev->dev, "%s: failed to map page\n",
 738                                        __func__);
 739                                break;
 740                        }
 741
 742                        dirty = true;
 743                        omap_obj->dma_addrs[i] = addr;
 744                }
 745        }
 746
 747        if (dirty) {
 748                unmap_mapping_range(obj->filp->f_mapping, 0,
 749                                    omap_gem_mmap_size(obj), 1);
 750        }
 751}
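
     /*
      * Expected call pattern (sketch of the dirty tracking described above):
      * before handing a cached shmem buffer to the DSS/GPU a caller invokes
      * omap_gem_dma_sync_buffer(obj, DMA_TO_DEVICE), which maps (and thereby
      * cleans) every page still owned by the CPU and then zaps the userspace
      * mapping; the next CPU access faults the page back in through
      * omap_gem_fault_1d(), which calls omap_gem_cpu_sync_page() to hand
      * ownership back to the CPU.
      */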
 752
 753/**
 754 * omap_gem_pin() - Pin a GEM object in memory
 755 * @obj: the GEM object
 756 * @dma_addr: the DMA address
 757 *
 758 * Pin the given GEM object in memory and fill the dma_addr pointer with the
 759 * object's DMA address. If the buffer is not physically contiguous it will be
 760 * remapped through the TILER to provide a contiguous view.
 761 *
 762 * Pins are reference-counted, calling this function multiple times is allowed
 763 * as long the corresponding omap_gem_unpin() calls are balanced.
 764 *
 765 * Return 0 on success or a negative error code otherwise.
 766 */
 767int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
 768{
 769        struct omap_drm_private *priv = obj->dev->dev_private;
 770        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 771        int ret = 0;
 772
 773        mutex_lock(&omap_obj->lock);
 774
 775        if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
 776                if (refcount_read(&omap_obj->dma_addr_cnt) == 0) {
 777                        u32 npages = obj->size >> PAGE_SHIFT;
 778                        enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
 779                        struct tiler_block *block;
 780
 781                        BUG_ON(omap_obj->block);
 782
 783                        refcount_set(&omap_obj->dma_addr_cnt, 1);
 784
 785                        ret = omap_gem_attach_pages(obj);
 786                        if (ret)
 787                                goto fail;
 788
 789                        if (omap_obj->flags & OMAP_BO_TILED_MASK) {
 790                                block = tiler_reserve_2d(fmt,
 791                                                omap_obj->width,
 792                                                omap_obj->height, 0);
 793                        } else {
 794                                block = tiler_reserve_1d(obj->size);
 795                        }
 796
 797                        if (IS_ERR(block)) {
 798                                ret = PTR_ERR(block);
 799                                dev_err(obj->dev->dev,
 800                                        "could not remap: %d (%d)\n", ret, fmt);
 801                                goto fail;
 802                        }
 803
 804                        /* TODO: enable async refill.. */
 805                        ret = tiler_pin(block, omap_obj->pages, npages,
 806                                        omap_obj->roll, true);
 807                        if (ret) {
 808                                tiler_release(block);
 809                                dev_err(obj->dev->dev,
 810                                                "could not pin: %d\n", ret);
 811                                goto fail;
 812                        }
 813
 814                        omap_obj->dma_addr = tiler_ssptr(block);
 815                        omap_obj->block = block;
 816
 817                        DBG("got dma address: %pad", &omap_obj->dma_addr);
 818                } else {
 819                        refcount_inc(&omap_obj->dma_addr_cnt);
 820                }
 821
 822                if (dma_addr)
 823                        *dma_addr = omap_obj->dma_addr;
 824        } else if (omap_gem_is_contiguous(omap_obj)) {
 825                if (dma_addr)
 826                        *dma_addr = omap_obj->dma_addr;
 827        } else {
 828                ret = -EINVAL;
 829                goto fail;
 830        }
 831
 832fail:
 833        mutex_unlock(&omap_obj->lock);
 834
 835        return ret;
 836}
 837
 838/**
 839 * omap_gem_unpin_locked() - Unpin a GEM object from memory
 840 * @obj: the GEM object
 841 *
 842 * omap_gem_unpin() without locking.
 843 */
 844static void omap_gem_unpin_locked(struct drm_gem_object *obj)
 845{
 846        struct omap_drm_private *priv = obj->dev->dev_private;
 847        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 848        int ret;
 849
 850        if (omap_gem_is_contiguous(omap_obj) || !priv->has_dmm)
 851                return;
 852
 853        if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) {
 854                ret = tiler_unpin(omap_obj->block);
 855                if (ret) {
 856                        dev_err(obj->dev->dev,
 857                                "could not unpin pages: %d\n", ret);
 858                }
 859                ret = tiler_release(omap_obj->block);
 860                if (ret) {
 861                        dev_err(obj->dev->dev,
 862                                "could not release unmap: %d\n", ret);
 863                }
 864                omap_obj->dma_addr = 0;
 865                omap_obj->block = NULL;
 866        }
 867}
 868
 869/**
 870 * omap_gem_unpin() - Unpin a GEM object from memory
 871 * @obj: the GEM object
 872 *
 873 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
 874 * reference-counted, the actual unpin will only be performed when the number
 875 * of calls to this function matches the number of calls to omap_gem_pin().
 876 */
 877void omap_gem_unpin(struct drm_gem_object *obj)
 878{
 879        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 880
 881        mutex_lock(&omap_obj->lock);
 882        omap_gem_unpin_locked(obj);
 883        mutex_unlock(&omap_obj->lock);
 884}
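
     /*
      * Usage sketch (hypothetical caller, e.g. a scanout setup path): pins
      * must be balanced with unpins, and the DMA address is only guaranteed
      * to remain valid while the pin is held:
      *
      *     dma_addr_t dma_addr;
      *     int ret = omap_gem_pin(obj, &dma_addr);
      *
      *     if (ret)
      *             return ret;
      *     ... program the hardware with dma_addr ...
      *     omap_gem_unpin(obj);
      */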
 885
 886/* Get rotated scanout address (only valid if already pinned), at the
 887 * specified orientation and x,y offset from top-left corner of buffer
 888 * (only valid for tiled 2d buffers)
 889 */
 890int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
 891                int x, int y, dma_addr_t *dma_addr)
 892{
 893        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 894        int ret = -EINVAL;
 895
 896        mutex_lock(&omap_obj->lock);
 897
 898        if ((refcount_read(&omap_obj->dma_addr_cnt) > 0) && omap_obj->block &&
 899                        (omap_obj->flags & OMAP_BO_TILED_MASK)) {
 900                *dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
 901                ret = 0;
 902        }
 903
 904        mutex_unlock(&omap_obj->lock);
 905
 906        return ret;
 907}
 908
 909/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
 910int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
 911{
 912        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 913        int ret = -EINVAL;
 914        if (omap_obj->flags & OMAP_BO_TILED_MASK)
 915                ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
 916        return ret;
 917}
 918
 919/* if !remap, and we don't have pages backing, then fail, rather than
 920 * increasing the pin count (which we don't really do yet anyways,
 921 * because we don't support swapping pages back out).  And 'remap'
 922 * might not be quite the right name, but I wanted to keep it working
  923 * similarly to omap_gem_pin().  Note that the omap_obj lock is
  924 * acquired in both the remap and !remap cases, so this must not be
  925 * called from atomic context.  If !remap, a matching
  926 * omap_gem_put_pages() call is not required (and should not be
  927 * made).
 928 */
 929int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
 930                bool remap)
 931{
 932        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 933        int ret = 0;
 934
 935        mutex_lock(&omap_obj->lock);
 936
 937        if (remap) {
 938                ret = omap_gem_attach_pages(obj);
 939                if (ret)
 940                        goto unlock;
 941        }
 942
 943        if (!omap_obj->pages) {
 944                ret = -ENOMEM;
 945                goto unlock;
 946        }
 947
 948        *pages = omap_obj->pages;
 949
 950unlock:
 951        mutex_unlock(&omap_obj->lock);
 952
 953        return ret;
 954}
 955
 956/* release pages when DMA no longer being performed */
 957int omap_gem_put_pages(struct drm_gem_object *obj)
 958{
 959        /* do something here if we dynamically attach/detach pages.. at
 960         * least they would no longer need to be pinned if everyone has
 961         * released the pages..
 962         */
 963        return 0;
 964}
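
     /*
      * Usage sketch (hypothetical caller): with remap == true the call may
      * allocate and attach the shmem pages, and it takes the omap_obj lock
      * either way, so it must not be used from atomic context; pair it with
      * omap_gem_put_pages():
      *
      *     struct page **pages;
      *     int ret = omap_gem_get_pages(obj, &pages, true);
      *
      *     if (ret)
      *             return ret;
      *     ... access the pages / build an sg list ...
      *     omap_gem_put_pages(obj);
      */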
 965
 966#ifdef CONFIG_DRM_FBDEV_EMULATION
 967/*
 968 * Get kernel virtual address for CPU access.. this more or less only
 969 * exists for omap_fbdev.
 970 */
 971void *omap_gem_vaddr(struct drm_gem_object *obj)
 972{
 973        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 974        void *vaddr;
 975        int ret;
 976
 977        mutex_lock(&omap_obj->lock);
 978
 979        if (!omap_obj->vaddr) {
 980                ret = omap_gem_attach_pages(obj);
 981                if (ret) {
 982                        vaddr = ERR_PTR(ret);
 983                        goto unlock;
 984                }
 985
 986                omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
 987                                VM_MAP, pgprot_writecombine(PAGE_KERNEL));
 988        }
 989
 990        vaddr = omap_obj->vaddr;
 991
 992unlock:
 993        mutex_unlock(&omap_obj->lock);
 994        return vaddr;
 995}
 996#endif
 997
 998/* -----------------------------------------------------------------------------
 999 * Power Management
1000 */
1001
1002#ifdef CONFIG_PM
1003/* re-pin objects in DMM in resume path: */
1004int omap_gem_resume(struct drm_device *dev)
1005{
1006        struct omap_drm_private *priv = dev->dev_private;
1007        struct omap_gem_object *omap_obj;
1008        int ret = 0;
1009
1010        mutex_lock(&priv->list_lock);
1011        list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
1012                if (omap_obj->block) {
1013                        struct drm_gem_object *obj = &omap_obj->base;
1014                        u32 npages = obj->size >> PAGE_SHIFT;
1015
1016                        WARN_ON(!omap_obj->pages);  /* this can't happen */
1017                        ret = tiler_pin(omap_obj->block,
1018                                        omap_obj->pages, npages,
1019                                        omap_obj->roll, true);
1020                        if (ret) {
1021                                dev_err(dev->dev, "could not repin: %d\n", ret);
1022                                goto done;
1023                        }
1024                }
1025        }
1026
1027done:
1028        mutex_unlock(&priv->list_lock);
1029        return ret;
1030}
1031#endif
1032
1033/* -----------------------------------------------------------------------------
1034 * DebugFS
1035 */
1036
1037#ifdef CONFIG_DEBUG_FS
1038void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
1039{
1040        struct omap_gem_object *omap_obj = to_omap_bo(obj);
1041        u64 off;
1042
1043        off = drm_vma_node_start(&obj->vma_node);
1044
1045        mutex_lock(&omap_obj->lock);
1046
1047        seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
1048                        omap_obj->flags, obj->name, kref_read(&obj->refcount),
1049                        off, &omap_obj->dma_addr,
1050                        refcount_read(&omap_obj->dma_addr_cnt),
1051                        omap_obj->vaddr, omap_obj->roll);
1052
1053        if (omap_obj->flags & OMAP_BO_TILED_MASK) {
1054                seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
1055                if (omap_obj->block) {
1056                        struct tcm_area *area = &omap_obj->block->area;
1057                        seq_printf(m, " (%dx%d, %dx%d)",
1058                                        area->p0.x, area->p0.y,
1059                                        area->p1.x, area->p1.y);
1060                }
1061        } else {
1062                seq_printf(m, " %zu", obj->size);
1063        }
1064
1065        mutex_unlock(&omap_obj->lock);
1066
1067        seq_printf(m, "\n");
1068}
1069
1070void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
1071{
1072        struct omap_gem_object *omap_obj;
1073        int count = 0;
1074        size_t size = 0;
1075
1076        list_for_each_entry(omap_obj, list, mm_list) {
1077                struct drm_gem_object *obj = &omap_obj->base;
1078                seq_printf(m, "   ");
1079                omap_gem_describe(obj, m);
1080                count++;
1081                size += obj->size;
1082        }
1083
1084        seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
1085}
1086#endif
1087
1088/* -----------------------------------------------------------------------------
1089 * Constructor & Destructor
1090 */
1091
1092static void omap_gem_free_object(struct drm_gem_object *obj)
1093{
1094        struct drm_device *dev = obj->dev;
1095        struct omap_drm_private *priv = dev->dev_private;
1096        struct omap_gem_object *omap_obj = to_omap_bo(obj);
1097
1098        omap_gem_evict(obj);
1099
1100        mutex_lock(&priv->list_lock);
1101        list_del(&omap_obj->mm_list);
1102        mutex_unlock(&priv->list_lock);
1103
1104        /*
1105         * We own the sole reference to the object at this point, but to keep
 1106         * lockdep happy, we must still take omap_obj->lock to call
1107         * omap_gem_detach_pages(). This should hardly make any difference as
1108         * there can't be any lock contention.
1109         */
1110        mutex_lock(&omap_obj->lock);
1111
1112        /* The object should not be pinned. */
1113        WARN_ON(refcount_read(&omap_obj->dma_addr_cnt) > 0);
1114
1115        if (omap_obj->pages) {
1116                if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
1117                        kfree(omap_obj->pages);
1118                else
1119                        omap_gem_detach_pages(obj);
1120        }
1121
1122        if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
1123                dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
1124                            omap_obj->dma_addr);
1125        } else if (omap_obj->vaddr) {
1126                vunmap(omap_obj->vaddr);
1127        } else if (obj->import_attach) {
1128                drm_prime_gem_destroy(obj, omap_obj->sgt);
1129        }
1130
1131        mutex_unlock(&omap_obj->lock);
1132
1133        drm_gem_object_release(obj);
1134
1135        mutex_destroy(&omap_obj->lock);
1136
1137        kfree(omap_obj);
1138}
1139
1140static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags)
1141{
1142        struct omap_drm_private *priv = dev->dev_private;
1143
1144        switch (flags & OMAP_BO_CACHE_MASK) {
1145        case OMAP_BO_CACHED:
1146        case OMAP_BO_WC:
1147        case OMAP_BO_CACHE_MASK:
1148                break;
1149
1150        default:
1151                return false;
1152        }
1153
1154        if (flags & OMAP_BO_TILED_MASK) {
1155                if (!priv->usergart)
1156                        return false;
1157
1158                switch (flags & OMAP_BO_TILED_MASK) {
1159                case OMAP_BO_TILED_8:
1160                case OMAP_BO_TILED_16:
1161                case OMAP_BO_TILED_32:
1162                        break;
1163
1164                default:
1165                        return false;
1166                }
1167        }
1168
1169        return true;
1170}
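
     /*
      * For example (illustrative): a request for OMAP_BO_TILED_16 | OMAP_BO_WC
      * is only accepted once the usergart has been set up by omap_gem_init()
      * below, i.e. when DMM/TILER hardware is present; on hardware without
      * DMM every OMAP_BO_TILED_* request is rejected here.
      */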
1171
1172static const struct vm_operations_struct omap_gem_vm_ops = {
1173        .fault = omap_gem_fault,
1174        .open = drm_gem_vm_open,
1175        .close = drm_gem_vm_close,
1176};
1177
1178static const struct drm_gem_object_funcs omap_gem_object_funcs = {
1179        .free = omap_gem_free_object,
1180        .export = omap_gem_prime_export,
1181        .vm_ops = &omap_gem_vm_ops,
1182};
1183
1184/* GEM buffer object constructor */
1185struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1186                union omap_gem_size gsize, u32 flags)
1187{
1188        struct omap_drm_private *priv = dev->dev_private;
1189        struct omap_gem_object *omap_obj;
1190        struct drm_gem_object *obj;
1191        struct address_space *mapping;
1192        size_t size;
1193        int ret;
1194
1195        if (!omap_gem_validate_flags(dev, flags))
1196                return NULL;
1197
 1198        /* Compute the memory and cache flags. */
1199        if (flags & OMAP_BO_TILED_MASK) {
1200                /*
1201                 * Tiled buffers are always shmem paged backed. When they are
1202                 * scanned out, they are remapped into DMM/TILER.
1203                 */
1204                flags |= OMAP_BO_MEM_SHMEM;
1205
1206                /*
1207                 * Currently don't allow cached buffers. There is some caching
1208                 * stuff that needs to be handled better.
1209                 */
1210                flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
1211                flags |= tiler_get_cpu_cache_flags();
1212        } else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
1213                /*
1214                 * If we don't have DMM, we must allocate scanout buffers
1215                 * from contiguous DMA memory.
1216                 */
1217                flags |= OMAP_BO_MEM_DMA_API;
1218        } else if (!(flags & OMAP_BO_MEM_DMABUF)) {
1219                /*
1220                 * All other buffers not backed by dma_buf are shmem-backed.
1221                 */
1222                flags |= OMAP_BO_MEM_SHMEM;
1223        }
1224
 1225        /* Allocate and initialize the OMAP GEM object. */
1226        omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
1227        if (!omap_obj)
1228                return NULL;
1229
1230        obj = &omap_obj->base;
1231        omap_obj->flags = flags;
1232        mutex_init(&omap_obj->lock);
1233
1234        if (flags & OMAP_BO_TILED_MASK) {
1235                /*
1236                 * For tiled buffers align dimensions to slot boundaries and
1237                 * calculate size based on aligned dimensions.
1238                 */
1239                tiler_align(gem2fmt(flags), &gsize.tiled.width,
1240                            &gsize.tiled.height);
1241
1242                size = tiler_size(gem2fmt(flags), gsize.tiled.width,
1243                                  gsize.tiled.height);
1244
1245                omap_obj->width = gsize.tiled.width;
1246                omap_obj->height = gsize.tiled.height;
1247        } else {
1248                size = PAGE_ALIGN(gsize.bytes);
1249        }
1250
1251        obj->funcs = &omap_gem_object_funcs;
1252
1253        /* Initialize the GEM object. */
1254        if (!(flags & OMAP_BO_MEM_SHMEM)) {
1255                drm_gem_private_object_init(dev, obj, size);
1256        } else {
1257                ret = drm_gem_object_init(dev, obj, size);
1258                if (ret)
1259                        goto err_free;
1260
1261                mapping = obj->filp->f_mapping;
1262                mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
1263        }
1264
1265        /* Allocate memory if needed. */
1266        if (flags & OMAP_BO_MEM_DMA_API) {
1267                omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
1268                                               &omap_obj->dma_addr,
1269                                               GFP_KERNEL);
1270                if (!omap_obj->vaddr)
1271                        goto err_release;
1272        }
1273
1274        mutex_lock(&priv->list_lock);
1275        list_add(&omap_obj->mm_list, &priv->obj_list);
1276        mutex_unlock(&priv->list_lock);
1277
1278        return obj;
1279
1280err_release:
1281        drm_gem_object_release(obj);
1282err_free:
1283        kfree(omap_obj);
1284        return NULL;
1285}
1286
1287struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
1288                                           struct sg_table *sgt)
1289{
1290        struct omap_drm_private *priv = dev->dev_private;
1291        struct omap_gem_object *omap_obj;
1292        struct drm_gem_object *obj;
1293        union omap_gem_size gsize;
1294
1295        /* Without a DMM only physically contiguous buffers can be supported. */
1296        if (sgt->orig_nents != 1 && !priv->has_dmm)
1297                return ERR_PTR(-EINVAL);
1298
1299        gsize.bytes = PAGE_ALIGN(size);
1300        obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
1301        if (!obj)
1302                return ERR_PTR(-ENOMEM);
1303
1304        omap_obj = to_omap_bo(obj);
1305
1306        mutex_lock(&omap_obj->lock);
1307
1308        omap_obj->sgt = sgt;
1309
1310        if (sgt->orig_nents == 1) {
1311                omap_obj->dma_addr = sg_dma_address(sgt->sgl);
1312        } else {
1313                /* Create pages list from sgt */
1314                struct page **pages;
1315                unsigned int npages;
1316                unsigned int ret;
1317
1318                npages = DIV_ROUND_UP(size, PAGE_SIZE);
1319                pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
1320                if (!pages) {
1321                        omap_gem_free_object(obj);
1322                        obj = ERR_PTR(-ENOMEM);
1323                        goto done;
1324                }
1325
1326                omap_obj->pages = pages;
1327                ret = drm_prime_sg_to_page_array(sgt, pages, npages);
1328                if (ret) {
1329                        omap_gem_free_object(obj);
1330                        obj = ERR_PTR(-ENOMEM);
1331                        goto done;
1332                }
1333        }
1334
1335done:
1336        mutex_unlock(&omap_obj->lock);
1337        return obj;
1338}
1339
1340/* convenience method to construct a GEM buffer object, and userspace handle */
1341int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1342                union omap_gem_size gsize, u32 flags, u32 *handle)
1343{
1344        struct drm_gem_object *obj;
1345        int ret;
1346
1347        obj = omap_gem_new(dev, gsize, flags);
1348        if (!obj)
1349                return -ENOMEM;
1350
1351        ret = drm_gem_handle_create(file, obj, handle);
1352        if (ret) {
1353                omap_gem_free_object(obj);
1354                return ret;
1355        }
1356
1357        /* drop reference from allocate - handle holds it now */
1358        drm_gem_object_put(obj);
1359
1360        return 0;
1361}
1362
1363/* -----------------------------------------------------------------------------
1364 * Init & Cleanup
1365 */
1366
 1367/* If DMM is available, set up the usergart regions used to mmap tiled buffers. */
1368void omap_gem_init(struct drm_device *dev)
1369{
1370        struct omap_drm_private *priv = dev->dev_private;
1371        struct omap_drm_usergart *usergart;
1372        const enum tiler_fmt fmts[] = {
1373                        TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
1374        };
1375        int i, j;
1376
1377        if (!dmm_is_available()) {
1378                /* DMM only supported on OMAP4 and later, so this isn't fatal */
1379                dev_warn(dev->dev, "DMM not available, disable DMM support\n");
1380                return;
1381        }
1382
1383        usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
1384        if (!usergart)
1385                return;
1386
1387        /* reserve 4k aligned/wide regions for userspace mappings: */
1388        for (i = 0; i < ARRAY_SIZE(fmts); i++) {
1389                u16 h = 1, w = PAGE_SIZE >> i;
1390
1391                tiler_align(fmts[i], &w, &h);
 1392                /* note: since each region is one 4kb page wide and the minimum
 1393                 * number of rows tall, the height ends up being the same as
 1394                 * the # of pages in the region
1395                 */
1396                usergart[i].height = h;
1397                usergart[i].height_shift = ilog2(h);
1398                usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
1399                usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
1400                for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
1401                        struct omap_drm_usergart_entry *entry;
1402                        struct tiler_block *block;
1403
1404                        entry = &usergart[i].entry[j];
1405                        block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
1406                        if (IS_ERR(block)) {
1407                                dev_err(dev->dev,
1408                                                "reserve failed: %d, %d, %ld\n",
1409                                                i, j, PTR_ERR(block));
1410                                return;
1411                        }
1412                        entry->dma_addr = tiler_ssptr(block);
1413                        entry->block = block;
1414
1415                        DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
1416                                        &entry->dma_addr,
1417                                        usergart[i].stride_pfn << PAGE_SHIFT);
1418                }
1419        }
1420
1421        priv->usergart = usergart;
1422        priv->has_dmm = true;
1423}
1424
1425void omap_gem_deinit(struct drm_device *dev)
1426{
1427        struct omap_drm_private *priv = dev->dev_private;
1428
1429        /* I believe we can rely on there being no more outstanding GEM
1430         * objects which could depend on usergart/dmm at this point.
1431         */
1432        kfree(priv->usergart);
1433}
1434