linux/drivers/gpu/drm/omapdrm/omap_gem.c
   1/*
   2 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
   3 * Author: Rob Clark <rob.clark@linaro.org>
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms of the GNU General Public License version 2 as published by
   7 * the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 * You should have received a copy of the GNU General Public License along with
  15 * this program.  If not, see <http://www.gnu.org/licenses/>.
  16 */
  17
  18#include <linux/seq_file.h>
  19#include <linux/shmem_fs.h>
  20#include <linux/spinlock.h>
  21#include <linux/pfn_t.h>
  22
  23#include <drm/drm_vma_manager.h>
  24
  25#include "omap_drv.h"
  26#include "omap_dmm_tiler.h"
  27
  28/*
  29 * GEM buffer object implementation.
  30 */
  31
  32/* note: we use upper 8 bits of flags for driver-internal flags: */
  33#define OMAP_BO_MEM_DMA_API     0x01000000      /* memory allocated with the dma_alloc_* API */
  34#define OMAP_BO_MEM_SHMEM       0x02000000      /* memory allocated through shmem backing */
  35#define OMAP_BO_MEM_DMABUF      0x08000000      /* memory imported from a dmabuf */
  36
  37struct omap_gem_object {
  38        struct drm_gem_object base;
  39
  40        struct list_head mm_list;
  41
  42        u32 flags;
  43
  44        /** width/height for tiled formats (rounded up to slot boundaries) */
  45        u16 width, height;
  46
  47        /** roll applied when mapping to DMM */
  48        u32 roll;
  49
  50        /**
  51         * dma_addr contains the buffer DMA address. It is valid for
  52         *
  53         * - buffers allocated through the DMA mapping API (with the
  54         *   OMAP_BO_MEM_DMA_API flag set)
  55         *
  56         * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
  57         *   if they are physically contiguous (when sgt->orig_nents == 1)
  58         *
  59         * - buffers mapped through the TILER when dma_addr_cnt is not zero, in
  60         *   which case the DMA address points to the TILER aperture
  61         *
  62         * Physically contiguous buffers have their DMA address equal to the
  63         * physical address as we don't remap those buffers through the TILER.
  64         *
  65         * Buffers mapped to the TILER have their DMA address pointing to the
  66         * TILER aperture. As TILER mappings are refcounted (through
  67         * dma_addr_cnt) the DMA address must be accessed through omap_gem_pin()
  68         * to ensure that the mapping won't disappear unexpectedly. References
  69         * must be released with omap_gem_unpin().
  70         */
  71        dma_addr_t dma_addr;
  72
  73        /**
  74         * # of users of dma_addr
  75         */
  76        u32 dma_addr_cnt;
  77
  78        /**
   79         * If the buffer has been imported from a dmabuf the
   80         * OMAP_BO_MEM_DMABUF flag is set and the sgt field is valid.
  81         */
  82        struct sg_table *sgt;
  83
  84        /**
  85         * tiler block used when buffer is remapped in DMM/TILER.
  86         */
  87        struct tiler_block *block;
  88
  89        /**
  90         * Array of backing pages, if allocated.  Note that pages are never
  91         * allocated for buffers originally allocated from contiguous memory
  92         */
  93        struct page **pages;
  94
  95        /** addresses corresponding to pages in above array */
  96        dma_addr_t *dma_addrs;
  97
  98        /**
  99         * Virtual address, if mapped.
 100         */
 101        void *vaddr;
 102};
 103
 104#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
 105
 106/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 107 * not necessarily pinned in TILER all the time, and (b) when they are
 108 * they are not necessarily page aligned, we reserve one or more small
 109 * regions in each of the 2d containers to use as a user-GART where we
 110 * can create a second page-aligned mapping of parts of the buffer
 111 * being accessed from userspace.
 112 *
 113 * Note that we could optimize slightly when we know that multiple
 114 * tiler containers are backed by the same PAT.. but I'll leave that
 115 * for later..
 116 */
 117#define NUM_USERGART_ENTRIES 2
 118struct omap_drm_usergart_entry {
 119        struct tiler_block *block;      /* the reserved tiler block */
 120        dma_addr_t dma_addr;
 121        struct drm_gem_object *obj;     /* the current pinned obj */
 122        pgoff_t obj_pgoff;              /* page offset of obj currently
 123                                           mapped in */
 124};
 125
 126struct omap_drm_usergart {
 127        struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
 128        int height;                             /* height in rows */
 129        int height_shift;               /* ilog2(height in rows) */
 130        int slot_shift;                 /* ilog2(width per slot) */
 131        int stride_pfn;                 /* stride in pages */
 132        int last;                               /* index of last used entry */
 133};
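
/*
 * How the pieces fit together (summary of the code below): omap_gem_init()
 * reserves NUM_USERGART_ENTRIES tiler blocks per format and records their
 * DMA addresses in these entries.  fault_2d() then borrows one entry per
 * fault, pins the faulted part of the buffer into the entry's tiler block,
 * and inserts the corresponding TILER-aperture pfns into the faulting VMA.
 * evict_entry() tears those CPU mappings down again when the entry is
 * recycled for another buffer.
 */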
 134
 135/* -----------------------------------------------------------------------------
 136 * Helpers
 137 */
 138
 139/** get mmap offset */
 140static u64 mmap_offset(struct drm_gem_object *obj)
 141{
 142        struct drm_device *dev = obj->dev;
 143        int ret;
 144        size_t size;
 145
 146        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 147
 148        /* Make it mmapable */
 149        size = omap_gem_mmap_size(obj);
 150        ret = drm_gem_create_mmap_offset_size(obj, size);
 151        if (ret) {
 152                dev_err(dev->dev, "could not allocate mmap offset\n");
 153                return 0;
 154        }
 155
 156        return drm_vma_node_offset_addr(&obj->vma_node);
 157}
 158
 159static bool is_contiguous(struct omap_gem_object *omap_obj)
 160{
 161        if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
 162                return true;
 163
  164        if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->orig_nents == 1)
 165                return true;
 166
 167        return false;
 168}
 169
 170/* -----------------------------------------------------------------------------
 171 * Eviction
 172 */
 173
 174static void evict_entry(struct drm_gem_object *obj,
 175                enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
 176{
 177        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 178        struct omap_drm_private *priv = obj->dev->dev_private;
 179        int n = priv->usergart[fmt].height;
 180        size_t size = PAGE_SIZE * n;
 181        loff_t off = mmap_offset(obj) +
 182                        (entry->obj_pgoff << PAGE_SHIFT);
 183        const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
 184
 185        if (m > 1) {
 186                int i;
  187                /* if the stride is larger than PAGE_SIZE then sparse mapping: */
 188                for (i = n; i > 0; i--) {
 189                        unmap_mapping_range(obj->dev->anon_inode->i_mapping,
 190                                            off, PAGE_SIZE, 1);
 191                        off += PAGE_SIZE * m;
 192                }
 193        } else {
 194                unmap_mapping_range(obj->dev->anon_inode->i_mapping,
 195                                    off, size, 1);
 196        }
 197
 198        entry->obj = NULL;
 199}
 200
 201/* Evict a buffer from usergart, if it is mapped there */
 202static void evict(struct drm_gem_object *obj)
 203{
 204        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 205        struct omap_drm_private *priv = obj->dev->dev_private;
 206
 207        if (omap_obj->flags & OMAP_BO_TILED) {
 208                enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
 209                int i;
 210
 211                for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
 212                        struct omap_drm_usergart_entry *entry =
 213                                &priv->usergart[fmt].entry[i];
 214
 215                        if (entry->obj == obj)
 216                                evict_entry(obj, fmt, entry);
 217                }
 218        }
 219}
 220
 221/* -----------------------------------------------------------------------------
 222 * Page Management
 223 */
 224
 225/** ensure backing pages are allocated */
 226static int omap_gem_attach_pages(struct drm_gem_object *obj)
 227{
 228        struct drm_device *dev = obj->dev;
 229        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 230        struct page **pages;
 231        int npages = obj->size >> PAGE_SHIFT;
 232        int i, ret;
 233        dma_addr_t *addrs;
 234
 235        WARN_ON(omap_obj->pages);
 236
 237        pages = drm_gem_get_pages(obj);
 238        if (IS_ERR(pages)) {
 239                dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
 240                return PTR_ERR(pages);
 241        }
 242
 243        /* for non-cached buffers, ensure the new pages are clean because
 244         * DSS, GPU, etc. are not cache coherent:
 245         */
 246        if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
  247                addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
 248                if (!addrs) {
 249                        ret = -ENOMEM;
 250                        goto free_pages;
 251                }
 252
 253                for (i = 0; i < npages; i++) {
 254                        addrs[i] = dma_map_page(dev->dev, pages[i],
 255                                        0, PAGE_SIZE, DMA_TO_DEVICE);
 256
 257                        if (dma_mapping_error(dev->dev, addrs[i])) {
 258                                dev_warn(dev->dev,
 259                                        "%s: failed to map page\n", __func__);
 260
 261                                for (i = i - 1; i >= 0; --i) {
 262                                        dma_unmap_page(dev->dev, addrs[i],
 263                                                PAGE_SIZE, DMA_TO_DEVICE);
 264                                }
 265
 266                                ret = -ENOMEM;
 267                                goto free_addrs;
 268                        }
 269                }
 270        } else {
  271                addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
 272                if (!addrs) {
 273                        ret = -ENOMEM;
 274                        goto free_pages;
 275                }
 276        }
 277
 278        omap_obj->dma_addrs = addrs;
 279        omap_obj->pages = pages;
 280
 281        return 0;
 282
 283free_addrs:
 284        kfree(addrs);
 285free_pages:
 286        drm_gem_put_pages(obj, pages, true, false);
 287
 288        return ret;
 289}
 290
 291/* acquire pages when needed (for example, for DMA where a physically
 292 * contiguous buffer is not required)
 293 */
 294static int get_pages(struct drm_gem_object *obj, struct page ***pages)
 295{
 296        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 297        int ret = 0;
 298
 299        if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
 300                ret = omap_gem_attach_pages(obj);
 301                if (ret) {
 302                        dev_err(obj->dev->dev, "could not attach pages\n");
 303                        return ret;
 304                }
 305        }
 306
 307        /* TODO: even phys-contig.. we should have a list of pages? */
 308        *pages = omap_obj->pages;
 309
 310        return 0;
 311}
 312
 313/** release backing pages */
 314static void omap_gem_detach_pages(struct drm_gem_object *obj)
 315{
 316        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 317        unsigned int npages = obj->size >> PAGE_SHIFT;
 318        unsigned int i;
 319
 320        for (i = 0; i < npages; i++) {
 321                if (omap_obj->dma_addrs[i])
 322                        dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
 323                                       PAGE_SIZE, DMA_TO_DEVICE);
 324        }
 325
 326        kfree(omap_obj->dma_addrs);
 327        omap_obj->dma_addrs = NULL;
 328
 329        drm_gem_put_pages(obj, omap_obj->pages, true, false);
 330        omap_obj->pages = NULL;
 331}
 332
 333/* get buffer flags */
 334u32 omap_gem_flags(struct drm_gem_object *obj)
 335{
 336        return to_omap_bo(obj)->flags;
 337}
 338
 339u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
 340{
 341        u64 offset;
 342
 343        mutex_lock(&obj->dev->struct_mutex);
 344        offset = mmap_offset(obj);
 345        mutex_unlock(&obj->dev->struct_mutex);
 346        return offset;
 347}
 348
 349/** get mmap size */
 350size_t omap_gem_mmap_size(struct drm_gem_object *obj)
 351{
 352        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 353        size_t size = obj->size;
 354
 355        if (omap_obj->flags & OMAP_BO_TILED) {
 356                /* for tiled buffers, the virtual size has stride rounded up
 357                 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
 358                 * 32kb later!).  But we don't back the entire buffer with
 359                 * pages, only the valid picture part.. so need to adjust for
 360                 * this in the size used to mmap and generate mmap offset
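                 *
                 * Illustrative example (hypothetical numbers, and assuming
                 * tiler_vsize() rounds each row up to PAGE_SIZE): a
                 * 1280-byte-wide, 512-row 8-bit tiled buffer would get a
                 * virtual size of 4096 * 512 bytes here, even though far
                 * fewer pages actually back the pixel data.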
 361                 */
 362                size = tiler_vsize(gem2fmt(omap_obj->flags),
 363                                omap_obj->width, omap_obj->height);
 364        }
 365
 366        return size;
 367}
 368
 369/* -----------------------------------------------------------------------------
 370 * Fault Handling
 371 */
 372
 373/* Normal handling for the case of faulting in non-tiled buffers */
 374static int fault_1d(struct drm_gem_object *obj,
 375                struct vm_area_struct *vma, struct vm_fault *vmf)
 376{
 377        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 378        unsigned long pfn;
 379        pgoff_t pgoff;
 380
 381        /* We don't use vmf->pgoff since that has the fake offset: */
 382        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 383
 384        if (omap_obj->pages) {
 385                omap_gem_cpu_sync_page(obj, pgoff);
 386                pfn = page_to_pfn(omap_obj->pages[pgoff]);
 387        } else {
 388                BUG_ON(!is_contiguous(omap_obj));
 389                pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
 390        }
 391
 392        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 393                        pfn, pfn << PAGE_SHIFT);
 394
 395        return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 396}
 397
 398/* Special handling for the case of faulting in 2d tiled buffers */
 399static int fault_2d(struct drm_gem_object *obj,
 400                struct vm_area_struct *vma, struct vm_fault *vmf)
 401{
 402        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 403        struct omap_drm_private *priv = obj->dev->dev_private;
 404        struct omap_drm_usergart_entry *entry;
 405        enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
 406        struct page *pages[64];  /* XXX is this too much to have on stack? */
 407        unsigned long pfn;
 408        pgoff_t pgoff, base_pgoff;
 409        unsigned long vaddr;
 410        int i, ret, slots;
 411
 412        /*
  413         * Note the height of the slot is also equal to the number of pages
  414         * that need to be mapped in to fill a 4kb-wide column of CPU pages.
  415         * If the slot height is 64, then 64 pages fill a 4kb-wide by 64-row region.
 416         */
 417        const int n = priv->usergart[fmt].height;
 418        const int n_shift = priv->usergart[fmt].height_shift;
 419
 420        /*
  421         * If the buffer width in bytes is larger than PAGE_SIZE then the
  422         * virtual stride is rounded up to the next multiple of PAGE_SIZE.
  423         * This needs to be taken into account in some of the math below,
  424         * so figure out the virtual stride in pages.
 425         */
 426        const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
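        /*
         * Worked example (hypothetical values, assuming width << fmt yields
         * the row size in bytes): a 2048-pixel-wide 32-bit buffer has
         * 8192-byte rows, so m = DIV_ROUND_UP(8192, 4096) = 2 and the
         * virtual stride spans two CPU pages.
         */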
 427
 428        /* We don't use vmf->pgoff since that has the fake offset: */
 429        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 430
 431        /*
 432         * Actual address we start mapping at is rounded down to previous slot
 433         * boundary in the y direction:
 434         */
 435        base_pgoff = round_down(pgoff, m << n_shift);
 436
 437        /* figure out buffer width in slots */
 438        slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
 439
 440        vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
 441
 442        entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
 443
 444        /* evict previous buffer using this usergart entry, if any: */
 445        if (entry->obj)
 446                evict_entry(entry->obj, fmt, entry);
 447
 448        entry->obj = obj;
 449        entry->obj_pgoff = base_pgoff;
 450
 451        /* now convert base_pgoff to phys offset from virt offset: */
 452        base_pgoff = (base_pgoff >> n_shift) * slots;
 453
  454        /* for wider-than-4k buffers, figure out which part of the slot-row we want: */
 455        if (m > 1) {
 456                int off = pgoff % m;
 457                entry->obj_pgoff += off;
 458                base_pgoff /= m;
 459                slots = min(slots - (off << n_shift), n);
 460                base_pgoff += off << n_shift;
 461                vaddr += off << PAGE_SHIFT;
 462        }
 463
 464        /*
 465         * Map in pages. Beyond the valid pixel part of the buffer, we set
 466         * pages[i] to NULL to get a dummy page mapped in.. if someone
 467         * reads/writes it they will get random/undefined content, but at
 468         * least it won't be corrupting whatever other random page used to
 469         * be mapped in, or other undefined behavior.
 470         */
 471        memcpy(pages, &omap_obj->pages[base_pgoff],
 472                        sizeof(struct page *) * slots);
 473        memset(pages + slots, 0,
 474                        sizeof(struct page *) * (n - slots));
 475
 476        ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
 477        if (ret) {
 478                dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
 479                return ret;
 480        }
 481
 482        pfn = entry->dma_addr >> PAGE_SHIFT;
 483
 484        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 485                        pfn, pfn << PAGE_SHIFT);
 486
 487        for (i = n; i > 0; i--) {
 488                vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
 489                pfn += priv->usergart[fmt].stride_pfn;
 490                vaddr += PAGE_SIZE * m;
 491        }
 492
 493        /* simple round-robin: */
 494        priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
 495                                 % NUM_USERGART_ENTRIES;
 496
 497        return 0;
 498}
 499
 500/**
 501 * omap_gem_fault               -       pagefault handler for GEM objects
 502 * @vmf: fault detail
 503 *
 504 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 505 * does most of the work for us including the actual map/unmap calls
 506 * but we need to do the actual page work.
 507 *
 508 * The VMA was set up by GEM. In doing so it also ensured that the
 509 * vma->vm_private_data points to the GEM object that is backing this
 510 * mapping.
 511 */
 512int omap_gem_fault(struct vm_fault *vmf)
 513{
 514        struct vm_area_struct *vma = vmf->vma;
 515        struct drm_gem_object *obj = vma->vm_private_data;
 516        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 517        struct drm_device *dev = obj->dev;
 518        struct page **pages;
 519        int ret;
 520
 521        /* Make sure we don't parallel update on a fault, nor move or remove
 522         * something from beneath our feet
 523         */
 524        mutex_lock(&dev->struct_mutex);
 525
 526        /* if a shmem backed object, make sure we have pages attached now */
 527        ret = get_pages(obj, &pages);
 528        if (ret)
 529                goto fail;
 530
  531        /* where should we do the corresponding put_pages()?  We are mapping
  532         * the original page, rather than going through a GART, so we can't
  533         * rely on eviction to trigger this.  But munmap() of all mappings
  534         * should probably trigger put_pages()?
 535         */
 536
 537        if (omap_obj->flags & OMAP_BO_TILED)
 538                ret = fault_2d(obj, vma, vmf);
 539        else
 540                ret = fault_1d(obj, vma, vmf);
  541
 543fail:
 544        mutex_unlock(&dev->struct_mutex);
 545        switch (ret) {
 546        case 0:
 547        case -ERESTARTSYS:
 548        case -EINTR:
 549        case -EBUSY:
 550                /*
 551                 * EBUSY is ok: this just means that another thread
 552                 * already did the job.
 553                 */
 554                return VM_FAULT_NOPAGE;
 555        case -ENOMEM:
 556                return VM_FAULT_OOM;
 557        default:
 558                return VM_FAULT_SIGBUS;
 559        }
 560}
 561
 562/** We override mainly to fix up some of the vm mapping flags.. */
 563int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 564{
 565        int ret;
 566
 567        ret = drm_gem_mmap(filp, vma);
 568        if (ret) {
 569                DBG("mmap failed: %d", ret);
 570                return ret;
 571        }
 572
 573        return omap_gem_mmap_obj(vma->vm_private_data, vma);
 574}
 575
 576int omap_gem_mmap_obj(struct drm_gem_object *obj,
 577                struct vm_area_struct *vma)
 578{
 579        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 580
 581        vma->vm_flags &= ~VM_PFNMAP;
 582        vma->vm_flags |= VM_MIXEDMAP;
 583
 584        if (omap_obj->flags & OMAP_BO_WC) {
 585                vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 586        } else if (omap_obj->flags & OMAP_BO_UNCACHED) {
 587                vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
 588        } else {
 589                /*
 590                 * We do have some private objects, at least for scanout buffers
  591                 * on hardware without DMM/TILER.  But these are allocated write-
  592                 * combined.
 593                 */
 594                if (WARN_ON(!obj->filp))
 595                        return -EINVAL;
 596
 597                /*
 598                 * Shunt off cached objs to shmem file so they have their own
 599                 * address_space (so unmap_mapping_range does what we want,
 600                 * in particular in the case of mmap'd dmabufs)
 601                 */
 602                fput(vma->vm_file);
 603                vma->vm_pgoff = 0;
 604                vma->vm_file  = get_file(obj->filp);
 605
 606                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 607        }
 608
 609        return 0;
 610}
 611
 612/* -----------------------------------------------------------------------------
 613 * Dumb Buffers
 614 */
 615
 616/**
 617 * omap_gem_dumb_create -       create a dumb buffer
  618 * @file: our client file
 619 * @dev: our device
 620 * @args: the requested arguments copied from userspace
 621 *
 622 * Allocate a buffer suitable for use for a frame buffer of the
 623 * form described by user space. Give userspace a handle by which
 624 * to reference it.
 625 */
 626int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 627                struct drm_mode_create_dumb *args)
 628{
 629        union omap_gem_size gsize;
 630
 631        args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 632
 633        args->size = PAGE_ALIGN(args->pitch * args->height);
 634
 635        gsize = (union omap_gem_size){
 636                .bytes = args->size,
 637        };
 638
 639        return omap_gem_new_handle(dev, file, gsize,
 640                        OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
 641}
 642
 643/**
  644 * omap_gem_dumb_map_offset    -       buffer mapping for dumb interface
 645 * @file: our drm client file
 646 * @dev: drm device
 647 * @handle: GEM handle to the object (from dumb_create)
 648 *
 649 * Do the necessary setup to allow the mapping of the frame buffer
 650 * into user memory. We don't have to do much here at the moment.
 651 */
 652int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 653                u32 handle, u64 *offset)
 654{
 655        struct drm_gem_object *obj;
 656        int ret = 0;
 657
 658        /* GEM does all our handle to object mapping */
 659        obj = drm_gem_object_lookup(file, handle);
 660        if (obj == NULL) {
 661                ret = -ENOENT;
 662                goto fail;
 663        }
 664
 665        *offset = omap_gem_mmap_offset(obj);
 666
 667        drm_gem_object_unreference_unlocked(obj);
 668
 669fail:
 670        return ret;
 671}
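
/*
 * Illustrative sketch of the userspace side served by the two helpers above
 * (drm_fd, ptr and the dimensions are placeholders, error handling omitted):
 *
 *        struct drm_mode_create_dumb create = {
 *                .width = 640, .height = 480, .bpp = 32,
 *        };
 *        struct drm_mode_map_dumb map = { 0 };
 *        void *ptr;
 *
 *        ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *        map.handle = create.handle;
 *        ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *        ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                   drm_fd, map.offset);
 */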
 672
 673#ifdef CONFIG_DRM_FBDEV_EMULATION
 674/* Set scrolling position.  This allows us to implement fast scrolling
 675 * for console.
 676 *
 677 * Call only from non-atomic contexts.
 678 */
 679int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
 680{
 681        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 682        u32 npages = obj->size >> PAGE_SHIFT;
 683        int ret = 0;
 684
 685        if (roll > npages) {
  686                dev_err(obj->dev->dev, "invalid roll: %u\n", roll);
 687                return -EINVAL;
 688        }
 689
 690        omap_obj->roll = roll;
 691
 692        mutex_lock(&obj->dev->struct_mutex);
 693
 694        /* if we aren't mapped yet, we don't need to do anything */
 695        if (omap_obj->block) {
 696                struct page **pages;
 697                ret = get_pages(obj, &pages);
 698                if (ret)
 699                        goto fail;
 700                ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
 701                if (ret)
 702                        dev_err(obj->dev->dev, "could not repin: %d\n", ret);
 703        }
 704
 705fail:
 706        mutex_unlock(&obj->dev->struct_mutex);
 707
 708        return ret;
 709}
 710#endif
 711
 712/* -----------------------------------------------------------------------------
 713 * Memory Management & DMA Sync
 714 */
 715
 716/*
 717 * shmem buffers that are mapped cached are not coherent.
 718 *
 719 * We keep track of dirty pages using page faulting to perform cache management.
 720 * When a page is mapped to the CPU in read/write mode the device can't access
 721 * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
 722 * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
 723 * unmapped from the CPU.
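 *
 * A typical sequence, derived from the helpers below: a CPU page fault calls
 * omap_gem_cpu_sync_page(), which dma_unmap_page()s the faulted page and
 * clears dma_addrs[i]; before DMA, omap_gem_dma_sync_buffer() dma_map_page()s
 * every page whose dma_addrs[i] is still zero and then zaps the CPU mappings
 * with unmap_mapping_range(), so the next CPU access faults again.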
 724 */
 725static inline bool is_cached_coherent(struct drm_gem_object *obj)
 726{
 727        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 728
 729        return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
 730                ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
 731}
 732
 733/* Sync the buffer for CPU access.. note pages should already be
  734 * attached, i.e. via omap_gem_get_pages()
 735 */
 736void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
 737{
 738        struct drm_device *dev = obj->dev;
 739        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 740
 741        if (is_cached_coherent(obj))
 742                return;
 743
 744        if (omap_obj->dma_addrs[pgoff]) {
 745                dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
 746                                PAGE_SIZE, DMA_TO_DEVICE);
 747                omap_obj->dma_addrs[pgoff] = 0;
 748        }
 749}
 750
 751/* sync the buffer for DMA access */
 752void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
 753                enum dma_data_direction dir)
 754{
 755        struct drm_device *dev = obj->dev;
 756        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 757        int i, npages = obj->size >> PAGE_SHIFT;
 758        struct page **pages = omap_obj->pages;
 759        bool dirty = false;
 760
 761        if (is_cached_coherent(obj))
 762                return;
 763
 764        for (i = 0; i < npages; i++) {
 765                if (!omap_obj->dma_addrs[i]) {
 766                        dma_addr_t addr;
 767
 768                        addr = dma_map_page(dev->dev, pages[i], 0,
 769                                            PAGE_SIZE, dir);
 770                        if (dma_mapping_error(dev->dev, addr)) {
 771                                dev_warn(dev->dev, "%s: failed to map page\n",
 772                                        __func__);
 773                                break;
 774                        }
 775
 776                        dirty = true;
 777                        omap_obj->dma_addrs[i] = addr;
 778                }
 779        }
 780
 781        if (dirty) {
 782                unmap_mapping_range(obj->filp->f_mapping, 0,
 783                                    omap_gem_mmap_size(obj), 1);
 784        }
 785}
 786
 787/**
 788 * omap_gem_pin() - Pin a GEM object in memory
 789 * @obj: the GEM object
 790 * @dma_addr: the DMA address
 791 *
 792 * Pin the given GEM object in memory and fill the dma_addr pointer with the
 793 * object's DMA address. If the buffer is not physically contiguous it will be
 794 * remapped through the TILER to provide a contiguous view.
 795 *
 796 * Pins are reference-counted, calling this function multiple times is allowed
 797 * as long the corresponding omap_gem_unpin() calls are balanced.
 798 *
 799 * Return 0 on success or a negative error code otherwise.
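 *
 * Minimal usage sketch (illustrative only, error handling trimmed):
 *
 *        dma_addr_t dma_addr;
 *        int ret = omap_gem_pin(obj, &dma_addr);
 *
 *        if (ret)
 *                return ret;
 *        ... program the display or DMA engine with dma_addr ...
 *        omap_gem_unpin(obj);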
 800 */
 801int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
 802{
 803        struct omap_drm_private *priv = obj->dev->dev_private;
 804        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 805        int ret = 0;
 806
 807        mutex_lock(&obj->dev->struct_mutex);
 808
 809        if (!is_contiguous(omap_obj) && priv->has_dmm) {
 810                if (omap_obj->dma_addr_cnt == 0) {
 811                        struct page **pages;
 812                        u32 npages = obj->size >> PAGE_SHIFT;
 813                        enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
 814                        struct tiler_block *block;
 815
 816                        BUG_ON(omap_obj->block);
 817
 818                        ret = get_pages(obj, &pages);
 819                        if (ret)
 820                                goto fail;
 821
 822                        if (omap_obj->flags & OMAP_BO_TILED) {
 823                                block = tiler_reserve_2d(fmt,
 824                                                omap_obj->width,
 825                                                omap_obj->height, 0);
 826                        } else {
 827                                block = tiler_reserve_1d(obj->size);
 828                        }
 829
 830                        if (IS_ERR(block)) {
 831                                ret = PTR_ERR(block);
 832                                dev_err(obj->dev->dev,
 833                                        "could not remap: %d (%d)\n", ret, fmt);
 834                                goto fail;
 835                        }
 836
 837                        /* TODO: enable async refill.. */
 838                        ret = tiler_pin(block, pages, npages,
 839                                        omap_obj->roll, true);
 840                        if (ret) {
 841                                tiler_release(block);
 842                                dev_err(obj->dev->dev,
 843                                                "could not pin: %d\n", ret);
 844                                goto fail;
 845                        }
 846
 847                        omap_obj->dma_addr = tiler_ssptr(block);
 848                        omap_obj->block = block;
 849
 850                        DBG("got dma address: %pad", &omap_obj->dma_addr);
 851                }
 852
 853                omap_obj->dma_addr_cnt++;
 854
 855                *dma_addr = omap_obj->dma_addr;
 856        } else if (is_contiguous(omap_obj)) {
 857                *dma_addr = omap_obj->dma_addr;
 858        } else {
 859                ret = -EINVAL;
 860                goto fail;
 861        }
 862
 863fail:
 864        mutex_unlock(&obj->dev->struct_mutex);
 865
 866        return ret;
 867}
 868
 869/**
 870 * omap_gem_unpin() - Unpin a GEM object from memory
 871 * @obj: the GEM object
 872 *
 873 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
  874 * reference-counted, the actual unpin will only be performed when the number
 875 * of calls to this function matches the number of calls to omap_gem_pin().
 876 */
 877void omap_gem_unpin(struct drm_gem_object *obj)
 878{
 879        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 880        int ret;
 881
 882        mutex_lock(&obj->dev->struct_mutex);
 883        if (omap_obj->dma_addr_cnt > 0) {
 884                omap_obj->dma_addr_cnt--;
 885                if (omap_obj->dma_addr_cnt == 0) {
 886                        ret = tiler_unpin(omap_obj->block);
 887                        if (ret) {
 888                                dev_err(obj->dev->dev,
 889                                        "could not unpin pages: %d\n", ret);
 890                        }
 891                        ret = tiler_release(omap_obj->block);
 892                        if (ret) {
 893                                dev_err(obj->dev->dev,
 894                                        "could not release unmap: %d\n", ret);
 895                        }
 896                        omap_obj->dma_addr = 0;
 897                        omap_obj->block = NULL;
 898                }
 899        }
 900
 901        mutex_unlock(&obj->dev->struct_mutex);
 902}
 903
 904/* Get rotated scanout address (only valid if already pinned), at the
 905 * specified orientation and x,y offset from top-left corner of buffer
 906 * (only valid for tiled 2d buffers)
 907 */
 908int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
 909                int x, int y, dma_addr_t *dma_addr)
 910{
 911        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 912        int ret = -EINVAL;
 913
 914        mutex_lock(&obj->dev->struct_mutex);
 915        if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
 916                        (omap_obj->flags & OMAP_BO_TILED)) {
 917                *dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
 918                ret = 0;
 919        }
 920        mutex_unlock(&obj->dev->struct_mutex);
 921        return ret;
 922}
 923
 924/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
 925int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
 926{
 927        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 928        int ret = -EINVAL;
 929        if (omap_obj->flags & OMAP_BO_TILED)
 930                ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
 931        return ret;
 932}
 933
 934/* if !remap, and we don't have pages backing, then fail, rather than
 935 * increasing the pin count (which we don't really do yet anyways,
 936 * because we don't support swapping pages back out).  And 'remap'
 937 * might not be quite the right name, but I wanted to keep it working
  938 * similarly to omap_gem_pin().  Note though that the mutex is not
  939 * acquired if !remap (because this can be called in atomic ctxt),
 940 * but probably omap_gem_unpin() should be changed to work in the
 941 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 942 * required (and should not be made).
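 *
 * Illustrative example: a caller in atomic context that only wants pages
 * that are already attached would use
 *
 *        ret = omap_gem_get_pages(obj, &pages, false);
 *
 * and must be prepared to handle -ENOMEM if no pages have been attached yet.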
 943 */
 944int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
 945                bool remap)
 946{
 947        int ret;
 948        if (!remap) {
 949                struct omap_gem_object *omap_obj = to_omap_bo(obj);
 950                if (!omap_obj->pages)
 951                        return -ENOMEM;
 952                *pages = omap_obj->pages;
 953                return 0;
 954        }
 955        mutex_lock(&obj->dev->struct_mutex);
 956        ret = get_pages(obj, pages);
 957        mutex_unlock(&obj->dev->struct_mutex);
 958        return ret;
 959}
 960
 961/* release pages when DMA no longer being performed */
 962int omap_gem_put_pages(struct drm_gem_object *obj)
 963{
 964        /* do something here if we dynamically attach/detach pages.. at
 965         * least they would no longer need to be pinned if everyone has
 966         * released the pages..
 967         */
 968        return 0;
 969}
 970
 971#ifdef CONFIG_DRM_FBDEV_EMULATION
 972/* Get kernel virtual address for CPU access.. this more or less only
 973 * exists for omap_fbdev.  This should be called with struct_mutex
 974 * held.
 975 */
 976void *omap_gem_vaddr(struct drm_gem_object *obj)
 977{
 978        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 979        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 980        if (!omap_obj->vaddr) {
 981                struct page **pages;
 982                int ret = get_pages(obj, &pages);
 983                if (ret)
 984                        return ERR_PTR(ret);
 985                omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 986                                VM_MAP, pgprot_writecombine(PAGE_KERNEL));
 987        }
 988        return omap_obj->vaddr;
 989}
 990#endif
 991
 992/* -----------------------------------------------------------------------------
 993 * Power Management
 994 */
 995
 996#ifdef CONFIG_PM
 997/* re-pin objects in DMM in resume path: */
 998int omap_gem_resume(struct drm_device *dev)
 999{
1000        struct omap_drm_private *priv = dev->dev_private;
1001        struct omap_gem_object *omap_obj;
1002        int ret = 0;
1003
1004        list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
1005                if (omap_obj->block) {
1006                        struct drm_gem_object *obj = &omap_obj->base;
1007                        u32 npages = obj->size >> PAGE_SHIFT;
1008
1009                        WARN_ON(!omap_obj->pages);  /* this can't happen */
1010                        ret = tiler_pin(omap_obj->block,
1011                                        omap_obj->pages, npages,
1012                                        omap_obj->roll, true);
1013                        if (ret) {
1014                                dev_err(dev->dev, "could not repin: %d\n", ret);
1015                                return ret;
1016                        }
1017                }
1018        }
1019
1020        return 0;
1021}
1022#endif
1023
1024/* -----------------------------------------------------------------------------
1025 * DebugFS
1026 */
1027
1028#ifdef CONFIG_DEBUG_FS
1029void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
1030{
1031        struct omap_gem_object *omap_obj = to_omap_bo(obj);
1032        u64 off;
1033
1034        off = drm_vma_node_start(&obj->vma_node);
1035
1036        seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
1037                        omap_obj->flags, obj->name, kref_read(&obj->refcount),
1038                        off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
1039                        omap_obj->vaddr, omap_obj->roll);
1040
1041        if (omap_obj->flags & OMAP_BO_TILED) {
1042                seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
1043                if (omap_obj->block) {
1044                        struct tcm_area *area = &omap_obj->block->area;
1045                        seq_printf(m, " (%dx%d, %dx%d)",
1046                                        area->p0.x, area->p0.y,
1047                                        area->p1.x, area->p1.y);
1048                }
1049        } else {
1050                seq_printf(m, " %zu", obj->size);
1051        }
1052
1053        seq_printf(m, "\n");
1054}
1055
1056void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
1057{
1058        struct omap_gem_object *omap_obj;
1059        int count = 0;
1060        size_t size = 0;
1061
1062        list_for_each_entry(omap_obj, list, mm_list) {
1063                struct drm_gem_object *obj = &omap_obj->base;
1064                seq_printf(m, "   ");
1065                omap_gem_describe(obj, m);
1066                count++;
1067                size += obj->size;
1068        }
1069
1070        seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
1071}
1072#endif
1073
1074/* -----------------------------------------------------------------------------
1075 * Constructor & Destructor
1076 */
1077
1078void omap_gem_free_object(struct drm_gem_object *obj)
1079{
1080        struct drm_device *dev = obj->dev;
1081        struct omap_drm_private *priv = dev->dev_private;
1082        struct omap_gem_object *omap_obj = to_omap_bo(obj);
1083
1084        evict(obj);
1085
1086        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
1087
1088        spin_lock(&priv->list_lock);
1089        list_del(&omap_obj->mm_list);
1090        spin_unlock(&priv->list_lock);
1091
1092        /* this means the object is still pinned.. which really should
1093         * not happen.  I think..
1094         */
1095        WARN_ON(omap_obj->dma_addr_cnt > 0);
1096
1097        if (omap_obj->pages) {
1098                if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
1099                        kfree(omap_obj->pages);
1100                else
1101                        omap_gem_detach_pages(obj);
1102        }
1103
1104        if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
1105                dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
1106                            omap_obj->dma_addr);
1107        } else if (omap_obj->vaddr) {
1108                vunmap(omap_obj->vaddr);
1109        } else if (obj->import_attach) {
1110                drm_prime_gem_destroy(obj, omap_obj->sgt);
1111        }
1112
1113        drm_gem_object_release(obj);
1114
1115        kfree(omap_obj);
1116}
1117
1118/* GEM buffer object constructor */
1119struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1120                union omap_gem_size gsize, u32 flags)
1121{
1122        struct omap_drm_private *priv = dev->dev_private;
1123        struct omap_gem_object *omap_obj;
1124        struct drm_gem_object *obj;
1125        struct address_space *mapping;
1126        size_t size;
1127        int ret;
1128
1129        /* Validate the flags and compute the memory and cache flags. */
1130        if (flags & OMAP_BO_TILED) {
1131                if (!priv->usergart) {
1132                        dev_err(dev->dev, "Tiled buffers require DMM\n");
1133                        return NULL;
1134                }
1135
1136                /*
 1137                 * Tiled buffers are always shmem page-backed. When they are
1138                 * scanned out, they are remapped into DMM/TILER.
1139                 */
1140                flags &= ~OMAP_BO_SCANOUT;
1141                flags |= OMAP_BO_MEM_SHMEM;
1142
1143                /*
1144                 * Currently don't allow cached buffers. There is some caching
1145                 * stuff that needs to be handled better.
1146                 */
1147                flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
1148                flags |= tiler_get_cpu_cache_flags();
1149        } else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
1150                /*
1151                 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
1152                 * tiled. However, to lower the pressure on memory allocation,
1153                 * use contiguous memory only if no TILER is available.
1154                 */
1155                flags |= OMAP_BO_MEM_DMA_API;
1156        } else if (!(flags & OMAP_BO_MEM_DMABUF)) {
1157                /*
1158                 * All other buffers not backed by dma_buf are shmem-backed.
1159                 */
1160                flags |= OMAP_BO_MEM_SHMEM;
1161        }
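
        /*
         * In short: tiled buffers are shmem-backed and remapped through
         * DMM/TILER when pinned, scanout buffers without DMM come from
         * contiguous dma_alloc_wc() memory, and everything else is
         * shmem-backed unless it was imported from a dmabuf.
         */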
1162
 1163        /* Allocate and initialize the OMAP GEM object. */
1164        omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
1165        if (!omap_obj)
1166                return NULL;
1167
1168        obj = &omap_obj->base;
1169        omap_obj->flags = flags;
1170
1171        if (flags & OMAP_BO_TILED) {
1172                /*
1173                 * For tiled buffers align dimensions to slot boundaries and
1174                 * calculate size based on aligned dimensions.
1175                 */
1176                tiler_align(gem2fmt(flags), &gsize.tiled.width,
1177                            &gsize.tiled.height);
1178
1179                size = tiler_size(gem2fmt(flags), gsize.tiled.width,
1180                                  gsize.tiled.height);
1181
1182                omap_obj->width = gsize.tiled.width;
1183                omap_obj->height = gsize.tiled.height;
1184        } else {
1185                size = PAGE_ALIGN(gsize.bytes);
1186        }
1187
1188        /* Initialize the GEM object. */
1189        if (!(flags & OMAP_BO_MEM_SHMEM)) {
1190                drm_gem_private_object_init(dev, obj, size);
1191        } else {
1192                ret = drm_gem_object_init(dev, obj, size);
1193                if (ret)
1194                        goto err_free;
1195
1196                mapping = obj->filp->f_mapping;
1197                mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
1198        }
1199
1200        /* Allocate memory if needed. */
1201        if (flags & OMAP_BO_MEM_DMA_API) {
1202                omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
1203                                               &omap_obj->dma_addr,
1204                                               GFP_KERNEL);
1205                if (!omap_obj->vaddr)
1206                        goto err_release;
1207        }
1208
1209        spin_lock(&priv->list_lock);
1210        list_add(&omap_obj->mm_list, &priv->obj_list);
1211        spin_unlock(&priv->list_lock);
1212
1213        return obj;
1214
1215err_release:
1216        drm_gem_object_release(obj);
1217err_free:
1218        kfree(omap_obj);
1219        return NULL;
1220}
1221
1222struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
1223                                           struct sg_table *sgt)
1224{
1225        struct omap_drm_private *priv = dev->dev_private;
1226        struct omap_gem_object *omap_obj;
1227        struct drm_gem_object *obj;
1228        union omap_gem_size gsize;
1229
 1230        /* Without a DMM, only physically contiguous buffers can be supported. */
1231        if (sgt->orig_nents != 1 && !priv->has_dmm)
1232                return ERR_PTR(-EINVAL);
1233
1234        mutex_lock(&dev->struct_mutex);
1235
1236        gsize.bytes = PAGE_ALIGN(size);
1237        obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
1238        if (!obj) {
1239                obj = ERR_PTR(-ENOMEM);
1240                goto done;
1241        }
1242
1243        omap_obj = to_omap_bo(obj);
1244        omap_obj->sgt = sgt;
1245
1246        if (sgt->orig_nents == 1) {
1247                omap_obj->dma_addr = sg_dma_address(sgt->sgl);
1248        } else {
1249                /* Create pages list from sgt */
1250                struct sg_page_iter iter;
1251                struct page **pages;
1252                unsigned int npages;
1253                unsigned int i = 0;
1254
1255                npages = DIV_ROUND_UP(size, PAGE_SIZE);
1256                pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
1257                if (!pages) {
1258                        omap_gem_free_object(obj);
1259                        obj = ERR_PTR(-ENOMEM);
1260                        goto done;
1261                }
1262
1263                omap_obj->pages = pages;
1264
1265                for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
1266                        pages[i++] = sg_page_iter_page(&iter);
1267                        if (i > npages)
1268                                break;
1269                }
1270
1271                if (WARN_ON(i != npages)) {
1272                        omap_gem_free_object(obj);
1273                        obj = ERR_PTR(-ENOMEM);
1274                        goto done;
1275                }
1276        }
1277
1278done:
1279        mutex_unlock(&dev->struct_mutex);
1280        return obj;
1281}
1282
1283/* convenience method to construct a GEM buffer object, and userspace handle */
1284int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1285                union omap_gem_size gsize, u32 flags, u32 *handle)
1286{
1287        struct drm_gem_object *obj;
1288        int ret;
1289
1290        obj = omap_gem_new(dev, gsize, flags);
1291        if (!obj)
1292                return -ENOMEM;
1293
1294        ret = drm_gem_handle_create(file, obj, handle);
1295        if (ret) {
1296                omap_gem_free_object(obj);
1297                return ret;
1298        }
1299
1300        /* drop reference from allocate - handle holds it now */
1301        drm_gem_object_unreference_unlocked(obj);
1302
1303        return 0;
1304}
1305
1306/* -----------------------------------------------------------------------------
1307 * Init & Cleanup
1308 */
1309
1310/* If DMM is used, we need to set some stuff up.. */
1311void omap_gem_init(struct drm_device *dev)
1312{
1313        struct omap_drm_private *priv = dev->dev_private;
1314        struct omap_drm_usergart *usergart;
1315        const enum tiler_fmt fmts[] = {
1316                        TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
1317        };
1318        int i, j;
1319
1320        if (!dmm_is_available()) {
1321                /* DMM only supported on OMAP4 and later, so this isn't fatal */
 1322                dev_warn(dev->dev, "DMM not available, disabling DMM support\n");
1323                return;
1324        }
1325
1326        usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
1327        if (!usergart)
1328                return;
1329
1330        /* reserve 4k aligned/wide regions for userspace mappings: */
1331        for (i = 0; i < ARRAY_SIZE(fmts); i++) {
1332                u16 h = 1, w = PAGE_SIZE >> i;
1333
1334                tiler_align(fmts[i], &w, &h);
 1335                /* note: since each region is one 4kb page wide and the minimum
 1336                 * number of rows high, the height ends up being the same as the
 1337                 * number of pages in the region
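                 * (e.g., assuming the 8-bit container uses 64-row slots, the
                 * i == 0 region is requested as 4096x1 and tiler_align()
                 * rounds it up to 4096x64, giving height = 64 and
                 * height_shift = 6; the exact slot geometry here is an
                 * assumption, not taken from this file)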
1338                 */
1339                usergart[i].height = h;
1340                usergart[i].height_shift = ilog2(h);
1341                usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
1342                usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
1343                for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
1344                        struct omap_drm_usergart_entry *entry;
1345                        struct tiler_block *block;
1346
1347                        entry = &usergart[i].entry[j];
1348                        block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
1349                        if (IS_ERR(block)) {
1350                                dev_err(dev->dev,
1351                                                "reserve failed: %d, %d, %ld\n",
1352                                                i, j, PTR_ERR(block));
1353                                return;
1354                        }
1355                        entry->dma_addr = tiler_ssptr(block);
1356                        entry->block = block;
1357
1358                        DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
1359                                        &entry->dma_addr,
1360                                        usergart[i].stride_pfn << PAGE_SHIFT);
1361                }
1362        }
1363
1364        priv->usergart = usergart;
1365        priv->has_dmm = true;
1366}
1367
1368void omap_gem_deinit(struct drm_device *dev)
1369{
1370        struct omap_drm_private *priv = dev->dev_private;
1371
1372        /* I believe we can rely on there being no more outstanding GEM
1373         * objects which could depend on usergart/dmm at this point.
1374         */
1375        kfree(priv->usergart);
1376}
1377