linux/drivers/gpu/drm/tegra/gem.c
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
{
        return container_of(bo, struct tegra_bo, base);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);
        struct drm_device *drm = obj->gem.dev;

        mutex_lock(&drm->struct_mutex);
        drm_gem_object_unreference(&obj->gem);
        mutex_unlock(&drm->struct_mutex);
}

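/*
 * Pinning for host1x job submission: every Tegra GEM object is contiguous in
 * the device's address space (an IOVA when the IOMMU is used, a physical
 * address otherwise), so pinning simply hands back that address and no
 * scatter-gather table needs to be constructed.
 */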
static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        return obj->vaddr;
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
}

static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        return obj->vaddr + page * PAGE_SIZE;
}

static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
                            void *addr)
{
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);
        struct drm_device *drm = obj->gem.dev;

        mutex_lock(&drm->struct_mutex);
        drm_gem_object_reference(&obj->gem);
        mutex_unlock(&drm->struct_mutex);

        return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
        .get = tegra_bo_get,
        .put = tegra_bo_put,
        .pin = tegra_bo_pin,
        .unpin = tegra_bo_unpin,
        .mmap = tegra_bo_mmap,
        .munmap = tegra_bo_munmap,
        .kmap = tegra_bo_kmap,
        .kunmap = tegra_bo_kunmap,
};

/*
 * A generic iommu_map_sg() function is being reviewed and will hopefully be
 * merged soon. At that point this function can be dropped in favour of the
 * one provided by the IOMMU API.
 */
static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
                              struct scatterlist *sg, unsigned int nents,
                              int prot)
{
        struct scatterlist *s;
        size_t offset = 0;
        unsigned int i;
        int err;

        for_each_sg(sg, s, nents, i) {
                phys_addr_t phys = page_to_phys(sg_page(s));
                size_t length = s->offset + s->length;

                err = iommu_map(domain, iova + offset, phys, length, prot);
                if (err < 0) {
                        iommu_unmap(domain, iova, offset);
                        return err;
                }

                offset += length;
        }

        return offset;
}

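/*
 * Allocate a node in the Tegra DRM IOVA space for this buffer and map its
 * backing pages into the shared IOMMU domain. On success, bo->paddr holds
 * the I/O virtual address and bo->size the mapped size.
 */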
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
        int prot = IOMMU_READ | IOMMU_WRITE;
        ssize_t err;

        if (bo->mm)
                return -EBUSY;

        bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
        if (!bo->mm)
                return -ENOMEM;

        err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
                                         PAGE_SIZE, 0, 0, 0);
        if (err < 0) {
                dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
                        err);
                goto free;
        }

        bo->paddr = bo->mm->start;

        err = __iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
                             bo->sgt->nents, prot);
        if (err < 0) {
                dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
                goto remove;
        }

        bo->size = err;

        return 0;

remove:
        drm_mm_remove_node(bo->mm);
free:
        kfree(bo->mm);
        return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
        if (!bo->mm)
                return 0;

        iommu_unmap(tegra->domain, bo->paddr, bo->size);
        drm_mm_remove_node(bo->mm);
        kfree(bo->mm);

        return 0;
}

static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
                                              size_t size)
{
        struct tegra_bo *bo;
        int err;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return ERR_PTR(-ENOMEM);

        host1x_bo_init(&bo->base, &tegra_bo_ops);
        size = round_up(size, PAGE_SIZE);

        err = drm_gem_object_init(drm, &bo->gem, size);
        if (err < 0)
                goto free;

        err = drm_gem_create_mmap_offset(&bo->gem);
        if (err < 0)
                goto release;

        return bo;

release:
        drm_gem_object_release(&bo->gem);
free:
        kfree(bo);
        return ERR_PTR(err);
}

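/*
 * Release a buffer object's backing storage: the shmem pages (and the
 * scatter-gather table built for them) when an IOMMU is used, or the
 * contiguous write-combined DMA allocation otherwise.
 */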
static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
        if (bo->pages) {
                drm_gem_put_pages(&bo->gem, bo->pages, true, true);
                sg_free_table(bo->sgt);
                kfree(bo->sgt);
        } else if (bo->vaddr) {
                dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr,
                                      bo->paddr);
        }
}

static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
        struct scatterlist *s;
        struct sg_table *sgt;
        unsigned int i;

        bo->pages = drm_gem_get_pages(&bo->gem);
        if (IS_ERR(bo->pages))
                return PTR_ERR(bo->pages);

        bo->num_pages = bo->gem.size >> PAGE_SHIFT;

        sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
        if (IS_ERR(sgt))
                goto put_pages;

        /*
         * Fake up the SG table so that dma_map_sg() can be used to flush the
         * pages associated with it. Note that this relies on the fact that
         * the DMA API doesn't hook into IOMMU on Tegra, therefore mapping is
         * only cache maintenance.
         *
         * TODO: Replace this by drm_clflash_sg() once it can be implemented
         * without relying on symbols that are not exported.
         */
        for_each_sg(sgt->sgl, s, sgt->nents, i)
                sg_dma_address(s) = sg_phys(s);

        if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0)
                goto release_sgt;

        bo->sgt = sgt;

        return 0;

release_sgt:
        sg_free_table(sgt);
        kfree(sgt);
        sgt = ERR_PTR(-ENOMEM);
put_pages:
        drm_gem_put_pages(&bo->gem, bo->pages, false, false);
        return PTR_ERR(sgt);
}

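/*
 * Allocate backing storage for a buffer object. With an IOMMU available the
 * buffer is backed by shmem pages that get mapped into the IOMMU domain;
 * without one it falls back to a physically contiguous, write-combined DMA
 * allocation.
 */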
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
        struct tegra_drm *tegra = drm->dev_private;
        int err;

        if (tegra->domain) {
                err = tegra_bo_get_pages(drm, bo);
                if (err < 0)
                        return err;

                err = tegra_bo_iommu_map(tegra, bo);
                if (err < 0) {
                        tegra_bo_free(drm, bo);
                        return err;
                }
        } else {
                size_t size = bo->gem.size;

                bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
                                                   GFP_KERNEL | __GFP_NOWARN);
                if (!bo->vaddr) {
                        dev_err(drm->dev,
                                "failed to allocate buffer of size %zu\n",
                                size);
                        return -ENOMEM;
                }
        }

        return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
                                 unsigned long flags)
{
        struct tegra_bo *bo;
        int err;

        bo = tegra_bo_alloc_object(drm, size);
        if (IS_ERR(bo))
                return bo;

        err = tegra_bo_alloc(drm, bo);
        if (err < 0)
                goto release;

        if (flags & DRM_TEGRA_GEM_CREATE_TILED)
                bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

        if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
                bo->flags |= TEGRA_BO_BOTTOM_UP;

        return bo;

release:
        drm_gem_object_release(&bo->gem);
        kfree(bo);
        return ERR_PTR(err);
}

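/*
 * Create a buffer object together with a userspace handle for it. The handle
 * created here ends up holding the only reference: the reference taken during
 * creation is dropped before returning, so the object goes away once the
 * handle is closed.
 */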
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
                                             struct drm_device *drm,
                                             size_t size,
                                             unsigned long flags,
                                             u32 *handle)
{
        struct tegra_bo *bo;
        int err;

        bo = tegra_bo_create(drm, size, flags);
        if (IS_ERR(bo))
                return bo;

        err = drm_gem_handle_create(file, &bo->gem, handle);
        if (err) {
                tegra_bo_free_object(&bo->gem);
                return ERR_PTR(err);
        }

        drm_gem_object_unreference_unlocked(&bo->gem);

        return bo;
}

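/*
 * Import a foreign dma-buf as a Tegra buffer object. With an IOMMU the
 * imported scatter-gather list is mapped into the domain like any other
 * buffer; without one, only buffers that map to a single contiguous chunk
 * (a single sg entry) can be used.
 */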
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
                                        struct dma_buf *buf)
{
        struct tegra_drm *tegra = drm->dev_private;
        struct dma_buf_attachment *attach;
        struct tegra_bo *bo;
        int err;

        bo = tegra_bo_alloc_object(drm, buf->size);
        if (IS_ERR(bo))
                return bo;

        attach = dma_buf_attach(buf, drm->dev);
        if (IS_ERR(attach)) {
                err = PTR_ERR(attach);
                goto free;
        }

        get_dma_buf(buf);

        bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
        if (!bo->sgt) {
                err = -ENOMEM;
                goto detach;
        }

        if (IS_ERR(bo->sgt)) {
                err = PTR_ERR(bo->sgt);
                goto detach;
        }

        if (tegra->domain) {
                err = tegra_bo_iommu_map(tegra, bo);
                if (err < 0)
                        goto detach;
        } else {
                if (bo->sgt->nents > 1) {
                        err = -EINVAL;
                        goto detach;
                }

                bo->paddr = sg_dma_address(bo->sgt->sgl);
        }

        bo->gem.import_attach = attach;

        return bo;

detach:
        if (!IS_ERR_OR_NULL(bo->sgt))
                dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

        dma_buf_detach(buf, attach);
        dma_buf_put(buf);
free:
        drm_gem_object_release(&bo->gem);
        kfree(bo);
        return ERR_PTR(err);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
        struct tegra_drm *tegra = gem->dev->dev_private;
        struct tegra_bo *bo = to_tegra_bo(gem);

        if (tegra->domain)
                tegra_bo_iommu_unmap(tegra, bo);

        if (gem->import_attach) {
                dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
                                         DMA_TO_DEVICE);
                drm_prime_gem_destroy(gem, NULL);
        } else {
                tegra_bo_free(gem->dev, bo);
        }

        drm_gem_object_release(gem);
        kfree(bo);
}

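/*
 * Dumb buffer creation: the pitch is the per-line byte count rounded up to
 * the device's alignment requirement. For example, a 1920-pixel-wide buffer
 * at 32 bpp needs at least 1920 * 32 / 8 = 7680 bytes per line before
 * alignment.
 */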
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
                         struct drm_mode_create_dumb *args)
{
        unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        struct tegra_drm *tegra = drm->dev_private;
        struct tegra_bo *bo;

        args->pitch = round_up(min_pitch, tegra->pitch_align);
        args->size = args->pitch * args->height;

        bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
                                         &args->handle);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        return 0;
}

int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
                             u32 handle, u64 *offset)
{
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        mutex_lock(&drm->struct_mutex);

        gem = drm_gem_object_lookup(drm, file, handle);
        if (!gem) {
                dev_err(drm->dev, "failed to lookup GEM object\n");
                mutex_unlock(&drm->struct_mutex);
                return -EINVAL;
        }

        bo = to_tegra_bo(gem);

        *offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

        drm_gem_object_unreference(gem);

        mutex_unlock(&drm->struct_mutex);

        return 0;
}

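/*
 * Page fault handler for mmap()ed buffer objects. Only shmem-backed buffers
 * (the IOMMU case) are faulted in page by page; contiguous DMA buffers are
 * mapped up front in tegra_drm_mmap() and should never fault.
 */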
static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *gem = vma->vm_private_data;
        struct tegra_bo *bo = to_tegra_bo(gem);
        struct page *page;
        pgoff_t offset;
        int err;

        if (!bo->pages)
                return VM_FAULT_SIGBUS;

        offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
        page = bo->pages[offset];

        err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
        switch (err) {
        case -EAGAIN:
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
        case -EBUSY:
                return VM_FAULT_NOPAGE;

        case -ENOMEM:
                return VM_FAULT_OOM;
        }

        return VM_FAULT_SIGBUS;
}

const struct vm_operations_struct tegra_bo_vm_ops = {
        .fault = tegra_bo_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};

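/*
 * mmap() entry point for Tegra GEM objects. Contiguous DMA buffers are
 * remapped in one go via dma_mmap_writecombine(); shmem-backed buffers are
 * set up as write-combined VM_MIXEDMAP mappings and populated on demand by
 * tegra_bo_fault().
 */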
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct drm_gem_object *gem;
        struct tegra_bo *bo;
        int ret;

        ret = drm_gem_mmap(file, vma);
        if (ret)
                return ret;

        gem = vma->vm_private_data;
        bo = to_tegra_bo(gem);

        if (!bo->pages) {
                unsigned long vm_pgoff = vma->vm_pgoff;

                vma->vm_flags &= ~VM_PFNMAP;
                vma->vm_pgoff = 0;

                ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr,
                                            bo->paddr, gem->size);
                if (ret) {
                        drm_gem_vm_close(vma);
                        return ret;
                }

                vma->vm_pgoff = vm_pgoff;
        } else {
                pgprot_t prot = vm_get_page_prot(vma->vm_flags);

                vma->vm_flags |= VM_MIXEDMAP;
                vma->vm_flags &= ~VM_PFNMAP;

                vma->vm_page_prot = pgprot_writecombine(prot);
        }

        return 0;
}

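/*
 * PRIME export: build a scatter-gather table describing the buffer for the
 * importing device. Shmem-backed buffers are described page by page and
 * DMA-mapped for the importer; contiguous buffers are exported as a single
 * sg entry covering the whole allocation.
 */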
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
                            enum dma_data_direction dir)
{
        struct drm_gem_object *gem = attach->dmabuf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return NULL;

        if (bo->pages) {
                struct scatterlist *sg;
                unsigned int i;

                if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
                        goto free;

                for_each_sg(sgt->sgl, sg, bo->num_pages, i)
                        sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

                if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
                        goto free;
        } else {
                if (sg_alloc_table(sgt, 1, GFP_KERNEL))
                        goto free;

                sg_dma_address(sgt->sgl) = bo->paddr;
                sg_dma_len(sgt->sgl) = gem->size;
        }

        return sgt;

free:
        sg_free_table(sgt);
        kfree(sgt);
        return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
                                          struct sg_table *sgt,
                                          enum dma_data_direction dir)
{
        struct drm_gem_object *gem = attach->dmabuf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);

        if (bo->pages)
                dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

        sg_free_table(sgt);
        kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
        drm_gem_dmabuf_release(buf);
}

static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
                                         unsigned long page)
{
        return NULL;
}

static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
                                          unsigned long page,
                                          void *addr)
{
}

static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
        return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
                                   void *addr)
{
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
        return -EINVAL;
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
        struct drm_gem_object *gem = buf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);

        return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
        .map_dma_buf = tegra_gem_prime_map_dma_buf,
        .unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
        .release = tegra_gem_prime_release,
        .kmap_atomic = tegra_gem_prime_kmap_atomic,
        .kunmap_atomic = tegra_gem_prime_kunmap_atomic,
        .kmap = tegra_gem_prime_kmap,
        .kunmap = tegra_gem_prime_kunmap,
        .mmap = tegra_gem_prime_mmap,
        .vmap = tegra_gem_prime_vmap,
        .vunmap = tegra_gem_prime_vunmap,
};

struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
                                       struct drm_gem_object *gem,
                                       int flags)
{
        return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size,
                              flags, NULL);
}

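/*
 * PRIME import: if the dma-buf was exported by this driver and device, just
 * take another reference on the underlying GEM object instead of creating a
 * new one; anything else goes through the generic import path above.
 */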
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
                                              struct dma_buf *buf)
{
        struct tegra_bo *bo;

        if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
                struct drm_gem_object *gem = buf->priv;

                if (gem->dev == drm) {
                        drm_gem_object_reference(gem);
                        return gem;
                }
        }

        bo = tegra_bo_import(drm, buf);
        if (IS_ERR(bo))
                return ERR_CAST(bo);

        return &bo->gem;
}