linux/drivers/gpu/drm/tegra/gem.c
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

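/*
 * struct tegra_bo embeds a struct host1x_bo (the buffer abstraction used by
 * host1x, the bus that hosts Tegra's graphics and multimedia engines) as its
 * "base" member. The helpers below implement the host1x_bo_ops interface on
 * top of the underlying GEM object.
 */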
static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
{
        return container_of(bo, struct tegra_bo, base);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        drm_gem_object_unreference_unlocked(&obj->gem);
}

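/*
 * Buffers are contiguous from the device's point of view: bo->paddr is
 * either the address returned by dma_alloc_wc() or the start of the IOVA
 * range set up by tegra_bo_iommu_map(). Pinning therefore just returns that
 * base address, and unpinning has nothing to do.
 */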
static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        return obj->vaddr;
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
}

static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        return obj->vaddr + page * PAGE_SIZE;
}

static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
                            void *addr)
{
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        drm_gem_object_reference(&obj->gem);

        return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
        .get = tegra_bo_get,
        .put = tegra_bo_put,
        .pin = tegra_bo_pin,
        .unpin = tegra_bo_unpin,
        .mmap = tegra_bo_mmap,
        .munmap = tegra_bo_munmap,
        .kmap = tegra_bo_kmap,
        .kunmap = tegra_bo_kunmap,
};

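/*
 * Reserve a range of I/O virtual addresses from the drm_mm allocator and map
 * the buffer's scatter-gather list into it. On success, bo->paddr holds the
 * IOVA (not a physical address) and bo->size the number of bytes actually
 * mapped, as reported by iommu_map_sg().
 */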
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
        int prot = IOMMU_READ | IOMMU_WRITE;
        ssize_t err;

        if (bo->mm)
                return -EBUSY;

        bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
        if (!bo->mm)
                return -ENOMEM;

        err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
                                         PAGE_SIZE, 0, 0, 0);
        if (err < 0) {
                dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
                        err);
                goto free;
        }

        bo->paddr = bo->mm->start;

        err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
                           bo->sgt->nents, prot);
        if (err < 0) {
                dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
                goto remove;
        }

        bo->size = err;

        return 0;

remove:
        drm_mm_remove_node(bo->mm);
free:
        kfree(bo->mm);
        return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
        if (!bo->mm)
                return 0;

        iommu_unmap(tegra->domain, bo->paddr, bo->size);
        drm_mm_remove_node(bo->mm);
        kfree(bo->mm);

        return 0;
}

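/*
 * Common allocation path: create the tegra_bo wrapper, initialize the
 * embedded host1x_bo and the GEM object (rounding the size up to a full
 * page) and allocate the fake mmap offset that userspace will later pass
 * to mmap().
 */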
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
                                              size_t size)
{
        struct tegra_bo *bo;
        int err;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return ERR_PTR(-ENOMEM);

        host1x_bo_init(&bo->base, &tegra_bo_ops);
        size = round_up(size, PAGE_SIZE);

        err = drm_gem_object_init(drm, &bo->gem, size);
        if (err < 0)
                goto free;

        err = drm_gem_create_mmap_offset(&bo->gem);
        if (err < 0)
                goto release;

        return bo;

release:
        drm_gem_object_release(&bo->gem);
free:
        kfree(bo);
        return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
        if (bo->pages) {
                drm_gem_put_pages(&bo->gem, bo->pages, true, true);
                sg_free_table(bo->sgt);
                kfree(bo->sgt);
        } else if (bo->vaddr) {
                dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
        }
}

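/*
 * Back the object with shmem pages and build a scatter-gather table for
 * them. This path is only taken when an IOMMU domain is available, since
 * the pages are not guaranteed to be physically contiguous.
 */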
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
        struct scatterlist *s;
        unsigned int i;

        bo->pages = drm_gem_get_pages(&bo->gem);
        if (IS_ERR(bo->pages))
                return PTR_ERR(bo->pages);

        bo->num_pages = bo->gem.size >> PAGE_SHIFT;

        bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
        if (IS_ERR(bo->sgt))
                goto put_pages;

        /*
         * Fake up the SG table so that dma_sync_sg_for_device() can be used
         * to flush the pages associated with it.
         *
         * TODO: Replace this by drm_clflush_sg() once it can be implemented
         * without relying on symbols that are not exported.
         */
        for_each_sg(bo->sgt->sgl, s, bo->sgt->nents, i)
                sg_dma_address(s) = sg_phys(s);

        dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
                               DMA_TO_DEVICE);

        return 0;

put_pages:
        drm_gem_put_pages(&bo->gem, bo->pages, false, false);
        return PTR_ERR(bo->sgt);
}

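/*
 * Pick the backing store: with an IOMMU domain, use scattered shmem pages
 * remapped through the IOMMU; without one, fall back to physically
 * contiguous write-combined DMA memory.
 */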
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
        struct tegra_drm *tegra = drm->dev_private;
        int err;

        if (tegra->domain) {
                err = tegra_bo_get_pages(drm, bo);
                if (err < 0)
                        return err;

                err = tegra_bo_iommu_map(tegra, bo);
                if (err < 0) {
                        tegra_bo_free(drm, bo);
                        return err;
                }
        } else {
                size_t size = bo->gem.size;

                bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
                                         GFP_KERNEL | __GFP_NOWARN);
                if (!bo->vaddr) {
                        dev_err(drm->dev,
                                "failed to allocate buffer of size %zu\n",
                                size);
                        return -ENOMEM;
                }
        }

        return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
                                 unsigned long flags)
{
        struct tegra_bo *bo;
        int err;

        bo = tegra_bo_alloc_object(drm, size);
        if (IS_ERR(bo))
                return bo;

        err = tegra_bo_alloc(drm, bo);
        if (err < 0)
                goto release;

        if (flags & DRM_TEGRA_GEM_CREATE_TILED)
                bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

        if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
                bo->flags |= TEGRA_BO_BOTTOM_UP;

        return bo;

release:
        drm_gem_object_release(&bo->gem);
        kfree(bo);
        return ERR_PTR(err);
}

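/*
 * Create a buffer and immediately publish it as a userspace handle. Note
 * that the reference obtained from tegra_bo_create() is dropped once the
 * handle exists: from that point on the handle owns the object, and the
 * returned pointer is only valid for as long as the handle is.
 */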
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
                                             struct drm_device *drm,
                                             size_t size,
                                             unsigned long flags,
                                             u32 *handle)
{
        struct tegra_bo *bo;
        int err;

        bo = tegra_bo_create(drm, size, flags);
        if (IS_ERR(bo))
                return bo;

        err = drm_gem_handle_create(file, &bo->gem, handle);
        if (err) {
                tegra_bo_free_object(&bo->gem);
                return ERR_PTR(err);
        }

        drm_gem_object_unreference_unlocked(&bo->gem);

        return bo;
}

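/*
 * Wrap a foreign dma-buf in a tegra_bo. The attachment's scatter-gather
 * table is either mapped through the IOMMU or, when no IOMMU domain is
 * available, required to consist of a single contiguous chunk so that
 * bo->paddr can point at it directly.
 */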
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
                                        struct dma_buf *buf)
{
        struct tegra_drm *tegra = drm->dev_private;
        struct dma_buf_attachment *attach;
        struct tegra_bo *bo;
        int err;

        bo = tegra_bo_alloc_object(drm, buf->size);
        if (IS_ERR(bo))
                return bo;

        attach = dma_buf_attach(buf, drm->dev);
        if (IS_ERR(attach)) {
                err = PTR_ERR(attach);
                goto free;
        }

        get_dma_buf(buf);

        bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
        if (!bo->sgt) {
                err = -ENOMEM;
                goto detach;
        }

        if (IS_ERR(bo->sgt)) {
                err = PTR_ERR(bo->sgt);
                goto detach;
        }

        if (tegra->domain) {
                err = tegra_bo_iommu_map(tegra, bo);
                if (err < 0)
                        goto detach;
        } else {
                if (bo->sgt->nents > 1) {
                        err = -EINVAL;
                        goto detach;
                }

                bo->paddr = sg_dma_address(bo->sgt->sgl);
        }

        bo->gem.import_attach = attach;

        return bo;

detach:
        if (!IS_ERR_OR_NULL(bo->sgt))
                dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

        dma_buf_detach(buf, attach);
        dma_buf_put(buf);
free:
        drm_gem_object_release(&bo->gem);
        kfree(bo);
        return ERR_PTR(err);
}

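/*
 * GEM free callback: tear down the IOMMU mapping, then release either the
 * imported dma-buf attachment or the locally allocated backing store.
 */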
void tegra_bo_free_object(struct drm_gem_object *gem)
{
        struct tegra_drm *tegra = gem->dev->dev_private;
        struct tegra_bo *bo = to_tegra_bo(gem);

        if (tegra->domain)
                tegra_bo_iommu_unmap(tegra, bo);

        if (gem->import_attach) {
                dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
                                         DMA_TO_DEVICE);
                drm_prime_gem_destroy(gem, NULL);
        } else {
                tegra_bo_free(gem->dev, bo);
        }

        drm_gem_object_release(gem);
        kfree(bo);
}

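/*
 * Dumb-buffer creation for generic scanout: the minimum pitch is
 * width * bytes-per-pixel, rounded up to the hardware pitch alignment,
 * and the buffer size follows as pitch * height.
 */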
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
                         struct drm_mode_create_dumb *args)
{
        unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        struct tegra_drm *tegra = drm->dev_private;
        struct tegra_bo *bo;

        args->pitch = round_up(min_pitch, tegra->pitch_align);
        args->size = args->pitch * args->height;

        bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
                                         &args->handle);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        return 0;
}

int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
                             u32 handle, u64 *offset)
{
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        gem = drm_gem_object_lookup(file, handle);
        if (!gem) {
                dev_err(drm->dev, "failed to lookup GEM object\n");
                return -EINVAL;
        }

        bo = to_tegra_bo(gem);

        *offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

        drm_gem_object_unreference_unlocked(gem);

        return 0;
}

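/*
 * Page fault handler for mmap()ed BOs. Only shmem-backed objects fault
 * their pages in lazily; contiguous buffers are mapped up-front by
 * tegra_drm_mmap() below and should never reach this point.
 */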
static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *gem = vma->vm_private_data;
        struct tegra_bo *bo = to_tegra_bo(gem);
        struct page *page;
        pgoff_t offset;
        int err;

        if (!bo->pages)
                return VM_FAULT_SIGBUS;

        offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
        page = bo->pages[offset];

        err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
        switch (err) {
        case -EAGAIN:
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
        case -EBUSY:
                return VM_FAULT_NOPAGE;

        case -ENOMEM:
                return VM_FAULT_OOM;
        }

        return VM_FAULT_SIGBUS;
}

const struct vm_operations_struct tegra_bo_vm_ops = {
        .fault = tegra_bo_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};

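/*
 * mmap() entry point. Contiguous buffers are remapped in one go with
 * dma_mmap_wc(), which requires vm_pgoff to be temporarily cleared because
 * the GEM mmap offset is a fake lookup token rather than a real offset into
 * the buffer. Shmem-backed buffers instead rely on tegra_bo_fault() and are
 * marked VM_MIXEDMAP with write-combined page protections.
 */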
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct drm_gem_object *gem;
        struct tegra_bo *bo;
        int ret;

        ret = drm_gem_mmap(file, vma);
        if (ret)
                return ret;

        gem = vma->vm_private_data;
        bo = to_tegra_bo(gem);

        if (!bo->pages) {
                unsigned long vm_pgoff = vma->vm_pgoff;

                vma->vm_flags &= ~VM_PFNMAP;
                vma->vm_pgoff = 0;

                ret = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
                                  gem->size);
                if (ret) {
                        drm_gem_vm_close(vma);
                        return ret;
                }

                vma->vm_pgoff = vm_pgoff;
        } else {
                pgprot_t prot = vm_get_page_prot(vma->vm_flags);

                vma->vm_flags |= VM_MIXEDMAP;
                vma->vm_flags &= ~VM_PFNMAP;

                vma->vm_page_prot = pgprot_writecombine(prot);
        }

        return 0;
}

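/*
 * dma-buf export path: build an sg_table for the importer, either with one
 * entry per page (mapped for the importing device) or with a single entry
 * covering the whole contiguous buffer.
 */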
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
                            enum dma_data_direction dir)
{
        struct drm_gem_object *gem = attach->dmabuf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return NULL;

        if (bo->pages) {
                struct scatterlist *sg;
                unsigned int i;

                if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
                        goto free;

                for_each_sg(sgt->sgl, sg, bo->num_pages, i)
                        sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

                if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
                        goto free;
        } else {
                if (sg_alloc_table(sgt, 1, GFP_KERNEL))
                        goto free;

                sg_dma_address(sgt->sgl) = bo->paddr;
                sg_dma_len(sgt->sgl) = gem->size;
        }

        return sgt;

free:
        sg_free_table(sgt);
        kfree(sgt);
        return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
                                          struct sg_table *sgt,
                                          enum dma_data_direction dir)
{
        struct drm_gem_object *gem = attach->dmabuf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);

        if (bo->pages)
                dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

        sg_free_table(sgt);
        kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
        drm_gem_dmabuf_release(buf);
}

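/*
 * CPU access through the dma-buf interface is mostly unsupported: the kmap
 * variants return NULL and mmap() is rejected with -EINVAL. Importers that
 * need a CPU mapping can use vmap(), which only yields a non-NULL pointer
 * for contiguous buffers (bo->vaddr is unset on the shmem-backed path).
 */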
static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
                                         unsigned long page)
{
        return NULL;
}

static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
                                          unsigned long page,
                                          void *addr)
{
}

static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
        return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
                                   void *addr)
{
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
        return -EINVAL;
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
        struct drm_gem_object *gem = buf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);

        return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
        .map_dma_buf = tegra_gem_prime_map_dma_buf,
        .unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
        .release = tegra_gem_prime_release,
        .kmap_atomic = tegra_gem_prime_kmap_atomic,
        .kunmap_atomic = tegra_gem_prime_kunmap_atomic,
        .kmap = tegra_gem_prime_kmap,
        .kunmap = tegra_gem_prime_kunmap,
        .mmap = tegra_gem_prime_mmap,
        .vmap = tegra_gem_prime_vmap,
        .vunmap = tegra_gem_prime_vunmap,
};

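/*
 * Export a GEM object as a dma-buf, with the GEM object itself stashed in
 * the dma-buf's priv pointer for the ops above to retrieve.
 */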
struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
                                       struct drm_gem_object *gem,
                                       int flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &tegra_gem_prime_dmabuf_ops;
        exp_info.size = gem->size;
        exp_info.flags = flags;
        exp_info.priv = gem;

        return dma_buf_export(&exp_info);
}

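/*
 * Import a dma-buf as a GEM object. If the dma-buf was exported by this
 * very device, short-circuit the import and take another reference on the
 * underlying GEM object instead of wrapping it a second time.
 */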
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
                                              struct dma_buf *buf)
{
        struct tegra_bo *bo;

        if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
                struct drm_gem_object *gem = buf->priv;

                if (gem->dev == drm) {
                        drm_gem_object_reference(gem);
                        return gem;
                }
        }

        bo = tegra_bo_import(drm, buf);
        if (IS_ERR(bo))
                return ERR_CAST(bo);

        return &bo->gem;
}