linux/drivers/gpu/drm/tegra/gem.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

static void tegra_bo_put(struct host1x_bo *bo)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        drm_gem_object_put_unlocked(&obj->gem);
}

static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        *sgt = obj->sgt;

        return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        if (obj->vaddr)
                return obj->vaddr;
        else if (obj->gem.import_attach)
                return dma_buf_vmap(obj->gem.import_attach->dmabuf);
        else
                return vmap(obj->pages, obj->num_pages, VM_MAP,
                            pgprot_writecombine(PAGE_KERNEL));
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        if (obj->vaddr)
                return;
        else if (obj->gem.import_attach)
                dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
        else
                vunmap(addr);
}

static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        if (obj->vaddr)
                return obj->vaddr + page * PAGE_SIZE;
        else if (obj->gem.import_attach)
                return dma_buf_kmap(obj->gem.import_attach->dmabuf, page);
        else
                return vmap(obj->pages + page, 1, VM_MAP,
                            pgprot_writecombine(PAGE_KERNEL));
}

static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
                            void *addr)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        if (obj->vaddr)
                return;
        else if (obj->gem.import_attach)
                dma_buf_kunmap(obj->gem.import_attach->dmabuf, page, addr);
        else
                vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        drm_gem_object_get(&obj->gem);

        return bo;
}

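/*
 * Operations through which the host1x infrastructure references, pins and
 * maps Tegra GEM buffer objects.
 */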
static const struct host1x_bo_ops tegra_bo_ops = {
        .get = tegra_bo_get,
        .put = tegra_bo_put,
        .pin = tegra_bo_pin,
        .unpin = tegra_bo_unpin,
        .mmap = tegra_bo_mmap,
        .munmap = tegra_bo_munmap,
        .kmap = tegra_bo_kmap,
        .kunmap = tegra_bo_kunmap,
};

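/*
 * Reserve a region of I/O virtual address space from the drm_mm allocator
 * and map the buffer's scatter/gather table into the shared IOMMU domain.
 * The resulting IOVA is stored in bo->paddr.
 */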
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
        int prot = IOMMU_READ | IOMMU_WRITE;
        int err;

        if (bo->mm)
                return -EBUSY;

        bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
        if (!bo->mm)
                return -ENOMEM;

        mutex_lock(&tegra->mm_lock);

        err = drm_mm_insert_node_generic(&tegra->mm,
                                         bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
        if (err < 0) {
                dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
                        err);
                goto unlock;
        }

        bo->paddr = bo->mm->start;

        bo->size = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
                                bo->sgt->nents, prot);
        if (!bo->size) {
                dev_err(tegra->drm->dev, "failed to map buffer\n");
                err = -ENOMEM;
                goto remove;
        }

        mutex_unlock(&tegra->mm_lock);

        return 0;

remove:
        drm_mm_remove_node(bo->mm);
unlock:
        mutex_unlock(&tegra->mm_lock);
        kfree(bo->mm);
        return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
        if (!bo->mm)
                return 0;

        mutex_lock(&tegra->mm_lock);
        iommu_unmap(tegra->domain, bo->paddr, bo->size);
        drm_mm_remove_node(bo->mm);
        mutex_unlock(&tegra->mm_lock);

        kfree(bo->mm);

        return 0;
}

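/*
 * Allocate a tegra_bo and initialize the embedded GEM object and host1x_bo,
 * including the fake mmap offset that userspace uses to map the buffer.
 */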
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
                                              size_t size)
{
        struct tegra_bo *bo;
        int err;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return ERR_PTR(-ENOMEM);

        host1x_bo_init(&bo->base, &tegra_bo_ops);
        size = round_up(size, PAGE_SIZE);

        err = drm_gem_object_init(drm, &bo->gem, size);
        if (err < 0)
                goto free;

        err = drm_gem_create_mmap_offset(&bo->gem);
        if (err < 0)
                goto release;

        return bo;

release:
        drm_gem_object_release(&bo->gem);
free:
        kfree(bo);
        return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
        if (bo->pages) {
                dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
                             DMA_FROM_DEVICE);
                drm_gem_put_pages(&bo->gem, bo->pages, true, true);
                sg_free_table(bo->sgt);
                kfree(bo->sgt);
        } else if (bo->vaddr) {
                dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
        }
}

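/*
 * Back the buffer with shmem pages from the GEM helpers, build a
 * scatter/gather table for them and map it for DMA.
 */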
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
        int err;

        bo->pages = drm_gem_get_pages(&bo->gem);
        if (IS_ERR(bo->pages))
                return PTR_ERR(bo->pages);

        bo->num_pages = bo->gem.size >> PAGE_SHIFT;

        bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
        if (IS_ERR(bo->sgt)) {
                err = PTR_ERR(bo->sgt);
                goto put_pages;
        }

        err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
                         DMA_FROM_DEVICE);
        if (err == 0) {
                err = -EFAULT;
                goto free_sgt;
        }

        return 0;

free_sgt:
        sg_free_table(bo->sgt);
        kfree(bo->sgt);
put_pages:
        drm_gem_put_pages(&bo->gem, bo->pages, false, false);
        return err;
}

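/*
 * Allocate backing storage: individual pages remapped through the IOMMU if
 * an IOMMU domain is available, otherwise a physically contiguous
 * write-combined allocation from the DMA API.
 */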
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
        struct tegra_drm *tegra = drm->dev_private;
        int err;

        if (tegra->domain) {
                err = tegra_bo_get_pages(drm, bo);
                if (err < 0)
                        return err;

                err = tegra_bo_iommu_map(tegra, bo);
                if (err < 0) {
                        tegra_bo_free(drm, bo);
                        return err;
                }
        } else {
                size_t size = bo->gem.size;

                bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
                                         GFP_KERNEL | __GFP_NOWARN);
                if (!bo->vaddr) {
                        dev_err(drm->dev,
                                "failed to allocate buffer of size %zu\n",
                                size);
                        return -ENOMEM;
                }
        }

        return 0;
}

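/*
 * Create a buffer object of the given size and apply the tiling mode and
 * bottom-up layout flags requested by the caller.
 */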
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
                                 unsigned long flags)
{
        struct tegra_bo *bo;
        int err;

        bo = tegra_bo_alloc_object(drm, size);
        if (IS_ERR(bo))
                return bo;

        err = tegra_bo_alloc(drm, bo);
        if (err < 0)
                goto release;

        if (flags & DRM_TEGRA_GEM_CREATE_TILED)
                bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

        if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
                bo->flags |= TEGRA_BO_BOTTOM_UP;

        return bo;

release:
        drm_gem_object_release(&bo->gem);
        kfree(bo);
        return ERR_PTR(err);
}

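/*
 * Create a buffer object along with a handle for it in the given DRM file.
 * The creation reference is dropped before returning, so the handle holds
 * the only reference to the object.
 */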
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
                                             struct drm_device *drm,
                                             size_t size,
                                             unsigned long flags,
                                             u32 *handle)
{
        struct tegra_bo *bo;
        int err;

        bo = tegra_bo_create(drm, size, flags);
        if (IS_ERR(bo))
                return bo;

        err = drm_gem_handle_create(file, &bo->gem, handle);
        if (err) {
                tegra_bo_free_object(&bo->gem);
                return ERR_PTR(err);
        }

        drm_gem_object_put_unlocked(&bo->gem);

        return bo;
}

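/*
 * Import a foreign dma-buf: attach to it, map the attachment and, depending
 * on whether an IOMMU domain is available, either map the scatterlist into
 * the IOMMU or require a single contiguous entry for bo->paddr.
 */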
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
                                        struct dma_buf *buf)
{
        struct tegra_drm *tegra = drm->dev_private;
        struct dma_buf_attachment *attach;
        struct tegra_bo *bo;
        int err;

        bo = tegra_bo_alloc_object(drm, buf->size);
        if (IS_ERR(bo))
                return bo;

        attach = dma_buf_attach(buf, drm->dev);
        if (IS_ERR(attach)) {
                err = PTR_ERR(attach);
                goto free;
        }

        get_dma_buf(buf);

        bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
        if (IS_ERR(bo->sgt)) {
                err = PTR_ERR(bo->sgt);
                goto detach;
        }

        if (tegra->domain) {
                err = tegra_bo_iommu_map(tegra, bo);
                if (err < 0)
                        goto detach;
        } else {
                if (bo->sgt->nents > 1) {
                        err = -EINVAL;
                        goto detach;
                }

                bo->paddr = sg_dma_address(bo->sgt->sgl);
        }

        bo->gem.import_attach = attach;

        return bo;

detach:
        if (!IS_ERR_OR_NULL(bo->sgt))
                dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

        dma_buf_detach(buf, attach);
        dma_buf_put(buf);
free:
        drm_gem_object_release(&bo->gem);
        kfree(bo);
        return ERR_PTR(err);
}

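/*
 * Tear down a buffer object: undo the IOMMU mapping and release either the
 * imported dma-buf attachment or the locally allocated backing storage.
 */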
void tegra_bo_free_object(struct drm_gem_object *gem)
{
        struct tegra_drm *tegra = gem->dev->dev_private;
        struct tegra_bo *bo = to_tegra_bo(gem);

        if (tegra->domain)
                tegra_bo_iommu_unmap(tegra, bo);

        if (gem->import_attach) {
                dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
                                         DMA_TO_DEVICE);
                drm_prime_gem_destroy(gem, NULL);
        } else {
                tegra_bo_free(gem->dev, bo);
        }

        drm_gem_object_release(gem);
        kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
                         struct drm_mode_create_dumb *args)
{
        unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        struct tegra_drm *tegra = drm->dev_private;
        struct tegra_bo *bo;

        args->pitch = round_up(min_pitch, tegra->pitch_align);
        args->size = args->pitch * args->height;

        bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
                                         &args->handle);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        return 0;
}

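/*
 * Fault handler for mmap()ed buffers that are backed by individual pages;
 * contiguous buffers are mapped up front in __tegra_gem_mmap() and never
 * fault.
 */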
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *gem = vma->vm_private_data;
        struct tegra_bo *bo = to_tegra_bo(gem);
        struct page *page;
        pgoff_t offset;

        if (!bo->pages)
                return VM_FAULT_SIGBUS;

        offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
        page = bo->pages[offset];

        return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
        .fault = tegra_bo_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};

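/*
 * Set up a userspace mapping: contiguous buffers are mapped in one go via
 * dma_mmap_wc(), while page-backed buffers are faulted in lazily through
 * tegra_bo_fault() using a write-combined VM_MIXEDMAP VMA.
 */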
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
        struct tegra_bo *bo = to_tegra_bo(gem);

        if (!bo->pages) {
                unsigned long vm_pgoff = vma->vm_pgoff;
                int err;

                /*
                 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
                 * and set the vm_pgoff (used as a fake buffer offset by DRM)
                 * to 0 as we want to map the whole buffer.
                 */
                vma->vm_flags &= ~VM_PFNMAP;
                vma->vm_pgoff = 0;

                err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
                                  gem->size);
                if (err < 0) {
                        drm_gem_vm_close(vma);
                        return err;
                }

                vma->vm_pgoff = vm_pgoff;
        } else {
                pgprot_t prot = vm_get_page_prot(vma->vm_flags);

                vma->vm_flags |= VM_MIXEDMAP;
                vma->vm_flags &= ~VM_PFNMAP;

                vma->vm_page_prot = pgprot_writecombine(prot);
        }

        return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct drm_gem_object *gem;
        int err;

        err = drm_gem_mmap(file, vma);
        if (err < 0)
                return err;

        gem = vma->vm_private_data;

        return __tegra_gem_mmap(gem, vma);
}

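/*
 * dma-buf exporter side: build a scatter/gather table for the importing
 * device, either from the backing pages or as a single contiguous entry.
 */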
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
                            enum dma_data_direction dir)
{
        struct drm_gem_object *gem = attach->dmabuf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return NULL;

        if (bo->pages) {
                struct scatterlist *sg;
                unsigned int i;

                if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
                        goto free;

                for_each_sg(sgt->sgl, sg, bo->num_pages, i)
                        sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

                if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
                        goto free;
        } else {
                if (sg_alloc_table(sgt, 1, GFP_KERNEL))
                        goto free;

                sg_dma_address(sgt->sgl) = bo->paddr;
                sg_dma_len(sgt->sgl) = gem->size;
        }

        return sgt;

free:
        sg_free_table(sgt);
        kfree(sgt);
        return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
                                          struct sg_table *sgt,
                                          enum dma_data_direction dir)
{
        struct drm_gem_object *gem = attach->dmabuf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);

        if (bo->pages)
                dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

        sg_free_table(sgt);
        kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
        drm_gem_dmabuf_release(buf);
}

static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
                                            enum dma_data_direction direction)
{
        struct drm_gem_object *gem = buf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);
        struct drm_device *drm = gem->dev;

        if (bo->pages)
                dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
                                    DMA_FROM_DEVICE);

        return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
                                          enum dma_data_direction direction)
{
        struct drm_gem_object *gem = buf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);
        struct drm_device *drm = gem->dev;

        if (bo->pages)
                dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
                                       DMA_TO_DEVICE);

        return 0;
}

static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
        return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
                                   void *addr)
{
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
        struct drm_gem_object *gem = buf->priv;
        int err;

        err = drm_gem_mmap_obj(gem, gem->size, vma);
        if (err < 0)
                return err;

        return __tegra_gem_mmap(gem, vma);
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
        struct drm_gem_object *gem = buf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);

        return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
        .map_dma_buf = tegra_gem_prime_map_dma_buf,
        .unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
        .release = tegra_gem_prime_release,
        .begin_cpu_access = tegra_gem_prime_begin_cpu_access,
        .end_cpu_access = tegra_gem_prime_end_cpu_access,
        .map = tegra_gem_prime_kmap,
        .unmap = tegra_gem_prime_kunmap,
        .mmap = tegra_gem_prime_mmap,
        .vmap = tegra_gem_prime_vmap,
        .vunmap = tegra_gem_prime_vunmap,
};

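/*
 * Export a GEM object as a dma-buf using the driver's own dma_buf_ops so
 * that self-imports can be detected in tegra_gem_prime_import().
 */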
struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
                                       int flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.exp_name = KBUILD_MODNAME;
        exp_info.owner = gem->dev->driver->fops->owner;
        exp_info.ops = &tegra_gem_prime_dmabuf_ops;
        exp_info.size = gem->size;
        exp_info.flags = flags;
        exp_info.priv = gem;

        return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

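/*
 * Import a dma-buf: if it was exported from the same device, just take a
 * reference on the underlying GEM object; otherwise attach to the foreign
 * buffer via tegra_bo_import().
 */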
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
                                              struct dma_buf *buf)
{
        struct tegra_bo *bo;

        if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
                struct drm_gem_object *gem = buf->priv;

                if (gem->dev == drm) {
                        drm_gem_object_get(gem);
                        return gem;
                }
        }

        bo = tegra_bo_import(drm, buf);
        if (IS_ERR(bo))
                return ERR_CAST(bo);

        return &bo->gem;
}
