linux/drivers/gpu/drm/tegra/gem.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/module.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

MODULE_IMPORT_NS(DMA_BUF);

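/*
 * host1x_bo reference counting piggybacks on the embedded GEM object:
 * dropping a host1x reference drops the corresponding GEM reference.
 */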
static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put(&obj->gem);
}

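/*
 * Build a new SG table pointing at the same pages as an existing chain of
 * scatterlist entries. This is a shallow copy: only page pointers and
 * lengths are duplicated, not the pages themselves, so the copy can be
 * used independently of the original table.
 */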
/* XXX move this into lib/scatterlist.c? */
static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
				  unsigned int nents, gfp_t gfp_mask)
{
	struct scatterlist *dst;
	unsigned int i;
	int err;

	err = sg_alloc_table(sgt, nents, gfp_mask);
	if (err < 0)
		return err;

	dst = sgt->sgl;

	for (i = 0; i < nents; i++) {
		sg_set_page(dst, sg_page(sg), sg->length, 0);
		dst = sg_next(dst);
		sg = sg_next(sg);
	}

	return 0;
}

static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
				     dma_addr_t *phys)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct sg_table *sgt;
	int err;

	/*
	 * If we've manually mapped the buffer object through the IOMMU, make
	 * sure to return the IOVA address of our mapping.
	 *
	 * Similarly, for buffers that have been allocated by the DMA API the
	 * physical address can be used for devices that are not attached to
	 * an IOMMU. For these devices, callers must pass a valid pointer via
	 * the @phys argument.
	 *
	 * Imported buffers were also already mapped at import time, so the
	 * existing mapping can be reused.
	 */
	if (phys) {
		*phys = obj->iova;
		return NULL;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
						0, obj->gem.size, GFP_KERNEL);
		if (err < 0)
			goto free;
	} else if (obj->sgt) {
		/*
		 * If the buffer object already has an SG table but no pages
		 * were allocated for it, it means the buffer was imported and
		 * the SG table needs to be copied to avoid overwriting any
		 * other potential users of the original SG table.
		 */
		err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl,
					     obj->sgt->orig_nents, GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
				      obj->gem.size);
		if (err < 0)
			goto free;
	}

	return sgt;

free:
	kfree(sgt);
	return ERR_PTR(err);
}

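/*
 * Undo tegra_bo_pin(): free the SG table (possibly NULL for the @phys
 * case) that was handed to host1x. The backing pages stay allocated.
 */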
static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
{
	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}

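/*
 * Return a kernel virtual address for the buffer. Three cases: buffers
 * allocated via the DMA API already have a permanent mapping (vaddr),
 * imported buffers are mapped with dma_buf_vmap(), and page-backed
 * buffers are mapped write-combined with vmap().
 */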
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct dma_buf_map map;
	int ret;

	if (obj->vaddr) {
		return obj->vaddr;
	} else if (obj->gem.import_attach) {
		ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map);
		return ret ? NULL : map.vaddr;
	} else {
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
	}
}

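/*
 * Drop the mapping set up by tegra_bo_mmap(). DMA API allocations keep
 * their permanent kernel mapping, so nothing needs to be done for them.
 */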
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(addr);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap(obj->gem.import_attach->dmabuf, &map);
	else
		vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};

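/*
 * Reserve a node in the IOVA space managed by the drm_mm allocator and
 * map the buffer's SG table into it through the driver's IOMMU domain.
 */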
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

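/* Tear down the IOMMU mapping and return the IOVA range to the allocator. */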
static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
	.free = tegra_bo_free_object,
	.export = tegra_gem_prime_export,
	.vm_ops = &tegra_bo_vm_ops,
};

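/*
 * Common object allocation: create the tegra_bo wrapper, initialize the
 * embedded GEM and host1x objects and reserve an mmap offset.
 */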
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->gem.funcs = &tegra_gem_object_funcs;

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

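/*
 * Release the backing storage, which is either a set of shmem pages
 * (IOMMU case) or a contiguous write-combined DMA allocation.
 */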
static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

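/*
 * Allocate backing pages from shmem, build an SG table for them and map
 * that table for DMA, which hands ownership of the pages to the device
 * and performs any required cache maintenance.
 */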
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
	if (err)
		goto free_sgt;

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

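/*
 * Pick the allocation strategy: with an IOMMU the buffer is assembled
 * from individual pages and mapped through the IOMMU, otherwise it must
 * be physically contiguous and is allocated via the DMA API.
 */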
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

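/*
 * Create a buffer object along with a userspace handle for it. The
 * reference taken at creation time is dropped before returning, leaving
 * the handle as the owner of the remaining reference.
 */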
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put(&bo->gem);

	return bo;
}

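/*
 * Import a foreign dma-buf: attach to it, map it for DMA and, if an
 * IOMMU domain is in use, also map it into the Tegra DRM IOVA space.
 */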
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

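/* Final cleanup once the last reference to the GEM object is gone. */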
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

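/*
 * Create a dumb buffer. The pitch is rounded up to the alignment that
 * the driver advertises via tegra->pitch_align.
 */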
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

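/*
 * Fault handler for userspace mappings of page-backed buffers: look up
 * the page backing the faulting address and insert it into the VMA.
 * Contiguous DMA allocations are remapped upfront and never fault here.
 */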
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

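/*
 * Set up a userspace mapping. Contiguous (DMA API) buffers are remapped
 * in one go with dma_mmap_wc(), while page-backed buffers are faulted in
 * lazily through tegra_bo_fault().
 */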
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}

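/*
 * dma-buf exporter side: hand the importer an SG table describing the
 * buffer and map that table for DMA on the importer's device.
 */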
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sgtable(attach->dev, sgt, dir, 0))
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

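/*
 * CPU access bracketing: for page-backed buffers the SG table is synced
 * so that CPU reads see device writes and device reads see CPU writes.
 * Buffers allocated with dma_alloc_wc() are not synced here.
 */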
static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);

	return 0;
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static int tegra_gem_prime_vmap(struct dma_buf *buf, struct dma_buf_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	dma_buf_map_set_vaddr(map, bo->vaddr);

	return 0;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct dma_buf_map *map)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

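/* Export the GEM object as a dma-buf backed by the ops defined above. */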
struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

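/*
 * Import a dma-buf. If the buffer was exported by this very device, skip
 * the import machinery and simply take another reference to the existing
 * GEM object.
 */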
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}

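/* Resolve a userspace handle to the host1x_bo embedded in the tegra_bo. */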
struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	bo = to_tegra_bo(gem);
	return &bo->base;
}