linux/drivers/gpu/drm/tegra/drm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION.  All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
#include <drm/drm_vblank.h>

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

#include "dc.h"
#include "drm.h"
#include "gem.h"
#include "uapi.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

#define CARVEOUT_SZ SZ_64M
#define CDMA_GATHER_FETCHES_MAX_NB 16383

static int tegra_atomic_check(struct drm_device *drm,
                              struct drm_atomic_state *state)
{
        int err;

        err = drm_atomic_helper_check(drm, state);
        if (err < 0)
                return err;

        return tegra_display_hub_atomic_check(drm, state);
}

static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
        .fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
        .output_poll_changed = drm_fb_helper_output_poll_changed,
#endif
        .atomic_check = tegra_atomic_check,
        .atomic_commit = drm_atomic_helper_commit,
};

static void tegra_atomic_post_commit(struct drm_device *drm,
                                     struct drm_atomic_state *old_state)
{
        struct drm_crtc_state *old_crtc_state __maybe_unused;
        struct drm_crtc *crtc;
        unsigned int i;

        for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
                tegra_crtc_atomic_post_commit(crtc, old_state);
}

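/*
 * With a display hub (Tegra186 and later), the hub state must be committed
 * after the modeset disables but before the plane updates, so the stock
 * drm_atomic_helper_commit_tail_rpm() cannot be used and the sequence is
 * open-coded below. Without a hub, the stock helper suffices.
 */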
static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
{
        struct drm_device *drm = old_state->dev;
        struct tegra_drm *tegra = drm->dev_private;

        if (tegra->hub) {
                bool fence_cookie = dma_fence_begin_signalling();

                drm_atomic_helper_commit_modeset_disables(drm, old_state);
                tegra_display_hub_atomic_commit(drm, old_state);
                drm_atomic_helper_commit_planes(drm, old_state, 0);
                drm_atomic_helper_commit_modeset_enables(drm, old_state);
                drm_atomic_helper_commit_hw_done(old_state);
                dma_fence_end_signalling(fence_cookie);
                drm_atomic_helper_wait_for_vblanks(drm, old_state);
                drm_atomic_helper_cleanup_planes(drm, old_state);
        } else {
                drm_atomic_helper_commit_tail_rpm(old_state);
        }

        tegra_atomic_post_commit(drm, old_state);
}

static const struct drm_mode_config_helper_funcs
tegra_drm_mode_config_helpers = {
        .atomic_commit_tail = tegra_atomic_commit_tail,
};

static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
        struct tegra_drm_file *fpriv;

        fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
        if (!fpriv)
                return -ENOMEM;

        idr_init_base(&fpriv->legacy_contexts, 1);
        xa_init_flags(&fpriv->contexts, XA_FLAGS_ALLOC1);
        xa_init(&fpriv->syncpoints);
        mutex_init(&fpriv->lock);
        filp->driver_priv = fpriv;

        return 0;
}

static void tegra_drm_context_free(struct tegra_drm_context *context)
{
        context->client->ops->close_channel(context);
        pm_runtime_put(context->client->base.dev);
        kfree(context);
}

static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
                                       struct drm_tegra_reloc __user *src,
                                       struct drm_device *drm,
                                       struct drm_file *file)
{
        u32 cmdbuf, target;
        int err;

        err = get_user(cmdbuf, &src->cmdbuf.handle);
        if (err < 0)
                return err;

        err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
        if (err < 0)
                return err;

        err = get_user(target, &src->target.handle);
        if (err < 0)
                return err;

        err = get_user(dest->target.offset, &src->target.offset);
        if (err < 0)
                return err;

        err = get_user(dest->shift, &src->shift);
        if (err < 0)
                return err;

        dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;

        dest->cmdbuf.bo = tegra_gem_lookup(file, cmdbuf);
        if (!dest->cmdbuf.bo)
                return -ENOENT;

        dest->target.bo = tegra_gem_lookup(file, target);
        if (!dest->target.bo)
                return -ENOENT;

        return 0;
}

int tegra_drm_submit(struct tegra_drm_context *context,
                     struct drm_tegra_submit *args, struct drm_device *drm,
                     struct drm_file *file)
{
        struct host1x_client *client = &context->client->base;
        unsigned int num_cmdbufs = args->num_cmdbufs;
        unsigned int num_relocs = args->num_relocs;
        struct drm_tegra_cmdbuf __user *user_cmdbufs;
        struct drm_tegra_reloc __user *user_relocs;
        struct drm_tegra_syncpt __user *user_syncpt;
        struct drm_tegra_syncpt syncpt;
        struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_gem_object **refs;
        struct host1x_syncpt *sp = NULL;
        struct host1x_job *job;
        unsigned int num_refs;
        int err;

        user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
        user_relocs = u64_to_user_ptr(args->relocs);
        user_syncpt = u64_to_user_ptr(args->syncpts);

        /* We don't yet support more than one syncpt_incr struct per submit */
        if (args->num_syncpts != 1)
                return -EINVAL;

        /* We don't yet support waitchks */
        if (args->num_waitchks != 0)
                return -EINVAL;

        job = host1x_job_alloc(context->channel, args->num_cmdbufs,
                               args->num_relocs, false);
        if (!job)
                return -ENOMEM;

        job->num_relocs = args->num_relocs;
        job->client = client;
        job->class = client->class;
        job->serialize = true;
        job->syncpt_recovery = true;

        /*
         * Track referenced BOs so that they can be unreferenced after the
         * submission is complete.
         */
        num_refs = num_cmdbufs + num_relocs * 2;

        refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
        if (!refs) {
                err = -ENOMEM;
                goto put;
        }

        /* reuse as an iterator later */
        num_refs = 0;

        while (num_cmdbufs) {
                struct drm_tegra_cmdbuf cmdbuf;
                struct host1x_bo *bo;
                struct tegra_bo *obj;
                u64 offset;

                if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) {
                        err = -EFAULT;
                        goto fail;
                }

                /*
                 * The maximum number of CDMA gather fetches is 16383; a
                 * higher value means the word count is malformed.
                 */
                if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
                        err = -EINVAL;
                        goto fail;
                }

                bo = tegra_gem_lookup(file, cmdbuf.handle);
                if (!bo) {
                        err = -ENOENT;
                        goto fail;
                }

                offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
                obj = host1x_to_tegra_bo(bo);
                refs[num_refs++] = &obj->gem;

                /*
                 * The gather buffer base address must be 4-byte aligned;
                 * an unaligned offset is malformed and causes command
                 * stream corruption when the buffer address is relocated.
                 */
                if (offset & 3 || offset > obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }

                host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
                num_cmdbufs--;
                user_cmdbufs++;
        }

        /* copy and resolve relocations from submit */
        while (num_relocs--) {
                struct host1x_reloc *reloc;
                struct tegra_bo *obj;

                err = host1x_reloc_copy_from_user(&job->relocs[num_relocs],
                                                  &user_relocs[num_relocs], drm,
                                                  file);
                if (err < 0)
                        goto fail;

                reloc = &job->relocs[num_relocs];
                obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
                refs[num_refs++] = &obj->gem;

                /*
                 * An unaligned cmdbuf offset would cause an unaligned write
                 * during relocation patching, corrupting the command
                 * stream.
                 */
                if (reloc->cmdbuf.offset & 3 ||
                    reloc->cmdbuf.offset >= obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }

                obj = host1x_to_tegra_bo(reloc->target.bo);
                refs[num_refs++] = &obj->gem;

                if (reloc->target.offset >= obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }
        }

        if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
                err = -EFAULT;
                goto fail;
        }

        /* Syncpoint ref will be dropped on job release. */
        sp = host1x_syncpt_get_by_id(host1x, syncpt.id);
        if (!sp) {
                err = -ENOENT;
                goto fail;
        }

        job->is_addr_reg = context->client->ops->is_addr_reg;
        job->is_valid_class = context->client->ops->is_valid_class;
        job->syncpt_incrs = syncpt.incrs;
        job->syncpt = sp;
        job->timeout = 10000;

        if (args->timeout && args->timeout < 10000)
                job->timeout = args->timeout;

        err = host1x_job_pin(job, context->client->base.dev);
        if (err)
                goto fail;

        err = host1x_job_submit(job);
        if (err) {
                host1x_job_unpin(job);
                goto fail;
        }

        args->fence = job->syncpt_end;

fail:
        while (num_refs--)
                drm_gem_object_put(refs[num_refs]);

        kfree(refs);

put:
        host1x_job_put(job);
        return err;
}
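
/*
 * A minimal sketch of how userspace might drive the legacy submit path
 * above. This is illustrative only: bo_handle, syncpt_id, context_id and
 * drm_fd are hypothetical and assumed to have been obtained via
 * DRM_IOCTL_TEGRA_GEM_CREATE, DRM_IOCTL_TEGRA_GET_SYNCPT and
 * DRM_IOCTL_TEGRA_OPEN_CHANNEL; error handling is omitted:
 *
 *	struct drm_tegra_cmdbuf cmdbuf = {
 *		.handle = bo_handle,	// pushbuffer GEM handle
 *		.offset = 0,		// must be 4-byte aligned
 *		.words = num_words,	// at most CDMA_GATHER_FETCHES_MAX_NB
 *	};
 *	struct drm_tegra_syncpt incr = { .id = syncpt_id, .incrs = 1 };
 *	struct drm_tegra_submit args = {
 *		.context = context_id,
 *		.num_syncpts = 1,	// exactly one, see the check above
 *		.num_cmdbufs = 1,
 *		.syncpts = (uintptr_t)&incr,
 *		.cmdbufs = (uintptr_t)&cmdbuf,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_TEGRA_SUBMIT, &args);
 *	// args.fence now holds the syncpoint threshold to wait for
 */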

#ifdef CONFIG_DRM_TEGRA_STAGING
static int tegra_gem_create(struct drm_device *drm, void *data,
                            struct drm_file *file)
{
        struct drm_tegra_gem_create *args = data;
        struct tegra_bo *bo;

        bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
                                         &args->handle);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        return 0;
}

static int tegra_gem_mmap(struct drm_device *drm, void *data,
                          struct drm_file *file)
{
        struct drm_tegra_gem_mmap *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -EINVAL;

        bo = to_tegra_bo(gem);

        args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

        drm_gem_object_put(gem);

        return 0;
}

static int tegra_syncpt_read(struct drm_device *drm, void *data,
                             struct drm_file *file)
{
        struct host1x *host = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_read *args = data;
        struct host1x_syncpt *sp;

        sp = host1x_syncpt_get_by_id_noref(host, args->id);
        if (!sp)
                return -EINVAL;

        args->value = host1x_syncpt_read_min(sp);
        return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
                             struct drm_file *file)
{
        struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_incr *args = data;
        struct host1x_syncpt *sp;

        sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
        if (!sp)
                return -EINVAL;

        return host1x_syncpt_incr(sp);
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
                             struct drm_file *file)
{
        struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_wait *args = data;
        struct host1x_syncpt *sp;

        sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
        if (!sp)
                return -EINVAL;

        return host1x_syncpt_wait(sp, args->thresh,
                                  msecs_to_jiffies(args->timeout),
                                  &args->value);
}
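
/*
 * The fence returned by DRM_IOCTL_TEGRA_SUBMIT is a threshold against the
 * job's syncpoint; a hypothetical userspace wait for job completion could
 * look like this (identifiers illustrative, error handling omitted):
 *
 *	struct drm_tegra_syncpt_wait wait = {
 *		.id = syncpt_id,
 *		.thresh = submit_args.fence,
 *		.timeout = 1000,	// milliseconds, see msecs_to_jiffies() above
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_TEGRA_SYNCPT_WAIT, &wait);
 */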

static int tegra_client_open(struct tegra_drm_file *fpriv,
                             struct tegra_drm_client *client,
                             struct tegra_drm_context *context)
{
        int err;

        err = pm_runtime_resume_and_get(client->base.dev);
        if (err)
                return err;

        err = client->ops->open_channel(client, context);
        if (err < 0) {
                pm_runtime_put(client->base.dev);
                return err;
        }

        err = idr_alloc(&fpriv->legacy_contexts, context, 1, 0, GFP_KERNEL);
        if (err < 0) {
                client->ops->close_channel(context);
                pm_runtime_put(client->base.dev);
                return err;
        }

        context->client = client;
        context->id = err;

        return 0;
}

static int tegra_open_channel(struct drm_device *drm, void *data,
                              struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct tegra_drm *tegra = drm->dev_private;
        struct drm_tegra_open_channel *args = data;
        struct tegra_drm_context *context;
        struct tegra_drm_client *client;
        int err = -ENODEV;

        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return -ENOMEM;

        mutex_lock(&fpriv->lock);

        list_for_each_entry(client, &tegra->clients, list)
                if (client->base.class == args->client) {
                        err = tegra_client_open(fpriv, client, context);
                        if (err < 0)
                                break;

                        args->context = context->id;
                        break;
                }

        if (err < 0)
                kfree(context);

        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_close_channel(struct drm_device *drm, void *data,
                               struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_close_channel *args = data;
        struct tegra_drm_context *context;
        int err = 0;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->legacy_contexts, args->context);
        if (!context) {
                err = -EINVAL;
                goto unlock;
        }

        idr_remove(&fpriv->legacy_contexts, context->id);
        tegra_drm_context_free(context);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_get_syncpt(struct drm_device *drm, void *data,
                            struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_get_syncpt *args = data;
        struct tegra_drm_context *context;
        struct host1x_syncpt *syncpt;
        int err = 0;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->legacy_contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
        }

        if (args->index >= context->client->base.num_syncpts) {
                err = -EINVAL;
                goto unlock;
        }

        syncpt = context->client->base.syncpts[args->index];
        args->id = host1x_syncpt_id(syncpt);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_submit(struct drm_device *drm, void *data,
                        struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_submit *args = data;
        struct tegra_drm_context *context;
        int err;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->legacy_contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
        }

        err = context->client->ops->submit(context, args, drm, file);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
                                 struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_get_syncpt_base *args = data;
        struct tegra_drm_context *context;
        struct host1x_syncpt_base *base;
        struct host1x_syncpt *syncpt;
        int err = 0;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->legacy_contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
        }

        if (args->syncpt >= context->client->base.num_syncpts) {
                err = -EINVAL;
                goto unlock;
        }

        syncpt = context->client->base.syncpts[args->syncpt];

        base = host1x_syncpt_get_base(syncpt);
        if (!base) {
                err = -ENXIO;
                goto unlock;
        }

        args->id = host1x_syncpt_base_id(base);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
                                struct drm_file *file)
{
        struct drm_tegra_gem_set_tiling *args = data;
        enum tegra_bo_tiling_mode mode;
        struct drm_gem_object *gem;
        unsigned long value = 0;
        struct tegra_bo *bo;

        switch (args->mode) {
        case DRM_TEGRA_GEM_TILING_MODE_PITCH:
                mode = TEGRA_BO_TILING_MODE_PITCH;

                if (args->value != 0)
                        return -EINVAL;

                break;

        case DRM_TEGRA_GEM_TILING_MODE_TILED:
                mode = TEGRA_BO_TILING_MODE_TILED;

                if (args->value != 0)
                        return -EINVAL;

                break;

        case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
                mode = TEGRA_BO_TILING_MODE_BLOCK;

                if (args->value > 5)
                        return -EINVAL;

                value = args->value;
                break;

        default:
                return -EINVAL;
        }

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);

        bo->tiling.mode = mode;
        bo->tiling.value = value;

        drm_gem_object_put(gem);

        return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
                                struct drm_file *file)
{
        struct drm_tegra_gem_get_tiling *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;
        int err = 0;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);

        switch (bo->tiling.mode) {
        case TEGRA_BO_TILING_MODE_PITCH:
                args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
                args->value = 0;
                break;

        case TEGRA_BO_TILING_MODE_TILED:
                args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
                args->value = 0;
                break;

        case TEGRA_BO_TILING_MODE_BLOCK:
                args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
                args->value = bo->tiling.value;
                break;

        default:
                err = -EINVAL;
                break;
        }

        drm_gem_object_put(gem);

        return err;
}

static int tegra_gem_set_flags(struct drm_device *drm, void *data,
                               struct drm_file *file)
{
        struct drm_tegra_gem_set_flags *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
                return -EINVAL;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);
        bo->flags = 0;

        if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
                bo->flags |= TEGRA_BO_BOTTOM_UP;

        drm_gem_object_put(gem);

        return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
                               struct drm_file *file)
{
        struct drm_tegra_gem_get_flags *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);
        args->flags = 0;

        if (bo->flags & TEGRA_BO_BOTTOM_UP)
                args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

        drm_gem_object_put(gem);

        return 0;
}
#endif

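/*
 * The first block below is the newer channel/syncpoint UAPI (the
 * tegra_drm_ioctl_*() handlers are implemented in uapi.c); the remainder
 * is the legacy UAPI handled by the functions above. Both are currently
 * gated behind CONFIG_DRM_TEGRA_STAGING.
 */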
static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
        DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_OPEN, tegra_drm_ioctl_channel_open,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_CLOSE, tegra_drm_ioctl_channel_close,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_MAP, tegra_drm_ioctl_channel_map,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_UNMAP, tegra_drm_ioctl_channel_unmap,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_SUBMIT, tegra_drm_ioctl_channel_submit,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_ALLOCATE, tegra_drm_ioctl_syncpoint_allocate,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_FREE, tegra_drm_ioctl_syncpoint_free,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_WAIT, tegra_drm_ioctl_syncpoint_wait,
                          DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
                          DRM_RENDER_ALLOW),
#endif
};

static const struct file_operations tegra_drm_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .mmap = tegra_drm_mmap,
        .poll = drm_poll,
        .read = drm_read,
        .compat_ioctl = drm_compat_ioctl,
        .llseek = noop_llseek,
};

static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
        struct tegra_drm_context *context = p;

        tegra_drm_context_free(context);

        return 0;
}

static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;

        mutex_lock(&fpriv->lock);
        idr_for_each(&fpriv->legacy_contexts, tegra_drm_context_cleanup, NULL);
        tegra_drm_uapi_close_file(fpriv);
        mutex_unlock(&fpriv->lock);

        idr_destroy(&fpriv->legacy_contexts);
        mutex_destroy(&fpriv->lock);
        kfree(fpriv);
}

#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)s->private;
        struct drm_device *drm = node->minor->dev;
        struct drm_framebuffer *fb;

        mutex_lock(&drm->mode_config.fb_lock);

        list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
                seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
                           fb->base.id, fb->width, fb->height,
                           fb->format->depth,
                           fb->format->cpp[0] * 8,
                           drm_framebuffer_read_refcount(fb));
        }

        mutex_unlock(&drm->mode_config.fb_lock);

        return 0;
}

static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)s->private;
        struct drm_device *drm = node->minor->dev;
        struct tegra_drm *tegra = drm->dev_private;
        struct drm_printer p = drm_seq_file_printer(s);

        if (tegra->domain) {
                mutex_lock(&tegra->mm_lock);
                drm_mm_print(&tegra->mm, &p);
                mutex_unlock(&tegra->mm_lock);
        }

        return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
        { "framebuffers", tegra_debugfs_framebuffers, 0 },
        { "iova", tegra_debugfs_iova, 0 },
};

static void tegra_debugfs_init(struct drm_minor *minor)
{
        drm_debugfs_create_files(tegra_debugfs_list,
                                 ARRAY_SIZE(tegra_debugfs_list),
                                 minor->debugfs_root, minor);
}
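
/*
 * Note: DRM debugfs files like these typically appear under
 * /sys/kernel/debug/dri/<minor>/; e.g. "iova" dumps the GEM range
 * allocator state when the explicit IOMMU domain is in use.
 */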
#endif

static const struct drm_driver tegra_drm_driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM |
                           DRIVER_ATOMIC | DRIVER_RENDER | DRIVER_SYNCOBJ,
        .open = tegra_drm_open,
        .postclose = tegra_drm_postclose,
        .lastclose = drm_fb_helper_lastclose,

#if defined(CONFIG_DEBUG_FS)
        .debugfs_init = tegra_debugfs_init,
#endif

        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import = tegra_gem_prime_import,

        .dumb_create = tegra_bo_dumb_create,

        .ioctls = tegra_drm_ioctls,
        .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
        .fops = &tegra_drm_fops,

        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
        .major = DRIVER_MAJOR,
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
};

int tegra_drm_register_client(struct tegra_drm *tegra,
                              struct tegra_drm_client *client)
{
        /*
         * When MLOCKs are implemented, change to allocate a shared channel
         * only when MLOCKs are disabled.
         */
        client->shared_channel = host1x_channel_request(&client->base);
        if (!client->shared_channel)
                return -EBUSY;

        mutex_lock(&tegra->clients_lock);
        list_add_tail(&client->list, &tegra->clients);
        client->drm = tegra;
        mutex_unlock(&tegra->clients_lock);

        return 0;
}

int tegra_drm_unregister_client(struct tegra_drm *tegra,
                                struct tegra_drm_client *client)
{
        mutex_lock(&tegra->clients_lock);
        list_del_init(&client->list);
        client->drm = NULL;
        mutex_unlock(&tegra->clients_lock);

        if (client->shared_channel)
                host1x_channel_put(client->shared_channel);

        return 0;
}

int host1x_client_iommu_attach(struct host1x_client *client)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev);
        struct drm_device *drm = dev_get_drvdata(client->host);
        struct tegra_drm *tegra = drm->dev_private;
        struct iommu_group *group = NULL;
        int err;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
        if (client->dev->archdata.mapping) {
                struct dma_iommu_mapping *mapping =
                                to_dma_iommu_mapping(client->dev);
                arm_iommu_detach_device(client->dev);
                arm_iommu_release_mapping(mapping);

                domain = iommu_get_domain_for_dev(client->dev);
        }
#endif

        /*
         * If the host1x client is already attached to an IOMMU domain that is
         * not the shared IOMMU domain, don't try to attach it to a different
         * domain. This allows using the IOMMU-backed DMA API.
         */
        if (domain && domain != tegra->domain)
                return 0;

        if (tegra->domain) {
                group = iommu_group_get(client->dev);
                if (!group)
                        return -ENODEV;

                if (domain != tegra->domain) {
                        err = iommu_attach_group(tegra->domain, group);
                        if (err < 0) {
                                iommu_group_put(group);
                                return err;
                        }
                }

                tegra->use_explicit_iommu = true;
        }

        client->group = group;

        return 0;
}

void host1x_client_iommu_detach(struct host1x_client *client)
{
        struct drm_device *drm = dev_get_drvdata(client->host);
        struct tegra_drm *tegra = drm->dev_private;
        struct iommu_domain *domain;

        if (client->group) {
                /*
                 * Devices that are part of the same group may no longer be
                 * attached to a domain at this point because their group may
                 * have been detached by an earlier client.
                 */
                domain = iommu_get_domain_for_dev(client->dev);
                if (domain)
                        iommu_detach_group(tegra->domain, client->group);

                iommu_group_put(client->group);
                client->group = NULL;
        }
}

void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
{
        struct iova *alloc;
        void *virt;
        gfp_t gfp;
        int err;

        if (tegra->domain)
                size = iova_align(&tegra->carveout.domain, size);
        else
                size = PAGE_ALIGN(size);

        gfp = GFP_KERNEL | __GFP_ZERO;
        if (!tegra->domain) {
                /*
                 * Many units only support 32-bit addresses, even on 64-bit
                 * SoCs. If there is no IOMMU to translate into a 32-bit IO
                 * virtual address space, force allocations to be in the
                 * lower 32-bit range.
                 */
                gfp |= GFP_DMA;
        }

        virt = (void *)__get_free_pages(gfp, get_order(size));
        if (!virt)
                return ERR_PTR(-ENOMEM);

        if (!tegra->domain) {
                /*
                 * If IOMMU is disabled, devices address physical memory
                 * directly.
                 */
                *dma = virt_to_phys(virt);
                return virt;
        }

        alloc = alloc_iova(&tegra->carveout.domain,
                           size >> tegra->carveout.shift,
                           tegra->carveout.limit, true);
        if (!alloc) {
                err = -EBUSY;
                goto free_pages;
        }

        *dma = iova_dma_addr(&tegra->carveout.domain, alloc);
        err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
                        size, IOMMU_READ | IOMMU_WRITE);
        if (err < 0)
                goto free_iova;

        return virt;

free_iova:
        __free_iova(&tegra->carveout.domain, alloc);
free_pages:
        free_pages((unsigned long)virt, get_order(size));

        return ERR_PTR(err);
}

void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
                    dma_addr_t dma)
{
        if (tegra->domain)
                size = iova_align(&tegra->carveout.domain, size);
        else
                size = PAGE_ALIGN(size);

        if (tegra->domain) {
                iommu_unmap(tegra->domain, dma, size);
                free_iova(&tegra->carveout.domain,
                          iova_pfn(&tegra->carveout.domain, dma));
        }

        free_pages((unsigned long)virt, get_order(size));
}
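
/*
 * The two helpers above are paired; a minimal usage sketch (hypothetical
 * caller, error handling abbreviated):
 *
 *	dma_addr_t iova;
 *	void *virt;
 *
 *	virt = tegra_drm_alloc(tegra, SZ_64K, &iova);
 *	if (IS_ERR(virt))
 *		return PTR_ERR(virt);
 *
 *	// ... program "iova" into the device, access "virt" from the CPU ...
 *
 *	tegra_drm_free(tegra, SZ_64K, virt, iova);
 */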

static bool host1x_drm_wants_iommu(struct host1x_device *dev)
{
        struct host1x *host1x = dev_get_drvdata(dev->dev.parent);
        struct iommu_domain *domain;

        /*
         * If the Tegra DRM clients are backed by an IOMMU, push buffers are
         * likely to be allocated beyond the 32-bit boundary if sufficient
         * system memory is available. This is problematic on earlier Tegra
         * generations where host1x supports a maximum of 32 address bits in
         * the GATHER opcode. In this case, unless host1x is behind an IOMMU
         * as well, it won't be able to process buffers allocated beyond the
         * 32-bit boundary.
         *
         * The DMA API will use bounce buffers in this case, so that could
         * perhaps still be made to work, even if less efficient, but there
         * is another catch: in order to perform cache maintenance on pages
         * allocated for discontiguous buffers we need to map and unmap the
         * SG table representing these buffers. This is fine for something
         * small like a push buffer, but it exhausts the bounce buffer pool
         * (typically on the order of a few MiB) for framebuffers (many MiB
         * for any modern resolution).
         *
         * Work around this by making sure that Tegra DRM clients only use
         * an IOMMU if the parent host1x also uses an IOMMU.
         *
         * Note that there's still a small gap here that we don't cover: if
         * the DMA API is backed by an IOMMU there's no way to control which
         * device is attached to an IOMMU and which isn't, except via wiring
         * up the device tree appropriately. This is considered a problem
         * of integration, so care must be taken for the DT to be consistent.
         */
        domain = iommu_get_domain_for_dev(dev->dev.parent);

        /*
         * Tegra20 and Tegra30 don't support addressing memory beyond the
         * 32-bit boundary, so the regular GATHER opcodes will always be
         * sufficient and whether or not the host1x is attached to an IOMMU
         * doesn't matter.
         */
        if (!domain && host1x_get_dma_mask(host1x) <= DMA_BIT_MASK(32))
                return true;

        return domain != NULL;
}

static int host1x_drm_probe(struct host1x_device *dev)
{
        struct tegra_drm *tegra;
        struct drm_device *drm;
        int err;

        drm = drm_dev_alloc(&tegra_drm_driver, &dev->dev);
        if (IS_ERR(drm))
                return PTR_ERR(drm);

        tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
        if (!tegra) {
                err = -ENOMEM;
                goto put;
        }

        if (host1x_drm_wants_iommu(dev) && iommu_present(&platform_bus_type)) {
                tegra->domain = iommu_domain_alloc(&platform_bus_type);
                if (!tegra->domain) {
                        err = -ENOMEM;
                        goto free;
                }

                err = iova_cache_get();
                if (err < 0)
                        goto domain;
        }

        mutex_init(&tegra->clients_lock);
        INIT_LIST_HEAD(&tegra->clients);

        dev_set_drvdata(&dev->dev, drm);
        drm->dev_private = tegra;
        tegra->drm = drm;

        drm_mode_config_init(drm);

        drm->mode_config.min_width = 0;
        drm->mode_config.min_height = 0;
        drm->mode_config.max_width = 0;
        drm->mode_config.max_height = 0;

        drm->mode_config.normalize_zpos = true;

        drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
        drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;

        err = tegra_drm_fb_prepare(drm);
        if (err < 0)
                goto config;

        drm_kms_helper_poll_init(drm);

        err = host1x_device_init(dev);
        if (err < 0)
                goto fbdev;

        /*
         * Now that all display controllers have been initialized, the
         * maximum supported resolution is known and the bitmasks for the
         * horizontal and vertical bitfields can be computed.
         */
        tegra->hmask = drm->mode_config.max_width - 1;
        tegra->vmask = drm->mode_config.max_height - 1;

        if (tegra->use_explicit_iommu) {
                u64 carveout_start, carveout_end, gem_start, gem_end;
                u64 dma_mask = dma_get_mask(&dev->dev);
                dma_addr_t start, end;
                unsigned long order;

                start = tegra->domain->geometry.aperture_start & dma_mask;
                end = tegra->domain->geometry.aperture_end & dma_mask;

                gem_start = start;
                gem_end = end - CARVEOUT_SZ;
                carveout_start = gem_end + 1;
                carveout_end = end;

                order = __ffs(tegra->domain->pgsize_bitmap);
                init_iova_domain(&tegra->carveout.domain, 1UL << order,
                                 carveout_start >> order);

                tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
                tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

                drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
                mutex_init(&tegra->mm_lock);

                DRM_DEBUG_DRIVER("IOMMU apertures:\n");
                DRM_DEBUG_DRIVER("  GEM: %#llx-%#llx\n", gem_start, gem_end);
                DRM_DEBUG_DRIVER("  Carveout: %#llx-%#llx\n", carveout_start,
                                 carveout_end);
        } else if (tegra->domain) {
                iommu_domain_free(tegra->domain);
                tegra->domain = NULL;
                iova_cache_put();
        }

        if (tegra->hub) {
                err = tegra_display_hub_prepare(tegra->hub);
                if (err < 0)
                        goto device;
        }

        /* syncpoints are used for full 32-bit hardware VBLANK counters */
        drm->max_vblank_count = 0xffffffff;

        err = drm_vblank_init(drm, drm->mode_config.num_crtc);
        if (err < 0)
                goto hub;

        drm_mode_config_reset(drm);

        err = drm_aperture_remove_framebuffers(false, &tegra_drm_driver);
        if (err < 0)
                goto hub;

        err = tegra_drm_fb_init(drm);
        if (err < 0)
                goto hub;

        err = drm_dev_register(drm, 0);
        if (err < 0)
                goto fb;

        return 0;

fb:
        tegra_drm_fb_exit(drm);
hub:
        if (tegra->hub)
                tegra_display_hub_cleanup(tegra->hub);
device:
        if (tegra->domain) {
                mutex_destroy(&tegra->mm_lock);
                drm_mm_takedown(&tegra->mm);
                put_iova_domain(&tegra->carveout.domain);
                iova_cache_put();
        }

        host1x_device_exit(dev);
fbdev:
        drm_kms_helper_poll_fini(drm);
        tegra_drm_fb_free(drm);
config:
        drm_mode_config_cleanup(drm);
domain:
        if (tegra->domain)
                iommu_domain_free(tegra->domain);
free:
        kfree(tegra);
put:
        drm_dev_put(drm);
        return err;
}

static int host1x_drm_remove(struct host1x_device *dev)
{
        struct drm_device *drm = dev_get_drvdata(&dev->dev);
        struct tegra_drm *tegra = drm->dev_private;
        int err;

        drm_dev_unregister(drm);

        drm_kms_helper_poll_fini(drm);
        tegra_drm_fb_exit(drm);
        drm_atomic_helper_shutdown(drm);
        drm_mode_config_cleanup(drm);

        if (tegra->hub)
                tegra_display_hub_cleanup(tegra->hub);

        err = host1x_device_exit(dev);
        if (err < 0)
                dev_err(&dev->dev, "host1x device cleanup failed: %d\n", err);

        if (tegra->domain) {
                mutex_destroy(&tegra->mm_lock);
                drm_mm_takedown(&tegra->mm);
                put_iova_domain(&tegra->carveout.domain);
                iova_cache_put();
                iommu_domain_free(tegra->domain);
        }

        kfree(tegra);
        drm_dev_put(drm);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);

        return drm_mode_config_helper_suspend(drm);
}

static int host1x_drm_resume(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);

        return drm_mode_config_helper_resume(drm);
}
#endif

static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
                         host1x_drm_resume);

static const struct of_device_id host1x_drm_subdevs[] = {
        { .compatible = "nvidia,tegra20-dc", },
        { .compatible = "nvidia,tegra20-hdmi", },
        { .compatible = "nvidia,tegra20-gr2d", },
        { .compatible = "nvidia,tegra20-gr3d", },
        { .compatible = "nvidia,tegra30-dc", },
        { .compatible = "nvidia,tegra30-hdmi", },
        { .compatible = "nvidia,tegra30-gr2d", },
        { .compatible = "nvidia,tegra30-gr3d", },
        { .compatible = "nvidia,tegra114-dc", },
        { .compatible = "nvidia,tegra114-dsi", },
        { .compatible = "nvidia,tegra114-hdmi", },
        { .compatible = "nvidia,tegra114-gr2d", },
        { .compatible = "nvidia,tegra114-gr3d", },
        { .compatible = "nvidia,tegra124-dc", },
        { .compatible = "nvidia,tegra124-sor", },
        { .compatible = "nvidia,tegra124-hdmi", },
        { .compatible = "nvidia,tegra124-dsi", },
        { .compatible = "nvidia,tegra124-vic", },
        { .compatible = "nvidia,tegra132-dsi", },
        { .compatible = "nvidia,tegra210-dc", },
        { .compatible = "nvidia,tegra210-dsi", },
        { .compatible = "nvidia,tegra210-sor", },
        { .compatible = "nvidia,tegra210-sor1", },
        { .compatible = "nvidia,tegra210-vic", },
        { .compatible = "nvidia,tegra210-nvdec", },
        { .compatible = "nvidia,tegra186-display", },
        { .compatible = "nvidia,tegra186-dc", },
        { .compatible = "nvidia,tegra186-sor", },
        { .compatible = "nvidia,tegra186-sor1", },
        { .compatible = "nvidia,tegra186-vic", },
        { .compatible = "nvidia,tegra186-nvdec", },
        { .compatible = "nvidia,tegra194-display", },
        { .compatible = "nvidia,tegra194-dc", },
        { .compatible = "nvidia,tegra194-sor", },
        { .compatible = "nvidia,tegra194-vic", },
        { .compatible = "nvidia,tegra194-nvdec", },
        { /* sentinel */ }
};

static struct host1x_driver host1x_drm_driver = {
        .driver = {
                .name = "drm",
                .pm = &host1x_drm_pm_ops,
        },
        .probe = host1x_drm_probe,
        .remove = host1x_drm_remove,
        .subdevs = host1x_drm_subdevs,
};

static struct platform_driver * const drivers[] = {
        &tegra_display_hub_driver,
        &tegra_dc_driver,
        &tegra_hdmi_driver,
        &tegra_dsi_driver,
        &tegra_dpaux_driver,
        &tegra_sor_driver,
        &tegra_gr2d_driver,
        &tegra_gr3d_driver,
        &tegra_vic_driver,
        &tegra_nvdec_driver,
};

static int __init host1x_drm_init(void)
{
        int err;

        err = host1x_driver_register(&host1x_drm_driver);
        if (err < 0)
                return err;

        err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
        if (err < 0)
                goto unregister_host1x;

        return 0;

unregister_host1x:
        host1x_driver_unregister(&host1x_drm_driver);
        return err;
}
module_init(host1x_drm_init);

static void __exit host1x_drm_exit(void)
{
        platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
        host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");