linux/drivers/gpu/drm/tegra/drm.c
/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

#include "drm.h"
#include "gem.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

#define CARVEOUT_SZ SZ_64M
#define CDMA_GATHER_FETCHES_MAX_NB 16383

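/* Per-file driver state: channel contexts opened through this file, by ID. */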
struct tegra_drm_file {
        struct idr contexts;
        struct mutex lock;
};

static void tegra_atomic_schedule(struct tegra_drm *tegra,
                                  struct drm_atomic_state *state)
{
        tegra->commit.state = state;
        schedule_work(&tegra->commit.work);
}

static void tegra_atomic_complete(struct tegra_drm *tegra,
                                  struct drm_atomic_state *state)
{
        struct drm_device *drm = tegra->drm;

        /*
         * Everything below can be run asynchronously without the need to grab
         * any modeset locks at all under one condition: it must be guaranteed
         * that the asynchronous work has either been cancelled (if the driver
         * supports it, which at least requires that the framebuffers get
         * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
         * before the new state gets committed on the software side with
         * drm_atomic_helper_swap_state().
         *
         * This scheme allows new atomic state updates to be prepared and
         * checked in parallel to the asynchronous completion of the previous
         * update. This is important since compositors need to figure out the
         * composition of the next frame right after having submitted the
         * current layout.
         */

        drm_atomic_helper_commit_modeset_disables(drm, state);
        drm_atomic_helper_commit_modeset_enables(drm, state);
        drm_atomic_helper_commit_planes(drm, state,
                                        DRM_PLANE_COMMIT_ACTIVE_ONLY);

        drm_atomic_helper_wait_for_vblanks(drm, state);

        drm_atomic_helper_cleanup_planes(drm, state);
        drm_atomic_state_put(state);
}

static void tegra_atomic_work(struct work_struct *work)
{
        struct tegra_drm *tegra = container_of(work, struct tegra_drm,
                                               commit.work);

        tegra_atomic_complete(tegra, tegra->commit.state);
}

static int tegra_atomic_commit(struct drm_device *drm,
                               struct drm_atomic_state *state, bool nonblock)
{
        struct tegra_drm *tegra = drm->dev_private;
        int err;

        err = drm_atomic_helper_prepare_planes(drm, state);
        if (err)
                return err;

        /* serialize outstanding nonblocking commits */
        mutex_lock(&tegra->commit.lock);
        flush_work(&tegra->commit.work);

        /*
         * This is the point of no return: nothing below can fail, short of
         * the hardware malfunctioning, which means we can commit the new
         * state on the software side now.
         */

        err = drm_atomic_helper_swap_state(state, true);
        if (err) {
                mutex_unlock(&tegra->commit.lock);
                drm_atomic_helper_cleanup_planes(drm, state);
                return err;
        }

        drm_atomic_state_get(state);
        if (nonblock)
                tegra_atomic_schedule(tegra, state);
        else
                tegra_atomic_complete(tegra, state);

        mutex_unlock(&tegra->commit.lock);
        return 0;
}

static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
        .fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
        .output_poll_changed = tegra_fb_output_poll_changed,
#endif
        .atomic_check = drm_atomic_helper_check,
        .atomic_commit = tegra_atomic_commit,
};

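/*
 * Driver load: when an IOMMU is available, the top CARVEOUT_SZ bytes of its
 * aperture are reserved as a carveout for kernel-internal allocations made
 * through tegra_drm_alloc(); the remainder is handed to the DRM MM allocator
 * for GEM objects.
 */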
static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
        struct host1x_device *device = to_host1x_device(drm->dev);
        struct tegra_drm *tegra;
        int err;

        tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
        if (!tegra)
                return -ENOMEM;

        if (iommu_present(&platform_bus_type)) {
                u64 carveout_start, carveout_end, gem_start, gem_end;
                struct iommu_domain_geometry *geometry;
                unsigned long order;

                tegra->domain = iommu_domain_alloc(&platform_bus_type);
                if (!tegra->domain) {
                        err = -ENOMEM;
                        goto free;
                }

                geometry = &tegra->domain->geometry;
                gem_start = geometry->aperture_start;
                gem_end = geometry->aperture_end - CARVEOUT_SZ;
                carveout_start = gem_end + 1;
                carveout_end = geometry->aperture_end;

                order = __ffs(tegra->domain->pgsize_bitmap);
                init_iova_domain(&tegra->carveout.domain, 1UL << order,
                                 carveout_start >> order,
                                 carveout_end >> order);

                tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
                tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

                drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
                mutex_init(&tegra->mm_lock);

                DRM_DEBUG("IOMMU apertures:\n");
                DRM_DEBUG("  GEM: %#llx-%#llx\n", gem_start, gem_end);
                DRM_DEBUG("  Carveout: %#llx-%#llx\n", carveout_start,
                          carveout_end);
        }

        mutex_init(&tegra->clients_lock);
        INIT_LIST_HEAD(&tegra->clients);

        mutex_init(&tegra->commit.lock);
        INIT_WORK(&tegra->commit.work, tegra_atomic_work);

        drm->dev_private = tegra;
        tegra->drm = drm;

        drm_mode_config_init(drm);

        drm->mode_config.min_width = 0;
        drm->mode_config.min_height = 0;

        drm->mode_config.max_width = 4096;
        drm->mode_config.max_height = 4096;

        drm->mode_config.allow_fb_modifiers = true;

        drm->mode_config.funcs = &tegra_drm_mode_funcs;

        err = tegra_drm_fb_prepare(drm);
        if (err < 0)
                goto config;

        drm_kms_helper_poll_init(drm);

        err = host1x_device_init(device);
        if (err < 0)
                goto fbdev;

        /*
         * We don't use the drm_irq_install() helpers provided by the DRM
         * core, so we need to set this manually in order to allow
         * DRM_IOCTL_WAIT_VBLANK to operate correctly.
         */
        drm->irq_enabled = true;

        /* syncpoints are used for full 32-bit hardware VBLANK counters */
        drm->max_vblank_count = 0xffffffff;

        err = drm_vblank_init(drm, drm->mode_config.num_crtc);
        if (err < 0)
                goto device;

        drm_mode_config_reset(drm);

        err = tegra_drm_fb_init(drm);
        if (err < 0)
                goto device;

        return 0;

device:
        host1x_device_exit(device);
fbdev:
        drm_kms_helper_poll_fini(drm);
        tegra_drm_fb_free(drm);
config:
        drm_mode_config_cleanup(drm);

        if (tegra->domain) {
                iommu_domain_free(tegra->domain);
                drm_mm_takedown(&tegra->mm);
                mutex_destroy(&tegra->mm_lock);
                put_iova_domain(&tegra->carveout.domain);
        }
free:
        kfree(tegra);
        return err;
}

static void tegra_drm_unload(struct drm_device *drm)
{
        struct host1x_device *device = to_host1x_device(drm->dev);
        struct tegra_drm *tegra = drm->dev_private;
        int err;

        drm_kms_helper_poll_fini(drm);
        tegra_drm_fb_exit(drm);
        drm_mode_config_cleanup(drm);

        err = host1x_device_exit(device);
        if (err < 0)
                return;

        if (tegra->domain) {
                iommu_domain_free(tegra->domain);
                drm_mm_takedown(&tegra->mm);
                mutex_destroy(&tegra->mm_lock);
                put_iova_domain(&tegra->carveout.domain);
        }

        kfree(tegra);
}

static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
        struct tegra_drm_file *fpriv;

        fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
        if (!fpriv)
                return -ENOMEM;

        idr_init(&fpriv->contexts);
        mutex_init(&fpriv->lock);
        filp->driver_priv = fpriv;

        return 0;
}

static void tegra_drm_context_free(struct tegra_drm_context *context)
{
        context->client->ops->close_channel(context);
        kfree(context);
}

static void tegra_drm_lastclose(struct drm_device *drm)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
        struct tegra_drm *tegra = drm->dev_private;

        tegra_fbdev_restore_mode(tegra->fbdev);
#endif
}

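/*
 * Resolve a GEM handle to the underlying host1x buffer object; returns NULL
 * if the handle does not name a valid object.
 */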
static struct host1x_bo *
host1x_bo_lookup(struct drm_file *file, u32 handle)
{
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        gem = drm_gem_object_lookup(file, handle);
        if (!gem)
                return NULL;

        bo = to_tegra_bo(gem);
        return &bo->base;
}

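/*
 * Copy a relocation descriptor from userspace, resolving the cmdbuf and
 * target handles to buffer objects as part of the copy.
 */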
static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
                                       struct drm_tegra_reloc __user *src,
                                       struct drm_device *drm,
                                       struct drm_file *file)
{
        u32 cmdbuf, target;
        int err;

        err = get_user(cmdbuf, &src->cmdbuf.handle);
        if (err < 0)
                return err;

        err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
        if (err < 0)
                return err;

        err = get_user(target, &src->target.handle);
        if (err < 0)
                return err;

        err = get_user(dest->target.offset, &src->target.offset);
        if (err < 0)
                return err;

        err = get_user(dest->shift, &src->shift);
        if (err < 0)
                return err;

        dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
        if (!dest->cmdbuf.bo)
                return -ENOENT;

        dest->target.bo = host1x_bo_lookup(file, target);
        if (!dest->target.bo)
                return -ENOENT;

        return 0;
}

static int host1x_waitchk_copy_from_user(struct host1x_waitchk *dest,
                                         struct drm_tegra_waitchk __user *src,
                                         struct drm_file *file)
{
        u32 cmdbuf;
        int err;

        err = get_user(cmdbuf, &src->handle);
        if (err < 0)
                return err;

        err = get_user(dest->offset, &src->offset);
        if (err < 0)
                return err;

        err = get_user(dest->syncpt_id, &src->syncpt);
        if (err < 0)
                return err;

        err = get_user(dest->thresh, &src->thresh);
        if (err < 0)
                return err;

        dest->bo = host1x_bo_lookup(file, cmdbuf);
        if (!dest->bo)
                return -ENOENT;

        return 0;
}

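/*
 * Copy a submission from userspace, validate the command buffer, relocation
 * and waitchk descriptors, pin the referenced buffer objects and hand the
 * resulting job to host1x. On success, args->fence carries the syncpoint
 * threshold that userspace can wait on for completion.
 */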
int tegra_drm_submit(struct tegra_drm_context *context,
                     struct drm_tegra_submit *args, struct drm_device *drm,
                     struct drm_file *file)
{
        unsigned int num_cmdbufs = args->num_cmdbufs;
        unsigned int num_relocs = args->num_relocs;
        unsigned int num_waitchks = args->num_waitchks;
        struct drm_tegra_cmdbuf __user *cmdbufs =
                (void __user *)(uintptr_t)args->cmdbufs;
        struct drm_tegra_reloc __user *relocs =
                (void __user *)(uintptr_t)args->relocs;
        struct drm_tegra_waitchk __user *waitchks =
                (void __user *)(uintptr_t)args->waitchks;
        struct drm_tegra_syncpt syncpt;
        struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_gem_object **refs;
        struct host1x_syncpt *sp;
        struct host1x_job *job;
        unsigned int num_refs;
        int err;

        /* We don't yet support anything other than one syncpt_incr struct per submit */
        if (args->num_syncpts != 1)
                return -EINVAL;

        /* We don't yet support waitchks */
        if (args->num_waitchks != 0)
                return -EINVAL;

        job = host1x_job_alloc(context->channel, args->num_cmdbufs,
                               args->num_relocs, args->num_waitchks);
        if (!job)
                return -ENOMEM;

        job->num_relocs = args->num_relocs;
        job->num_waitchk = args->num_waitchks;
        job->client = (u32)args->context;
        job->class = context->client->base.class;
        job->serialize = true;

        /*
         * Track referenced BOs so that they can be unreferenced after the
         * submission is complete.
         */
        num_refs = num_cmdbufs + num_relocs * 2 + num_waitchks;

        refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
        if (!refs) {
                err = -ENOMEM;
                goto put;
        }

        /* reuse as an iterator later */
        num_refs = 0;

        while (num_cmdbufs) {
                struct drm_tegra_cmdbuf cmdbuf;
                struct host1x_bo *bo;
                struct tegra_bo *obj;
                u64 offset;

                if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
                        err = -EFAULT;
                        goto fail;
                }

                /*
                 * The maximum number of CDMA gather fetches is 16383; a
                 * higher value means the word count is malformed.
                 */
                if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
                        err = -EINVAL;
                        goto fail;
                }

                bo = host1x_bo_lookup(file, cmdbuf.handle);
                if (!bo) {
                        err = -ENOENT;
                        goto fail;
                }

                offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
                obj = host1x_to_tegra_bo(bo);
                refs[num_refs++] = &obj->gem;

                /*
                 * The gather buffer base address must be 4-byte aligned; an
                 * unaligned offset is malformed and causes command stream
                 * corruption during buffer address relocation.
                 */
                if (offset & 3 || offset >= obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }

                host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
                num_cmdbufs--;
                cmdbufs++;
        }

        /* copy and resolve relocations from submit */
        while (num_relocs--) {
                struct host1x_reloc *reloc;
                struct tegra_bo *obj;

                err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
                                                  &relocs[num_relocs], drm,
                                                  file);
                if (err < 0)
                        goto fail;

                reloc = &job->relocarray[num_relocs];
                obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
                refs[num_refs++] = &obj->gem;

                /*
                 * An unaligned cmdbuf offset will cause an unaligned write
                 * during relocation patching, corrupting the command stream.
                 */
                if (reloc->cmdbuf.offset & 3 ||
                    reloc->cmdbuf.offset >= obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }

                obj = host1x_to_tegra_bo(reloc->target.bo);
                refs[num_refs++] = &obj->gem;

                if (reloc->target.offset >= obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }
        }

        /* copy and resolve waitchks from submit */
        while (num_waitchks--) {
                struct host1x_waitchk *wait = &job->waitchk[num_waitchks];
                struct tegra_bo *obj;

                err = host1x_waitchk_copy_from_user(wait,
                                                    &waitchks[num_waitchks],
                                                    file);
                if (err < 0)
                        goto fail;

                obj = host1x_to_tegra_bo(wait->bo);
                refs[num_refs++] = &obj->gem;

                /*
                 * An unaligned offset will cause an unaligned write during
                 * waitchk patching, corrupting the command stream.
                 */
                if (wait->offset & 3 ||
                    wait->offset >= obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }
        }

        if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
                           sizeof(syncpt))) {
                err = -EFAULT;
                goto fail;
        }

        /* check whether syncpoint ID is valid */
        sp = host1x_syncpt_get(host1x, syncpt.id);
        if (!sp) {
                err = -ENOENT;
                goto fail;
        }

        job->is_addr_reg = context->client->ops->is_addr_reg;
        job->is_valid_class = context->client->ops->is_valid_class;
        job->syncpt_incrs = syncpt.incrs;
        job->syncpt_id = syncpt.id;
        job->timeout = 10000;

        if (args->timeout && args->timeout < 10000)
                job->timeout = args->timeout;

        err = host1x_job_pin(job, context->client->base.dev);
        if (err)
                goto fail;

        err = host1x_job_submit(job);
        if (err) {
                host1x_job_unpin(job);
                goto fail;
        }

        args->fence = job->syncpt_end;

fail:
        while (num_refs--)
                drm_gem_object_put_unlocked(refs[num_refs]);

        kfree(refs);

put:
        host1x_job_put(job);
        return err;
}

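/*
 * A minimal (illustrative) sketch of how userspace might drive this path,
 * assuming "fd" is an open device node, "ctx" came from
 * DRM_IOCTL_TEGRA_OPEN_CHANNEL, "bo" is a GEM handle holding "words"
 * commands and "id" is a syncpoint obtained via DRM_IOCTL_TEGRA_GET_SYNCPT;
 * error handling is omitted:
 *
 *      struct drm_tegra_cmdbuf cmdbuf = {
 *              .handle = bo,
 *              .offset = 0,
 *              .words = words,
 *      };
 *      struct drm_tegra_syncpt syncpt = { .id = id, .incrs = 1 };
 *      struct drm_tegra_submit submit = {
 *              .context = ctx,
 *              .num_syncpts = 1,
 *              .num_cmdbufs = 1,
 *              .cmdbufs = (uintptr_t)&cmdbuf,
 *              .syncpts = (uintptr_t)&syncpt,
 *      };
 *
 *      ioctl(fd, DRM_IOCTL_TEGRA_SUBMIT, &submit);
 *
 * On return, submit.fence holds the syncpoint threshold that can be passed
 * to DRM_IOCTL_TEGRA_SYNCPT_WAIT.
 */
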
#ifdef CONFIG_DRM_TEGRA_STAGING
static int tegra_gem_create(struct drm_device *drm, void *data,
                            struct drm_file *file)
{
        struct drm_tegra_gem_create *args = data;
        struct tegra_bo *bo;

        bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
                                         &args->handle);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        return 0;
}

static int tegra_gem_mmap(struct drm_device *drm, void *data,
                          struct drm_file *file)
{
        struct drm_tegra_gem_mmap *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -EINVAL;

        bo = to_tegra_bo(gem);

        args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

        drm_gem_object_put_unlocked(gem);

        return 0;
}

static int tegra_syncpt_read(struct drm_device *drm, void *data,
                             struct drm_file *file)
{
        struct host1x *host = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_read *args = data;
        struct host1x_syncpt *sp;

        sp = host1x_syncpt_get(host, args->id);
        if (!sp)
                return -EINVAL;

        args->value = host1x_syncpt_read_min(sp);
        return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
                             struct drm_file *file)
{
        struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_incr *args = data;
        struct host1x_syncpt *sp;

        sp = host1x_syncpt_get(host1x, args->id);
        if (!sp)
                return -EINVAL;

        return host1x_syncpt_incr(sp);
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
                             struct drm_file *file)
{
        struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_wait *args = data;
        struct host1x_syncpt *sp;

        sp = host1x_syncpt_get(host1x, args->id);
        if (!sp)
                return -EINVAL;

        return host1x_syncpt_wait(sp, args->thresh, args->timeout,
                                  &args->value);
}

static int tegra_client_open(struct tegra_drm_file *fpriv,
                             struct tegra_drm_client *client,
                             struct tegra_drm_context *context)
{
        int err;

        err = client->ops->open_channel(client, context);
        if (err < 0)
                return err;

        err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
        if (err < 0) {
                client->ops->close_channel(context);
                return err;
        }

        context->client = client;
        context->id = err;

        return 0;
}

static int tegra_open_channel(struct drm_device *drm, void *data,
                              struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct tegra_drm *tegra = drm->dev_private;
        struct drm_tegra_open_channel *args = data;
        struct tegra_drm_context *context;
        struct tegra_drm_client *client;
        int err = -ENODEV;

        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return -ENOMEM;

        mutex_lock(&fpriv->lock);

        list_for_each_entry(client, &tegra->clients, list)
                if (client->base.class == args->client) {
                        err = tegra_client_open(fpriv, client, context);
                        if (err < 0)
                                break;

                        args->context = context->id;
                        break;
                }

        if (err < 0)
                kfree(context);

        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_close_channel(struct drm_device *drm, void *data,
                               struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_close_channel *args = data;
        struct tegra_drm_context *context;
        int err = 0;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->contexts, args->context);
        if (!context) {
                err = -EINVAL;
                goto unlock;
        }

        idr_remove(&fpriv->contexts, context->id);
        tegra_drm_context_free(context);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_get_syncpt(struct drm_device *drm, void *data,
                            struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_get_syncpt *args = data;
        struct tegra_drm_context *context;
        struct host1x_syncpt *syncpt;
        int err = 0;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
        }

        if (args->index >= context->client->base.num_syncpts) {
                err = -EINVAL;
                goto unlock;
        }

        syncpt = context->client->base.syncpts[args->index];
        args->id = host1x_syncpt_id(syncpt);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_submit(struct drm_device *drm, void *data,
                        struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_submit *args = data;
        struct tegra_drm_context *context;
        int err;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
        }

        err = context->client->ops->submit(context, args, drm, file);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
                                 struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_get_syncpt_base *args = data;
        struct tegra_drm_context *context;
        struct host1x_syncpt_base *base;
        struct host1x_syncpt *syncpt;
        int err = 0;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
        }

        if (args->syncpt >= context->client->base.num_syncpts) {
                err = -EINVAL;
                goto unlock;
        }

        syncpt = context->client->base.syncpts[args->syncpt];

        base = host1x_syncpt_get_base(syncpt);
        if (!base) {
                err = -ENXIO;
                goto unlock;
        }

        args->id = host1x_syncpt_base_id(base);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
                                struct drm_file *file)
{
        struct drm_tegra_gem_set_tiling *args = data;
        enum tegra_bo_tiling_mode mode;
        struct drm_gem_object *gem;
        unsigned long value = 0;
        struct tegra_bo *bo;

        switch (args->mode) {
        case DRM_TEGRA_GEM_TILING_MODE_PITCH:
                mode = TEGRA_BO_TILING_MODE_PITCH;

                if (args->value != 0)
                        return -EINVAL;

                break;

        case DRM_TEGRA_GEM_TILING_MODE_TILED:
                mode = TEGRA_BO_TILING_MODE_TILED;

                if (args->value != 0)
                        return -EINVAL;

                break;

        case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
                mode = TEGRA_BO_TILING_MODE_BLOCK;

                if (args->value > 5)
                        return -EINVAL;

                value = args->value;
                break;

        default:
                return -EINVAL;
        }

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);

        bo->tiling.mode = mode;
        bo->tiling.value = value;

        drm_gem_object_put_unlocked(gem);

        return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
                                struct drm_file *file)
{
        struct drm_tegra_gem_get_tiling *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;
        int err = 0;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);

        switch (bo->tiling.mode) {
        case TEGRA_BO_TILING_MODE_PITCH:
                args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
                args->value = 0;
                break;

        case TEGRA_BO_TILING_MODE_TILED:
                args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
                args->value = 0;
                break;

        case TEGRA_BO_TILING_MODE_BLOCK:
                args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
                args->value = bo->tiling.value;
                break;

        default:
                err = -EINVAL;
                break;
        }

        drm_gem_object_put_unlocked(gem);

        return err;
}

static int tegra_gem_set_flags(struct drm_device *drm, void *data,
                               struct drm_file *file)
{
        struct drm_tegra_gem_set_flags *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
                return -EINVAL;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);
        bo->flags = 0;

        if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
                bo->flags |= TEGRA_BO_BOTTOM_UP;

        drm_gem_object_put_unlocked(gem);

        return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
                               struct drm_file *file)
{
        struct drm_tegra_gem_get_flags *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);
        args->flags = 0;

        if (bo->flags & TEGRA_BO_BOTTOM_UP)
                args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

        drm_gem_object_put_unlocked(gem);

        return 0;
}
#endif

static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
#endif
};

static const struct file_operations tegra_drm_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .mmap = tegra_drm_mmap,
        .poll = drm_poll,
        .read = drm_read,
        .compat_ioctl = drm_compat_ioctl,
        .llseek = noop_llseek,
};

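/* IDR iteration callback used at file close to release leftover contexts. */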
static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
        struct tegra_drm_context *context = p;

        tegra_drm_context_free(context);

        return 0;
}

static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;

        mutex_lock(&fpriv->lock);
        idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
        mutex_unlock(&fpriv->lock);

        idr_destroy(&fpriv->contexts);
        mutex_destroy(&fpriv->lock);
        kfree(fpriv);
}

#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)s->private;
        struct drm_device *drm = node->minor->dev;
        struct drm_framebuffer *fb;

        mutex_lock(&drm->mode_config.fb_lock);

        list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
                seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
                           fb->base.id, fb->width, fb->height,
                           fb->format->depth,
                           fb->format->cpp[0] * 8,
                           drm_framebuffer_read_refcount(fb));
        }

        mutex_unlock(&drm->mode_config.fb_lock);

        return 0;
}

static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)s->private;
        struct drm_device *drm = node->minor->dev;
        struct tegra_drm *tegra = drm->dev_private;
        struct drm_printer p = drm_seq_file_printer(s);

        if (tegra->domain) {
                mutex_lock(&tegra->mm_lock);
                drm_mm_print(&tegra->mm, &p);
                mutex_unlock(&tegra->mm_lock);
        }

        return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
        { "framebuffers", tegra_debugfs_framebuffers, 0 },
        { "iova", tegra_debugfs_iova, 0 },
};

static int tegra_debugfs_init(struct drm_minor *minor)
{
        return drm_debugfs_create_files(tegra_debugfs_list,
                                        ARRAY_SIZE(tegra_debugfs_list),
                                        minor->debugfs_root, minor);
}
#endif

static struct drm_driver tegra_drm_driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
                           DRIVER_ATOMIC | DRIVER_RENDER,
        .load = tegra_drm_load,
        .unload = tegra_drm_unload,
        .open = tegra_drm_open,
        .postclose = tegra_drm_postclose,
        .lastclose = tegra_drm_lastclose,

#if defined(CONFIG_DEBUG_FS)
        .debugfs_init = tegra_debugfs_init,
#endif

        .gem_free_object_unlocked = tegra_bo_free_object,
        .gem_vm_ops = &tegra_bo_vm_ops,

        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export = tegra_gem_prime_export,
        .gem_prime_import = tegra_gem_prime_import,

        .dumb_create = tegra_bo_dumb_create,

        .ioctls = tegra_drm_ioctls,
        .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
        .fops = &tegra_drm_fops,

        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
        .major = DRIVER_MAJOR,
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
};

int tegra_drm_register_client(struct tegra_drm *tegra,
                              struct tegra_drm_client *client)
{
        mutex_lock(&tegra->clients_lock);
        list_add_tail(&client->list, &tegra->clients);
        mutex_unlock(&tegra->clients_lock);

        return 0;
}

int tegra_drm_unregister_client(struct tegra_drm *tegra,
                                struct tegra_drm_client *client)
{
        mutex_lock(&tegra->clients_lock);
        list_del_init(&client->list);
        mutex_unlock(&tegra->clients_lock);

        return 0;
}

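/*
 * Allocate kernel-internal memory that is visible to the device: pages are
 * mapped through the IOMMU carveout when a domain exists, otherwise the
 * physical address is returned directly. Used, for example, to allocate
 * firmware buffers for the falcon-based units.
 */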
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size,
                      dma_addr_t *dma)
{
        struct iova *alloc;
        void *virt;
        gfp_t gfp;
        int err;

        if (tegra->domain)
                size = iova_align(&tegra->carveout.domain, size);
        else
                size = PAGE_ALIGN(size);

        gfp = GFP_KERNEL | __GFP_ZERO;
        if (!tegra->domain) {
                /*
                 * Many units only support 32-bit addresses, even on 64-bit
                 * SoCs. If there is no IOMMU to translate into a 32-bit IO
                 * virtual address space, force allocations to be in the
                 * lower 32-bit range.
                 */
                gfp |= GFP_DMA;
        }

        virt = (void *)__get_free_pages(gfp, get_order(size));
        if (!virt)
                return ERR_PTR(-ENOMEM);

        if (!tegra->domain) {
                /*
                 * If IOMMU is disabled, devices address physical memory
                 * directly.
                 */
                *dma = virt_to_phys(virt);
                return virt;
        }

        alloc = alloc_iova(&tegra->carveout.domain,
                           size >> tegra->carveout.shift,
                           tegra->carveout.limit, true);
        if (!alloc) {
                err = -EBUSY;
                goto free_pages;
        }

        *dma = iova_dma_addr(&tegra->carveout.domain, alloc);
        err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
                        size, IOMMU_READ | IOMMU_WRITE);
        if (err < 0)
                goto free_iova;

        return virt;

free_iova:
        __free_iova(&tegra->carveout.domain, alloc);
free_pages:
        free_pages((unsigned long)virt, get_order(size));

        return ERR_PTR(err);
}

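/*
 * Counterpart to tegra_drm_alloc(): unmaps the buffer from the carveout (if
 * an IOMMU domain is in use) and releases the pages.
 */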
void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
                    dma_addr_t dma)
{
        if (tegra->domain)
                size = iova_align(&tegra->carveout.domain, size);
        else
                size = PAGE_ALIGN(size);

        if (tegra->domain) {
                iommu_unmap(tegra->domain, dma, size);
                free_iova(&tegra->carveout.domain,
                          iova_pfn(&tegra->carveout.domain, dma));
        }

        free_pages((unsigned long)virt, get_order(size));
}

static int host1x_drm_probe(struct host1x_device *dev)
{
        struct drm_driver *driver = &tegra_drm_driver;
        struct drm_device *drm;
        int err;

        drm = drm_dev_alloc(driver, &dev->dev);
        if (IS_ERR(drm))
                return PTR_ERR(drm);

        dev_set_drvdata(&dev->dev, drm);

        err = drm_dev_register(drm, 0);
        if (err < 0)
                goto unref;

        return 0;

unref:
        drm_dev_unref(drm);
        return err;
}

static int host1x_drm_remove(struct host1x_device *dev)
{
        struct drm_device *drm = dev_get_drvdata(&dev->dev);

        drm_dev_unregister(drm);
        drm_dev_unref(drm);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct tegra_drm *tegra = drm->dev_private;

        drm_kms_helper_poll_disable(drm);
        tegra_drm_fb_suspend(drm);

        tegra->state = drm_atomic_helper_suspend(drm);
        if (IS_ERR(tegra->state)) {
                tegra_drm_fb_resume(drm);
                drm_kms_helper_poll_enable(drm);
                return PTR_ERR(tegra->state);
        }

        return 0;
}

static int host1x_drm_resume(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct tegra_drm *tegra = drm->dev_private;

        drm_atomic_helper_resume(drm, tegra->state);
        tegra_drm_fb_resume(drm);
        drm_kms_helper_poll_enable(drm);

        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
                         host1x_drm_resume);

static const struct of_device_id host1x_drm_subdevs[] = {
        { .compatible = "nvidia,tegra20-dc", },
        { .compatible = "nvidia,tegra20-hdmi", },
        { .compatible = "nvidia,tegra20-gr2d", },
        { .compatible = "nvidia,tegra20-gr3d", },
        { .compatible = "nvidia,tegra30-dc", },
        { .compatible = "nvidia,tegra30-hdmi", },
        { .compatible = "nvidia,tegra30-gr2d", },
        { .compatible = "nvidia,tegra30-gr3d", },
        { .compatible = "nvidia,tegra114-dsi", },
        { .compatible = "nvidia,tegra114-hdmi", },
        { .compatible = "nvidia,tegra114-gr3d", },
        { .compatible = "nvidia,tegra124-dc", },
        { .compatible = "nvidia,tegra124-sor", },
        { .compatible = "nvidia,tegra124-hdmi", },
        { .compatible = "nvidia,tegra124-dsi", },
        { .compatible = "nvidia,tegra124-vic", },
        { .compatible = "nvidia,tegra132-dsi", },
        { .compatible = "nvidia,tegra210-dc", },
        { .compatible = "nvidia,tegra210-dsi", },
        { .compatible = "nvidia,tegra210-sor", },
        { .compatible = "nvidia,tegra210-sor1", },
        { .compatible = "nvidia,tegra210-vic", },
        { /* sentinel */ }
};

static struct host1x_driver host1x_drm_driver = {
        .driver = {
                .name = "drm",
                .pm = &host1x_drm_pm_ops,
        },
        .probe = host1x_drm_probe,
        .remove = host1x_drm_remove,
        .subdevs = host1x_drm_subdevs,
};

static struct platform_driver * const drivers[] = {
        &tegra_dc_driver,
        &tegra_hdmi_driver,
        &tegra_dsi_driver,
        &tegra_dpaux_driver,
        &tegra_sor_driver,
        &tegra_gr2d_driver,
        &tegra_gr3d_driver,
        &tegra_vic_driver,
};

static int __init host1x_drm_init(void)
{
        int err;

        err = host1x_driver_register(&host1x_drm_driver);
        if (err < 0)
                return err;

        err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
        if (err < 0)
                goto unregister_host1x;

        return 0;

unregister_host1x:
        host1x_driver_unregister(&host1x_drm_driver);
        return err;
}
module_init(host1x_drm_init);

static void __exit host1x_drm_exit(void)
{
        platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
        host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");