linux/drivers/gpu/drm/i915/i915_gem_context.c
   1/*
   2 * Copyright © 2011-2012 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *    Ben Widawsky <ben@bwidawsk.net>
  25 *
  26 */
  27
  28/*
  29 * This file implements HW context support. On gen5+ a HW context consists of an
  30 * opaque GPU object which is referenced at times of context saves and restores.
   31 * With RC6 enabled, the context is also referenced as the GPU enters and exits
   32 * from RC6 (the GPU has its own internal power context, except on gen5). Though
  33 * something like a context does exist for the media ring, the code only
  34 * supports contexts for the render ring.
  35 *
  36 * In software, there is a distinction between contexts created by the user,
  37 * and the default HW context. The default HW context is used by GPU clients
  38 * that do not request setup of their own hardware context. The default
  39 * context's state is never restored to help prevent programming errors. This
   40 * would happen if a client ran and piggy-backed off another client's GPU state.
   41 * The default context only exists to give the GPU some offset to load as the
   42 * current one and so trigger a save of the context we actually care about. In fact, the
  43 * code could likely be constructed, albeit in a more complicated fashion, to
  44 * never use the default context, though that limits the driver's ability to
  45 * swap out, and/or destroy other contexts.
  46 *
  47 * All other contexts are created as a request by the GPU client. These contexts
  48 * store GPU state, and thus allow GPU clients to not re-emit state (and
  49 * potentially query certain state) at any time. The kernel driver makes
  50 * certain that the appropriate commands are inserted.
  51 *
  52 * The context life cycle is semi-complicated in that context BOs may live
   53 * longer than the context itself because of the way the hardware and object
   54 * tracking work. Below is a very crude representation of the state machine
  55 * describing the context life.
  56 *                                         refcount     pincount     active
  57 * S0: initial state                          0            0           0
  58 * S1: context created                        1            0           0
  59 * S2: context is currently running           2            1           X
  60 * S3: GPU referenced, but not current        2            0           1
  61 * S4: context is current, but destroyed      1            1           0
  62 * S5: like S3, but destroyed                 1            0           1
  63 *
  64 * The most common (but not all) transitions:
  65 * S0->S1: client creates a context
  66 * S1->S2: client submits execbuf with context
   67 * S2->S3: another client submits an execbuf with its own context
  68 * S3->S1: context object was retired
   69 * S3->S2: the client submits another execbuf
  70 * S2->S4: context destroy called with current context
  71 * S3->S5->S0: destroy path
  72 * S4->S5->S0: destroy path on current context
  73 *
  74 * There are two confusing terms used above:
  75 *  The "current context" means the context which is currently running on the
  76 *  GPU. The GPU has loaded its state already and has stored away the gtt
  77 *  offset of the BO. The GPU is not actively referencing the data at this
  78 *  offset, but it will on the next context switch. The only way to avoid this
  79 *  is to do a GPU reset.
  80 *
   81 *  An "active context" is one which was previously the "current context" and is
  82 *  on the active list waiting for the next context switch to occur. Until this
  83 *  happens, the object must remain at the same gtt offset. It is therefore
   84 *  possible for a context to be destroyed while it is still active.
  85 *
  86 */
  87
  88#include <linux/log2.h>
  89#include <drm/drmP.h>
  90#include <drm/i915_drm.h>
  91#include "i915_drv.h"
  92#include "i915_trace.h"
  93#include "intel_workarounds.h"
  94
   95#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
  96
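/*
 * lut_close - tear down the context's handle lookup tables
 *
 * Frees every handle->vma LUT entry still linked to this context and empties
 * the handles_vma radix tree, releasing each object reference (unless the
 * object is still active). The radix tree walk restarts after every deletion
 * because removing an entry may shrink the tree under the iterator; see the
 * backport note below.
 */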
  97static void lut_close(struct i915_gem_context *ctx)
  98{
  99        struct i915_lut_handle *lut, *ln;
 100        struct radix_tree_iter iter;
 101        void __rcu **slot;
 102
 103        list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
 104                list_del(&lut->obj_link);
 105                kmem_cache_free(ctx->i915->luts, lut);
 106        }
 107
 108        /* backport note:  __i915_gem_object_reset_page_iter() does
 109         * radix_tree_delete() within radix_tree_for_each_slot().  That
 110         * *may* end up being safe just because the case where a node
 111         * in the radix tree is removed and the tree shrunk happens
 112         * when we reach the end of a node.
 113         *
  114         * TODO: see if we can get away with not restarting the loop
  115         * someday; this shouldn't be performance critical, so it can
  116         * wait until the bigger fires are out.
 117         */
 118        rcu_read_lock();
 119restart:
 120        radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
 121                struct i915_vma *vma = rcu_dereference_raw(*slot);
 122
 123                radix_tree_delete(&ctx->handles_vma, iter.index);
 124
 125                __i915_gem_object_release_unless_active(vma->obj);
 126                goto restart;
 127        }
 128        rcu_read_unlock();
 129}
 130
 131static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
 132{
 133        unsigned int max;
 134
 135        lockdep_assert_held(&i915->contexts.mutex);
 136
 137        if (INTEL_GEN(i915) >= 11)
 138                max = GEN11_MAX_CONTEXT_HW_ID;
 139        else if (USES_GUC_SUBMISSION(i915))
 140                /*
 141                 * When using GuC in proxy submission, GuC consumes the
 142                 * highest bit in the context id to indicate proxy submission.
 143                 */
 144                max = MAX_GUC_CONTEXT_HW_ID;
 145        else
 146                max = MAX_CONTEXT_HW_ID;
 147
 148        return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
 149}
 150
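/*
 * steal_hw_id - repossess a hw_id from an idle context
 *
 * Scans contexts.hw_id_list for a context whose id is not currently pinned,
 * unlinks it and returns its hw_id for reuse. Contexts that are still pinned
 * are rotated to the tail so the list keeps a "least recently scanned"
 * order. Returns -ENOSPC when every listed context is pinned.
 */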
 151static int steal_hw_id(struct drm_i915_private *i915)
 152{
 153        struct i915_gem_context *ctx, *cn;
 154        LIST_HEAD(pinned);
 155        int id = -ENOSPC;
 156
 157        lockdep_assert_held(&i915->contexts.mutex);
 158
 159        list_for_each_entry_safe(ctx, cn,
 160                                 &i915->contexts.hw_id_list, hw_id_link) {
 161                if (atomic_read(&ctx->hw_id_pin_count)) {
 162                        list_move_tail(&ctx->hw_id_link, &pinned);
 163                        continue;
 164                }
 165
 166                GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
 167                list_del_init(&ctx->hw_id_link);
 168                id = ctx->hw_id;
 169                break;
 170        }
 171
 172        /*
  173         * Remember how far we got on the last repossession scan, so the
 174         * list is kept in a "least recently scanned" order.
 175         */
 176        list_splice_tail(&pinned, &i915->contexts.hw_id_list);
 177        return id;
 178}
 179
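/*
 * assign_hw_id - allocate a hw_id for a context
 *
 * First tries the ida without invoking the oom-killer (__GFP_RETRY_MAYFAIL);
 * on failure it steals an id from an idle context, and only then retries a
 * plain GFP_KERNEL allocation so that the final error code is accurate.
 * Caller must hold contexts.mutex.
 */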
 180static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
 181{
 182        int ret;
 183
 184        lockdep_assert_held(&i915->contexts.mutex);
 185
 186        /*
 187         * We prefer to steal/stall ourselves and our users over that of the
 188         * entire system. That may be a little unfair to our users, and
 189         * even hurt high priority clients. The choice is whether to oomkill
 190         * something else, or steal a context id.
 191         */
 192        ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 193        if (unlikely(ret < 0)) {
 194                ret = steal_hw_id(i915);
 195                if (ret < 0) /* once again for the correct errno code */
 196                        ret = new_hw_id(i915, GFP_KERNEL);
 197                if (ret < 0)
 198                        return ret;
 199        }
 200
 201        *out = ret;
 202        return 0;
 203}
 204
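/*
 * release_hw_id - return a context's hw_id to the allocator
 *
 * Unlinks the context from contexts.hw_id_list and frees its id from hw_ida.
 * The unlocked list_empty() check lets contexts that never received an id
 * skip taking contexts.mutex.
 */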
 205static void release_hw_id(struct i915_gem_context *ctx)
 206{
 207        struct drm_i915_private *i915 = ctx->i915;
 208
 209        if (list_empty(&ctx->hw_id_link))
 210                return;
 211
 212        mutex_lock(&i915->contexts.mutex);
 213        if (!list_empty(&ctx->hw_id_link)) {
 214                ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
 215                list_del_init(&ctx->hw_id_link);
 216        }
 217        mutex_unlock(&i915->contexts.mutex);
 218}
 219
 220static void i915_gem_context_free(struct i915_gem_context *ctx)
 221{
 222        unsigned int n;
 223
 224        lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 225        GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 226
 227        release_hw_id(ctx);
 228        i915_ppgtt_put(ctx->ppgtt);
 229
 230        for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
 231                struct intel_context *ce = &ctx->__engine[n];
 232
 233                if (ce->ops)
 234                        ce->ops->destroy(ce);
 235        }
 236
 237        kfree(ctx->name);
 238        put_pid(ctx->pid);
 239
 240        list_del(&ctx->link);
 241
 242        kfree_rcu(ctx, rcu);
 243}
 244
 245static void contexts_free(struct drm_i915_private *i915)
 246{
 247        struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
 248        struct i915_gem_context *ctx, *cn;
 249
 250        lockdep_assert_held(&i915->drm.struct_mutex);
 251
 252        llist_for_each_entry_safe(ctx, cn, freed, free_link)
 253                i915_gem_context_free(ctx);
 254}
 255
 256static void contexts_free_first(struct drm_i915_private *i915)
 257{
 258        struct i915_gem_context *ctx;
 259        struct llist_node *freed;
 260
 261        lockdep_assert_held(&i915->drm.struct_mutex);
 262
 263        freed = llist_del_first(&i915->contexts.free_list);
 264        if (!freed)
 265                return;
 266
 267        ctx = container_of(freed, typeof(*ctx), free_link);
 268        i915_gem_context_free(ctx);
 269}
 270
 271static void contexts_free_worker(struct work_struct *work)
 272{
 273        struct drm_i915_private *i915 =
 274                container_of(work, typeof(*i915), contexts.free_work);
 275
 276        mutex_lock(&i915->drm.struct_mutex);
 277        contexts_free(i915);
 278        mutex_unlock(&i915->drm.struct_mutex);
 279}
 280
 281void i915_gem_context_release(struct kref *ref)
 282{
 283        struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
 284        struct drm_i915_private *i915 = ctx->i915;
 285
 286        trace_i915_context_free(ctx);
 287        if (llist_add(&ctx->free_link, &i915->contexts.free_list))
 288                queue_work(i915->wq, &i915->contexts.free_work);
 289}
 290
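/*
 * context_close - mark a context closed and drop the creator's reference
 *
 * The context may still be active on the GPU, so only the user-visible state
 * is torn down here: the hw_id is released, the handle LUT is cleared, the
 * ppgtt vma are closed and file_priv is poisoned. The final free happens
 * later, once the last reference is put.
 */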
 291static void context_close(struct i915_gem_context *ctx)
 292{
 293        i915_gem_context_set_closed(ctx);
 294
 295        /*
  296         * This context will never again be assigned to HW, so we can
 297         * reuse its ID for the next context.
 298         */
 299        release_hw_id(ctx);
 300
 301        /*
 302         * The LUT uses the VMA as a backpointer to unref the object,
 303         * so we need to clear the LUT before we close all the VMA (inside
 304         * the ppgtt).
 305         */
 306        lut_close(ctx);
 307        if (ctx->ppgtt)
 308                i915_ppgtt_close(&ctx->ppgtt->vm);
 309
 310        ctx->file_priv = ERR_PTR(-EBADF);
 311        i915_gem_context_put(ctx);
 312}
 313
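/*
 * default_desc_template - build the constant part of a gen8+ context
 * descriptor
 *
 * Encodes the valid/privilege bits, the addressing mode (32b vs 48b ppgtt)
 * and, on gen8, L3LLC coherency. The per-instance fields of the descriptor
 * are filled in later when the context is pinned for execution.
 */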
 314static u32 default_desc_template(const struct drm_i915_private *i915,
 315                                 const struct i915_hw_ppgtt *ppgtt)
 316{
 317        u32 address_mode;
 318        u32 desc;
 319
 320        desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
 321
 322        address_mode = INTEL_LEGACY_32B_CONTEXT;
 323        if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
 324                address_mode = INTEL_LEGACY_64B_CONTEXT;
 325        desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
 326
 327        if (IS_GEN8(i915))
 328                desc |= GEN8_CTX_L3LLC_COHERENT;
 329
 330        /* TODO: WaDisableLiteRestore when we start using semaphore
 331         * signalling between Command Streamers
 332         * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
 333         */
 334
 335        return desc;
 336}
 337
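/*
 * __create_hw_context - allocate and initialise the software context state
 *
 * Sets up the refcount, engine back-pointers, handle LUTs, the user handle
 * in the file's idr (DEFAULT_CONTEXT_HANDLE when there is no file_priv) and
 * a debug name. No ppgtt or hardware state is created here; that is the
 * caller's job (see i915_gem_create_context()).
 */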
 338static struct i915_gem_context *
 339__create_hw_context(struct drm_i915_private *dev_priv,
 340                    struct drm_i915_file_private *file_priv)
 341{
 342        struct i915_gem_context *ctx;
 343        unsigned int n;
 344        int ret;
 345
 346        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 347        if (ctx == NULL)
 348                return ERR_PTR(-ENOMEM);
 349
 350        kref_init(&ctx->ref);
 351        list_add_tail(&ctx->link, &dev_priv->contexts.list);
 352        ctx->i915 = dev_priv;
 353        ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
 354
 355        for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
 356                struct intel_context *ce = &ctx->__engine[n];
 357
 358                ce->gem_context = ctx;
 359        }
 360
 361        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
 362        INIT_LIST_HEAD(&ctx->handles_list);
 363        INIT_LIST_HEAD(&ctx->hw_id_link);
 364
 365        /* Default context will never have a file_priv */
 366        ret = DEFAULT_CONTEXT_HANDLE;
 367        if (file_priv) {
 368                ret = idr_alloc(&file_priv->context_idr, ctx,
 369                                DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
 370                if (ret < 0)
 371                        goto err_lut;
 372        }
 373        ctx->user_handle = ret;
 374
 375        ctx->file_priv = file_priv;
 376        if (file_priv) {
 377                ctx->pid = get_task_pid(current, PIDTYPE_PID);
 378                ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
 379                                      current->comm,
 380                                      pid_nr(ctx->pid),
 381                                      ctx->user_handle);
 382                if (!ctx->name) {
 383                        ret = -ENOMEM;
 384                        goto err_pid;
 385                }
 386        }
 387
 388        /* NB: Mark all slices as needing a remap so that when the context first
 389         * loads it will restore whatever remap state already exists. If there
 390         * is no remap info, it will be a NOP. */
 391        ctx->remap_slice = ALL_L3_SLICES(dev_priv);
 392
 393        i915_gem_context_set_bannable(ctx);
 394        ctx->ring_size = 4 * PAGE_SIZE;
 395        ctx->desc_template =
 396                default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
 397
 398        return ctx;
 399
 400err_pid:
 401        put_pid(ctx->pid);
 402        idr_remove(&file_priv->context_idr, ctx->user_handle);
 403err_lut:
 404        context_close(ctx);
 405        return ERR_PTR(ret);
 406}
 407
 408static void __destroy_hw_context(struct i915_gem_context *ctx,
 409                                 struct drm_i915_file_private *file_priv)
 410{
 411        idr_remove(&file_priv->context_idr, ctx->user_handle);
 412        context_close(ctx);
 413}
 414
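/*
 * i915_gem_create_context - create a context, with its own ppgtt if possible
 *
 * Reaps the most stale context from the deferred free list, builds the base
 * software context and, on hardware with full ppgtt, gives it a private
 * address space. Must be called under struct_mutex.
 */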
 415static struct i915_gem_context *
 416i915_gem_create_context(struct drm_i915_private *dev_priv,
 417                        struct drm_i915_file_private *file_priv)
 418{
 419        struct i915_gem_context *ctx;
 420
 421        lockdep_assert_held(&dev_priv->drm.struct_mutex);
 422
 423        /* Reap the most stale context */
 424        contexts_free_first(dev_priv);
 425
 426        ctx = __create_hw_context(dev_priv, file_priv);
 427        if (IS_ERR(ctx))
 428                return ctx;
 429
 430        if (HAS_FULL_PPGTT(dev_priv)) {
 431                struct i915_hw_ppgtt *ppgtt;
 432
 433                ppgtt = i915_ppgtt_create(dev_priv, file_priv);
 434                if (IS_ERR(ppgtt)) {
 435                        DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
 436                                         PTR_ERR(ppgtt));
 437                        __destroy_hw_context(ctx, file_priv);
 438                        return ERR_CAST(ppgtt);
 439                }
 440
 441                ctx->ppgtt = ppgtt;
 442                ctx->desc_template = default_desc_template(dev_priv, ppgtt);
 443        }
 444
 445        trace_i915_context_create(ctx);
 446
 447        return ctx;
 448}
 449
 450/**
 451 * i915_gem_context_create_gvt - create a GVT GEM context
 452 * @dev: drm device *
 453 *
 454 * This function is used to create a GVT specific GEM context.
 455 *
 456 * Returns:
 457 * pointer to i915_gem_context on success, error pointer if failed
 458 *
 459 */
 460struct i915_gem_context *
 461i915_gem_context_create_gvt(struct drm_device *dev)
 462{
 463        struct i915_gem_context *ctx;
 464        int ret;
 465
 466        if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
 467                return ERR_PTR(-ENODEV);
 468
 469        ret = i915_mutex_lock_interruptible(dev);
 470        if (ret)
 471                return ERR_PTR(ret);
 472
 473        ctx = i915_gem_create_context(to_i915(dev), NULL);
 474        if (IS_ERR(ctx))
 475                goto out;
 476
 477        ctx->file_priv = ERR_PTR(-EBADF);
 478        i915_gem_context_set_closed(ctx); /* not user accessible */
 479        i915_gem_context_clear_bannable(ctx);
 480        i915_gem_context_set_force_single_submission(ctx);
 481        if (!USES_GUC_SUBMISSION(to_i915(dev)))
 482                ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
 483
 484        GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
 485out:
 486        mutex_unlock(&dev->struct_mutex);
 487        return ctx;
 488}
 489
 490static void
 491destroy_kernel_context(struct i915_gem_context **ctxp)
 492{
 493        struct i915_gem_context *ctx;
 494
 495        /* Keep the context ref so that we can free it immediately ourselves */
 496        ctx = i915_gem_context_get(fetch_and_zero(ctxp));
 497        GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
 498
 499        context_close(ctx);
 500        i915_gem_context_free(ctx);
 501}
 502
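/*
 * i915_gem_context_create_kernel - create a kernel-owned context with the
 * given scheduling priority
 *
 * Used for the driver's own contexts (the default kernel context and the
 * preempt context). The context is not bannable, uses a single-page ring and
 * has its hw_id pinned for its whole lifetime so it can be used from any
 * allocation context (e.g. the shrinker).
 */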
 503struct i915_gem_context *
 504i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 505{
 506        struct i915_gem_context *ctx;
 507        int err;
 508
 509        ctx = i915_gem_create_context(i915, NULL);
 510        if (IS_ERR(ctx))
 511                return ctx;
 512
 513        err = i915_gem_context_pin_hw_id(ctx);
 514        if (err) {
 515                destroy_kernel_context(&ctx);
 516                return ERR_PTR(err);
 517        }
 518
 519        i915_gem_context_clear_bannable(ctx);
 520        ctx->sched.priority = I915_USER_PRIORITY(prio);
 521        ctx->ring_size = PAGE_SIZE;
 522
 523        GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
 524
 525        return ctx;
 526}
 527
 528static void init_contexts(struct drm_i915_private *i915)
 529{
 530        mutex_init(&i915->contexts.mutex);
 531        INIT_LIST_HEAD(&i915->contexts.list);
 532
 533        /* Using the simple ida interface, the max is limited by sizeof(int) */
 534        BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
 535        BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
 536        ida_init(&i915->contexts.hw_ida);
 537        INIT_LIST_HEAD(&i915->contexts.hw_id_list);
 538
 539        INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
 540        init_llist_head(&i915->contexts.free_list);
 541}
 542
 543static bool needs_preempt_context(struct drm_i915_private *i915)
 544{
 545        return HAS_LOGICAL_RING_PREEMPTION(i915);
 546}
 547
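/*
 * i915_gem_contexts_init - driver-load initialisation of context support
 *
 * Sets up the context bookkeeping (ida, lists, deferred-free worker) and
 * creates the permanently pinned kernel contexts: the lowest-priority
 * default context (hw_id 0) and, when the hardware supports logical ring
 * preemption, the highest-priority preempt context.
 */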
 548int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 549{
 550        struct i915_gem_context *ctx;
 551
 552        /* Reassure ourselves we are only called once */
 553        GEM_BUG_ON(dev_priv->kernel_context);
 554        GEM_BUG_ON(dev_priv->preempt_context);
 555
 556        intel_engine_init_ctx_wa(dev_priv->engine[RCS]);
 557        init_contexts(dev_priv);
 558
 559        /* lowest priority; idle task */
 560        ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
 561        if (IS_ERR(ctx)) {
 562                DRM_ERROR("Failed to create default global context\n");
 563                return PTR_ERR(ctx);
 564        }
 565        /*
  566         * For easy recognisability, we want the kernel context to be 0 and then
 567         * all user contexts will have non-zero hw_id. Kernel contexts are
 568         * permanently pinned, so that we never suffer a stall and can
 569         * use them from any allocation context (e.g. for evicting other
 570         * contexts and from inside the shrinker).
 571         */
 572        GEM_BUG_ON(ctx->hw_id);
 573        GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
 574        dev_priv->kernel_context = ctx;
 575
 576        /* highest priority; preempting task */
 577        if (needs_preempt_context(dev_priv)) {
 578                ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
 579                if (!IS_ERR(ctx))
 580                        dev_priv->preempt_context = ctx;
 581                else
 582                        DRM_ERROR("Failed to create preempt context; disabling preemption\n");
 583        }
 584
 585        DRM_DEBUG_DRIVER("%s context support initialized\n",
 586                         DRIVER_CAPS(dev_priv)->has_logical_contexts ?
 587                         "logical" : "fake");
 588        return 0;
 589}
 590
 591void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
 592{
 593        struct intel_engine_cs *engine;
 594        enum intel_engine_id id;
 595
 596        lockdep_assert_held(&dev_priv->drm.struct_mutex);
 597
 598        for_each_engine(engine, dev_priv, id)
 599                intel_engine_lost_context(engine);
 600}
 601
 602void i915_gem_contexts_fini(struct drm_i915_private *i915)
 603{
 604        lockdep_assert_held(&i915->drm.struct_mutex);
 605
 606        if (i915->preempt_context)
 607                destroy_kernel_context(&i915->preempt_context);
 608        destroy_kernel_context(&i915->kernel_context);
 609
 610        /* Must free all deferred contexts (via flush_workqueue) first */
 611        GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
 612        ida_destroy(&i915->contexts.hw_ida);
 613}
 614
 615static int context_idr_cleanup(int id, void *p, void *data)
 616{
 617        struct i915_gem_context *ctx = p;
 618
 619        context_close(ctx);
 620        return 0;
 621}
 622
 623int i915_gem_context_open(struct drm_i915_private *i915,
 624                          struct drm_file *file)
 625{
 626        struct drm_i915_file_private *file_priv = file->driver_priv;
 627        struct i915_gem_context *ctx;
 628
 629        idr_init(&file_priv->context_idr);
 630
 631        mutex_lock(&i915->drm.struct_mutex);
 632        ctx = i915_gem_create_context(i915, file_priv);
 633        mutex_unlock(&i915->drm.struct_mutex);
 634        if (IS_ERR(ctx)) {
 635                idr_destroy(&file_priv->context_idr);
 636                return PTR_ERR(ctx);
 637        }
 638
 639        GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
 640
 641        return 0;
 642}
 643
 644void i915_gem_context_close(struct drm_file *file)
 645{
 646        struct drm_i915_file_private *file_priv = file->driver_priv;
 647
 648        lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
 649
 650        idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
 651        idr_destroy(&file_priv->context_idr);
 652}
 653
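/*
 * last_request_on_engine - peek at a timeline's last request, but only if it
 * was (or will be) executed on @engine
 *
 * Returns NULL when the timeline's last request belongs to another engine.
 * Must be called under struct_mutex and never with the engine's own
 * timeline.
 */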
 654static struct i915_request *
 655last_request_on_engine(struct i915_timeline *timeline,
 656                       struct intel_engine_cs *engine)
 657{
 658        struct i915_request *rq;
 659
 660        GEM_BUG_ON(timeline == &engine->timeline);
 661
 662        rq = i915_gem_active_raw(&timeline->last_request,
 663                                 &engine->i915->drm.struct_mutex);
 664        if (rq && rq->engine == engine) {
 665                GEM_TRACE("last request for %s on engine %s: %llx:%d\n",
 666                          timeline->name, engine->name,
 667                          rq->fence.context, rq->fence.seqno);
 668                GEM_BUG_ON(rq->timeline != timeline);
 669                return rq;
 670        }
 671
 672        return NULL;
 673}
 674
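/*
 * engine_has_kernel_context_barrier - is a switch to the kernel context
 * already queued behind all other work on this engine?
 *
 * Walks the active rings and, using the kernel context's timeline sync map,
 * checks that every foreign request on this engine predates the last
 * switch-to-kernel-context. If the engine is completely idle, it instead
 * checks that the engine last retired into the kernel context.
 */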
 675static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
 676{
 677        struct drm_i915_private *i915 = engine->i915;
 678        const struct intel_context * const ce =
 679                to_intel_context(i915->kernel_context, engine);
 680        struct i915_timeline *barrier = ce->ring->timeline;
 681        struct intel_ring *ring;
 682        bool any_active = false;
 683
 684        lockdep_assert_held(&i915->drm.struct_mutex);
 685        list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
 686                struct i915_request *rq;
 687
 688                rq = last_request_on_engine(ring->timeline, engine);
 689                if (!rq)
 690                        continue;
 691
 692                any_active = true;
 693
 694                if (rq->hw_context == ce)
 695                        continue;
 696
 697                /*
 698                 * Was this request submitted after the previous
 699                 * switch-to-kernel-context?
 700                 */
 701                if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
 702                        GEM_TRACE("%s needs barrier for %llx:%d\n",
 703                                  ring->timeline->name,
 704                                  rq->fence.context,
 705                                  rq->fence.seqno);
 706                        return false;
 707                }
 708
 709                GEM_TRACE("%s has barrier after %llx:%d\n",
 710                          ring->timeline->name,
 711                          rq->fence.context,
 712                          rq->fence.seqno);
 713        }
 714
 715        /*
 716         * If any other timeline was still active and behind the last barrier,
 717         * then our last switch-to-kernel-context must still be queued and
 718         * will run last (leaving the engine in the kernel context when it
 719         * eventually idles).
 720         */
 721        if (any_active)
 722                return true;
 723
 724        /* The engine is idle; check that it is idling in the kernel context. */
 725        return engine->last_retired_context == ce;
 726}
 727
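/*
 * i915_gem_switch_to_kernel_context - queue a switch to the kernel context
 * on every engine
 *
 * For each engine lacking a kernel-context barrier, a request is emitted on
 * the kernel context and made to wait on the last request of every other
 * active timeline, so the switch executes only after all current work and
 * leaves the engine idling in the kernel context (used, for example, before
 * suspending the GPU).
 */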
 728int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
 729{
 730        struct intel_engine_cs *engine;
 731        enum intel_engine_id id;
 732
 733        GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));
 734
 735        lockdep_assert_held(&i915->drm.struct_mutex);
 736        GEM_BUG_ON(!i915->kernel_context);
 737
 738        i915_retire_requests(i915);
 739
 740        for_each_engine(engine, i915, id) {
 741                struct intel_ring *ring;
 742                struct i915_request *rq;
 743
 744                GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
 745                if (engine_has_kernel_context_barrier(engine))
 746                        continue;
 747
 748                GEM_TRACE("emit barrier on %s\n", engine->name);
 749
 750                rq = i915_request_alloc(engine, i915->kernel_context);
 751                if (IS_ERR(rq))
 752                        return PTR_ERR(rq);
 753
 754                /* Queue this switch after all other activity */
 755                list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
 756                        struct i915_request *prev;
 757
 758                        prev = last_request_on_engine(ring->timeline, engine);
 759                        if (!prev)
 760                                continue;
 761
 762                        if (prev->gem_context == i915->kernel_context)
 763                                continue;
 764
 765                        GEM_TRACE("add barrier on %s for %llx:%d\n",
 766                                  engine->name,
 767                                  prev->fence.context,
 768                                  prev->fence.seqno);
 769                        i915_sw_fence_await_sw_fence_gfp(&rq->submit,
 770                                                         &prev->submit,
 771                                                         I915_FENCE_GFP);
 772                        i915_timeline_sync_set(rq->timeline, &prev->fence);
 773                }
 774
 775                i915_request_add(rq);
 776        }
 777
 778        return 0;
 779}
 780
 781static bool client_is_banned(struct drm_i915_file_private *file_priv)
 782{
 783        return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
 784}
 785
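/*
 * i915_gem_context_create_ioctl - userspace entry point for context creation
 *
 * Rejects devices without logical context support, non-zero pad fields and
 * clients whose ban score has reached I915_CLIENT_SCORE_BANNED, then creates
 * a context under struct_mutex and returns its handle in args->ctx_id.
 *
 * A minimal userspace sketch (assuming @fd is an open i915 drm fd):
 *
 *	struct drm_i915_gem_context_create create = {};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create) == 0)
 *		ctx_id = create.ctx_id;
 */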
 786int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 787                                  struct drm_file *file)
 788{
 789        struct drm_i915_private *dev_priv = to_i915(dev);
 790        struct drm_i915_gem_context_create *args = data;
 791        struct drm_i915_file_private *file_priv = file->driver_priv;
 792        struct i915_gem_context *ctx;
 793        int ret;
 794
 795        if (!DRIVER_CAPS(dev_priv)->has_logical_contexts)
 796                return -ENODEV;
 797
 798        if (args->pad != 0)
 799                return -EINVAL;
 800
 801        if (client_is_banned(file_priv)) {
 802                DRM_DEBUG("client %s[%d] banned from creating ctx\n",
 803                          current->comm,
 804                          pid_nr(get_task_pid(current, PIDTYPE_PID)));
 805
 806                return -EIO;
 807        }
 808
 809        ret = i915_mutex_lock_interruptible(dev);
 810        if (ret)
 811                return ret;
 812
 813        ctx = i915_gem_create_context(dev_priv, file_priv);
 814        mutex_unlock(&dev->struct_mutex);
 815        if (IS_ERR(ctx))
 816                return PTR_ERR(ctx);
 817
 818        GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
 819
 820        args->ctx_id = ctx->user_handle;
 821        DRM_DEBUG("HW context %d created\n", args->ctx_id);
 822
 823        return 0;
 824}
 825
 826int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 827                                   struct drm_file *file)
 828{
 829        struct drm_i915_gem_context_destroy *args = data;
 830        struct drm_i915_file_private *file_priv = file->driver_priv;
 831        struct i915_gem_context *ctx;
 832        int ret;
 833
 834        if (args->pad != 0)
 835                return -EINVAL;
 836
 837        if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
 838                return -ENOENT;
 839
 840        ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
 841        if (!ctx)
 842                return -ENOENT;
 843
 844        ret = mutex_lock_interruptible(&dev->struct_mutex);
 845        if (ret)
 846                goto out;
 847
 848        __destroy_hw_context(ctx, file_priv);
 849        mutex_unlock(&dev->struct_mutex);
 850
 851out:
 852        i915_gem_context_put(ctx);
  853        return ret;
 854}
 855
 856int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 857                                    struct drm_file *file)
 858{
 859        struct drm_i915_file_private *file_priv = file->driver_priv;
 860        struct drm_i915_gem_context_param *args = data;
 861        struct i915_gem_context *ctx;
 862        int ret = 0;
 863
 864        ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
 865        if (!ctx)
 866                return -ENOENT;
 867
 868        args->size = 0;
 869        switch (args->param) {
 870        case I915_CONTEXT_PARAM_BAN_PERIOD:
 871                ret = -EINVAL;
 872                break;
 873        case I915_CONTEXT_PARAM_NO_ZEROMAP:
 874                args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
 875                break;
 876        case I915_CONTEXT_PARAM_GTT_SIZE:
 877                if (ctx->ppgtt)
 878                        args->value = ctx->ppgtt->vm.total;
 879                else if (to_i915(dev)->mm.aliasing_ppgtt)
 880                        args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
 881                else
 882                        args->value = to_i915(dev)->ggtt.vm.total;
 883                break;
 884        case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
 885                args->value = i915_gem_context_no_error_capture(ctx);
 886                break;
 887        case I915_CONTEXT_PARAM_BANNABLE:
 888                args->value = i915_gem_context_is_bannable(ctx);
 889                break;
 890        case I915_CONTEXT_PARAM_PRIORITY:
 891                args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
 892                break;
 893        default:
 894                ret = -EINVAL;
 895                break;
 896        }
 897
 898        i915_gem_context_put(ctx);
 899        return ret;
 900}
 901
 902int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 903                                    struct drm_file *file)
 904{
 905        struct drm_i915_file_private *file_priv = file->driver_priv;
 906        struct drm_i915_gem_context_param *args = data;
 907        struct i915_gem_context *ctx;
 908        int ret = 0;
 909
 910        ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
 911        if (!ctx)
 912                return -ENOENT;
 913
 914        switch (args->param) {
 915        case I915_CONTEXT_PARAM_BAN_PERIOD:
 916                ret = -EINVAL;
 917                break;
 918        case I915_CONTEXT_PARAM_NO_ZEROMAP:
 919                if (args->size)
 920                        ret = -EINVAL;
 921                else if (args->value)
 922                        set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
 923                else
 924                        clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
 925                break;
 926        case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
 927                if (args->size)
 928                        ret = -EINVAL;
 929                else if (args->value)
 930                        i915_gem_context_set_no_error_capture(ctx);
 931                else
 932                        i915_gem_context_clear_no_error_capture(ctx);
 933                break;
 934        case I915_CONTEXT_PARAM_BANNABLE:
 935                if (args->size)
 936                        ret = -EINVAL;
 937                else if (!capable(CAP_SYS_ADMIN) && !args->value)
 938                        ret = -EPERM;
 939                else if (args->value)
 940                        i915_gem_context_set_bannable(ctx);
 941                else
 942                        i915_gem_context_clear_bannable(ctx);
 943                break;
 944
 945        case I915_CONTEXT_PARAM_PRIORITY:
 946                {
 947                        s64 priority = args->value;
 948
 949                        if (args->size)
 950                                ret = -EINVAL;
 951                        else if (!(to_i915(dev)->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
 952                                ret = -ENODEV;
 953                        else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
 954                                 priority < I915_CONTEXT_MIN_USER_PRIORITY)
 955                                ret = -EINVAL;
 956                        else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
 957                                 !capable(CAP_SYS_NICE))
 958                                ret = -EPERM;
 959                        else
 960                                ctx->sched.priority =
 961                                        I915_USER_PRIORITY(priority);
 962                }
 963                break;
 964
 965        default:
 966                ret = -EINVAL;
 967                break;
 968        }
 969
 970        i915_gem_context_put(ctx);
 971        return ret;
 972}
 973
 974int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
 975                                       void *data, struct drm_file *file)
 976{
 977        struct drm_i915_private *dev_priv = to_i915(dev);
 978        struct drm_i915_reset_stats *args = data;
 979        struct i915_gem_context *ctx;
 980        int ret;
 981
 982        if (args->flags || args->pad)
 983                return -EINVAL;
 984
 985        ret = -ENOENT;
 986        rcu_read_lock();
 987        ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
 988        if (!ctx)
 989                goto out;
 990
 991        /*
 992         * We opt for unserialised reads here. This may result in tearing
 993         * in the extremely unlikely event of a GPU hang on this context
 994         * as we are querying them. If we need that extra layer of protection,
 995         * we should wrap the hangstats with a seqlock.
 996         */
 997
 998        if (capable(CAP_SYS_ADMIN))
 999                args->reset_count = i915_reset_count(&dev_priv->gpu_error);
1000        else
1001                args->reset_count = 0;
1002
1003        args->batch_active = atomic_read(&ctx->guilty_count);
1004        args->batch_pending = atomic_read(&ctx->active_count);
1005
1006        ret = 0;
1007out:
1008        rcu_read_unlock();
1009        return ret;
1010}
1011
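/*
 * __i915_gem_context_pin_hw_id - pin (and if needed allocate) a context's
 * hw_id
 *
 * Under contexts.mutex: if the context has no id yet (hw_id_link is empty),
 * one is assigned and the context is added to hw_id_list so the id can later
 * be repossessed or released; the pin count is then raised to keep the id
 * from being stolen while it is in use.
 */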
1012int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
1013{
1014        struct drm_i915_private *i915 = ctx->i915;
1015        int err = 0;
1016
1017        mutex_lock(&i915->contexts.mutex);
1018
1019        GEM_BUG_ON(i915_gem_context_is_closed(ctx));
1020
1021        if (list_empty(&ctx->hw_id_link)) {
1022                GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));
1023
1024                err = assign_hw_id(i915, &ctx->hw_id);
1025                if (err)
1026                        goto out_unlock;
1027
1028                list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
1029        }
1030
1031        GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
1032        atomic_inc(&ctx->hw_id_pin_count);
1033
1034out_unlock:
1035        mutex_unlock(&i915->contexts.mutex);
1036        return err;
1037}
1038
1039#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1040#include "selftests/mock_context.c"
1041#include "selftests/i915_gem_context.c"
1042#endif
1043