linux/drivers/gpu/drm/i915/i915_gem_context.c
/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of
 * an opaque GPU object which is referenced at times of context saves and
 * restores. With RC6 enabled, the context is also referenced as the GPU enters
 * and exits RC6 (the GPU has its own internal power context, except on gen5).
 * Though something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context, in order to invoke a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits an execbuf with its own context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and
 *  is on the active list waiting for the next context switch to occur. Until
 *  this happens, the object must remain at the same gtt offset. It is
 *  therefore possible to destroy a context while it is still active.
 *
 */
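/*
 * As an illustrative (simplified) walk through the table above, tying the
 * states to the uAPI entry points implemented at the bottom of this file:
 *
 *	DRM_IOCTL_I915_GEM_CONTEXT_CREATE       S0 -> S1
 *	execbuf with the new context            S1 -> S2 (current)
 *	execbuf with some other context         S2 -> S3 (still active)
 *	last request retired                    S3 -> S1
 *	DRM_IOCTL_I915_GEM_CONTEXT_DESTROY      S3 -> S5 -> S0 if active,
 *	                                        S1 -> S0 if idle
 *
 * The exact path taken depends on scheduling and retirement timing, so this
 * is a sketch of the common case rather than an exhaustive enumeration.
 */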

#include <linux/log2.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_lrc_reg.h"
#include "intel_workarounds.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

static void lut_close(struct i915_gem_context *ctx)
{
	struct i915_lut_handle *lut, *ln;
	struct radix_tree_iter iter;
	void __rcu **slot;

	list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
		list_del(&lut->obj_link);
		kmem_cache_free(ctx->i915->luts, lut);
	}

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);

		radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
		__i915_gem_object_release_unless_active(vma->obj);
	}
	rcu_read_unlock();
}

static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
{
	unsigned int max;

	lockdep_assert_held(&i915->contexts.mutex);

	if (INTEL_GEN(i915) >= 11)
		max = GEN11_MAX_CONTEXT_HW_ID;
	else if (USES_GUC_SUBMISSION(i915))
		/*
		 * When using GuC in proxy submission, GuC consumes the
		 * highest bit in the context id to indicate proxy submission.
		 */
		max = MAX_GUC_CONTEXT_HW_ID;
	else
		max = MAX_CONTEXT_HW_ID;

	return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
}

static int steal_hw_id(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx, *cn;
	LIST_HEAD(pinned);
	int id = -ENOSPC;

	lockdep_assert_held(&i915->contexts.mutex);

	list_for_each_entry_safe(ctx, cn,
				 &i915->contexts.hw_id_list, hw_id_link) {
		if (atomic_read(&ctx->hw_id_pin_count)) {
			list_move_tail(&ctx->hw_id_link, &pinned);
			continue;
		}

		GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
		list_del_init(&ctx->hw_id_link);
		id = ctx->hw_id;
		break;
	}

	/*
	 * Remember how far we got on the last repossession scan, so the
	 * list is kept in a "least recently scanned" order.
	 */
	list_splice_tail(&pinned, &i915->contexts.hw_id_list);
	return id;
}

static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
{
	int ret;

	lockdep_assert_held(&i915->contexts.mutex);

	/*
	 * We prefer to steal/stall ourselves and our users over that of the
	 * entire system. That may be a little unfair to our users, and
	 * even hurt high priority clients. The choice is whether to oomkill
	 * something else, or steal a context id.
	 */
	ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (unlikely(ret < 0)) {
		ret = steal_hw_id(i915);
		if (ret < 0) /* once again for the correct errno code */
			ret = new_hw_id(i915, GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	*out = ret;
	return 0;
}

static void release_hw_id(struct i915_gem_context *ctx)
{
	struct drm_i915_private *i915 = ctx->i915;

	if (list_empty(&ctx->hw_id_link))
		return;

	mutex_lock(&i915->contexts.mutex);
	if (!list_empty(&ctx->hw_id_link)) {
		ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
		list_del_init(&ctx->hw_id_link);
	}
	mutex_unlock(&i915->contexts.mutex);
}

static void i915_gem_context_free(struct i915_gem_context *ctx)
{
	unsigned int n;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	release_hw_id(ctx);
	i915_ppgtt_put(ctx->ppgtt);

	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
		struct intel_context *ce = &ctx->__engine[n];

		if (ce->ops)
			ce->ops->destroy(ce);
	}

	kfree(ctx->name);
	put_pid(ctx->pid);

	list_del(&ctx->link);

	kfree_rcu(ctx, rcu);
}

static void contexts_free(struct drm_i915_private *i915)
{
	struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
	struct i915_gem_context *ctx, *cn;

	lockdep_assert_held(&i915->drm.struct_mutex);

	llist_for_each_entry_safe(ctx, cn, freed, free_link)
		i915_gem_context_free(ctx);
}

static void contexts_free_first(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx;
	struct llist_node *freed;

	lockdep_assert_held(&i915->drm.struct_mutex);

	freed = llist_del_first(&i915->contexts.free_list);
	if (!freed)
		return;

	ctx = container_of(freed, typeof(*ctx), free_link);
	i915_gem_context_free(ctx);
}

static void contexts_free_worker(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, typeof(*i915), contexts.free_work);

	mutex_lock(&i915->drm.struct_mutex);
	contexts_free(i915);
	mutex_unlock(&i915->drm.struct_mutex);
}

void i915_gem_context_release(struct kref *ref)
{
	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
	struct drm_i915_private *i915 = ctx->i915;

	trace_i915_context_free(ctx);
	if (llist_add(&ctx->free_link, &i915->contexts.free_list))
		queue_work(i915->wq, &i915->contexts.free_work);
}

static void context_close(struct i915_gem_context *ctx)
{
	i915_gem_context_set_closed(ctx);

	/*
	 * This context will never again be assigned to HW, so we can
	 * reuse its ID for the next context.
	 */
	release_hw_id(ctx);

	/*
	 * The LUT uses the VMA as a backpointer to unref the object,
	 * so we need to clear the LUT before we close all the VMA (inside
	 * the ppgtt).
	 */
	lut_close(ctx);
	if (ctx->ppgtt)
		i915_ppgtt_close(&ctx->ppgtt->vm);

	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_put(ctx);
}

static u32 default_desc_template(const struct drm_i915_private *i915,
				 const struct i915_hw_ppgtt *ppgtt)
{
	u32 address_mode;
	u32 desc;

	desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;

	address_mode = INTEL_LEGACY_32B_CONTEXT;
	if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
		address_mode = INTEL_LEGACY_64B_CONTEXT;
	desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;

	if (IS_GEN(i915, 8))
		desc |= GEN8_CTX_L3LLC_COHERENT;

	/*
	 * TODO: WaDisableLiteRestore when we start using semaphore
	 * signalling between Command Streamers
	 * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
	 */

	return desc;
}

static void intel_context_retire(struct i915_active_request *active,
				 struct i915_request *rq)
{
	struct intel_context *ce =
		container_of(active, typeof(*ce), active_tracker);

	intel_context_unpin(ce);
}

void
intel_context_init(struct intel_context *ce,
		   struct i915_gem_context *ctx,
		   struct intel_engine_cs *engine)
{
	ce->gem_context = ctx;

	INIT_LIST_HEAD(&ce->signal_link);
	INIT_LIST_HEAD(&ce->signals);

	/* Use the whole device by default */
	ce->sseu = intel_device_default_sseu(ctx->i915);

	i915_active_request_init(&ce->active_tracker,
				 NULL, intel_context_retire);
}

static struct i915_gem_context *
__create_hw_context(struct drm_i915_private *dev_priv,
		    struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;
	unsigned int n;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->contexts.list);
	ctx->i915 = dev_priv;
	ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);

	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
		intel_context_init(&ctx->__engine[n], ctx, dev_priv->engine[n]);

	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
	INIT_LIST_HEAD(&ctx->handles_list);
	INIT_LIST_HEAD(&ctx->hw_id_link);

	/* Default context will never have a file_priv */
	ret = DEFAULT_CONTEXT_HANDLE;
	if (file_priv) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_lut;
	}
	ctx->user_handle = ret;

	ctx->file_priv = file_priv;
	if (file_priv) {
		ctx->pid = get_task_pid(current, PIDTYPE_PID);
		ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
				      current->comm,
				      pid_nr(ctx->pid),
				      ctx->user_handle);
		if (!ctx->name) {
			ret = -ENOMEM;
			goto err_pid;
		}
	}

	/*
	 * NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP.
	 */
	ctx->remap_slice = ALL_L3_SLICES(dev_priv);

	i915_gem_context_set_bannable(ctx);
	ctx->ring_size = 4 * PAGE_SIZE;
	ctx->desc_template =
		default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);

	return ctx;

err_pid:
	put_pid(ctx->pid);
	idr_remove(&file_priv->context_idr, ctx->user_handle);
err_lut:
	context_close(ctx);
	return ERR_PTR(ret);
}

static void __destroy_hw_context(struct i915_gem_context *ctx,
				 struct drm_i915_file_private *file_priv)
{
	idr_remove(&file_priv->context_idr, ctx->user_handle);
	context_close(ctx);
}

static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *dev_priv,
			struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* Reap the most stale context */
	contexts_free_first(dev_priv);

	ctx = __create_hw_context(dev_priv, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (HAS_FULL_PPGTT(dev_priv)) {
		struct i915_hw_ppgtt *ppgtt;

		ppgtt = i915_ppgtt_create(dev_priv, file_priv);
		if (IS_ERR(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			__destroy_hw_context(ctx, file_priv);
			return ERR_CAST(ppgtt);
		}

		ctx->ppgtt = ppgtt;
		ctx->desc_template = default_desc_template(dev_priv, ppgtt);
	}

	trace_i915_context_create(ctx);

	return ctx;
}

/**
 * i915_gem_context_create_gvt - create a GVT GEM context
 * @dev: drm device
 *
 * This function is used to create a GVT specific GEM context.
 *
 * Returns:
 * pointer to i915_gem_context on success, error pointer on failure
 *
 */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev)
{
	struct i915_gem_context *ctx;
	int ret;

	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return ERR_PTR(-ENODEV);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	ctx = i915_gem_create_context(to_i915(dev), NULL);
	if (IS_ERR(ctx))
		goto out;

	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_set_closed(ctx); /* not user accessible */
	i915_gem_context_clear_bannable(ctx);
	i915_gem_context_set_force_single_submission(ctx);
	if (!USES_GUC_SUBMISSION(to_i915(dev)))
		ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
out:
	mutex_unlock(&dev->struct_mutex);
	return ctx;
}

static void
destroy_kernel_context(struct i915_gem_context **ctxp)
{
	struct i915_gem_context *ctx;

	/* Keep the context ref so that we can free it immediately ourselves */
	ctx = i915_gem_context_get(fetch_and_zero(ctxp));
	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

	context_close(ctx);
	i915_gem_context_free(ctx);
}

struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
{
	struct i915_gem_context *ctx;
	int err;

	ctx = i915_gem_create_context(i915, NULL);
	if (IS_ERR(ctx))
		return ctx;

	err = i915_gem_context_pin_hw_id(ctx);
	if (err) {
		destroy_kernel_context(&ctx);
		return ERR_PTR(err);
	}

	i915_gem_context_clear_bannable(ctx);
	ctx->sched.priority = I915_USER_PRIORITY(prio);
	ctx->ring_size = PAGE_SIZE;

	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

	return ctx;
}

static void init_contexts(struct drm_i915_private *i915)
{
	mutex_init(&i915->contexts.mutex);
	INIT_LIST_HEAD(&i915->contexts.list);

	/* Using the simple ida interface, the max is limited by sizeof(int) */
	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
	BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
	ida_init(&i915->contexts.hw_ida);
	INIT_LIST_HEAD(&i915->contexts.hw_id_list);

	INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
	init_llist_head(&i915->contexts.free_list);
}

static bool needs_preempt_context(struct drm_i915_private *i915)
{
	return HAS_LOGICAL_RING_PREEMPTION(i915);
}

int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
	struct i915_gem_context *ctx;

	/* Reassure ourselves we are only called once */
	GEM_BUG_ON(dev_priv->kernel_context);
	GEM_BUG_ON(dev_priv->preempt_context);

	intel_engine_init_ctx_wa(dev_priv->engine[RCS]);
	init_contexts(dev_priv);

	/* lowest priority; idle task */
	ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context\n");
		return PTR_ERR(ctx);
	}
	/*
	 * For easy recognisability, we want the kernel context to have hw_id
	 * 0, so that all user contexts have a non-zero hw_id. Kernel contexts
	 * are permanently pinned, so that we never suffer a stall and can
	 * use them from any allocation context (e.g. for evicting other
	 * contexts and from inside the shrinker).
	 */
	GEM_BUG_ON(ctx->hw_id);
	GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
	dev_priv->kernel_context = ctx;

	/* highest priority; preempting task */
	if (needs_preempt_context(dev_priv)) {
		ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
		if (!IS_ERR(ctx))
			dev_priv->preempt_context = ctx;
		else
			DRM_ERROR("Failed to create preempt context; disabling preemption\n");
	}

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			 DRIVER_CAPS(dev_priv)->has_logical_contexts ?
			 "logical" : "fake");
	return 0;
}

void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv, id)
		intel_engine_lost_context(engine);
}

void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->drm.struct_mutex);

	if (i915->preempt_context)
		destroy_kernel_context(&i915->preempt_context);
	destroy_kernel_context(&i915->kernel_context);

	/* Must free all deferred contexts (via flush_workqueue) first */
	GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
	ida_destroy(&i915->contexts.hw_ida);
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct i915_gem_context *ctx = p;

	context_close(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&i915->drm.struct_mutex);
	ctx = i915_gem_create_context(i915, file_priv);
	mutex_unlock(&i915->drm.struct_mutex);
	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

	return 0;
}

void i915_gem_context_close(struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

static struct i915_request *
last_request_on_engine(struct i915_timeline *timeline,
		       struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	GEM_BUG_ON(timeline == &engine->timeline);

	rq = i915_active_request_raw(&timeline->last_request,
				     &engine->i915->drm.struct_mutex);
	if (rq && rq->engine == engine) {
		GEM_TRACE("last request for %s on engine %s: %llx:%llu\n",
			  timeline->name, engine->name,
			  rq->fence.context, rq->fence.seqno);
		GEM_BUG_ON(rq->timeline != timeline);
		return rq;
	}

	return NULL;
}

static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	const struct intel_context * const ce =
		to_intel_context(i915->kernel_context, engine);
	struct i915_timeline *barrier = ce->ring->timeline;
	struct intel_ring *ring;
	bool any_active = false;

	lockdep_assert_held(&i915->drm.struct_mutex);
	list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
		struct i915_request *rq;

		rq = last_request_on_engine(ring->timeline, engine);
		if (!rq)
			continue;

		any_active = true;

		if (rq->hw_context == ce)
			continue;

		/*
		 * Was this request submitted after the previous
		 * switch-to-kernel-context?
		 */
		if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
			GEM_TRACE("%s needs barrier for %llx:%lld\n",
				  ring->timeline->name,
				  rq->fence.context,
				  rq->fence.seqno);
			return false;
		}

		GEM_TRACE("%s has barrier after %llx:%lld\n",
			  ring->timeline->name,
			  rq->fence.context,
			  rq->fence.seqno);
	}

	/*
	 * If any other timeline was still active and behind the last barrier,
	 * then our last switch-to-kernel-context must still be queued and
	 * will run last (leaving the engine in the kernel context when it
	 * eventually idles).
	 */
	if (any_active)
		return true;

	/* The engine is idle; check that it is idling in the kernel context. */
	return engine->last_retired_context == ce;
}

int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));

	lockdep_assert_held(&i915->drm.struct_mutex);
	GEM_BUG_ON(!i915->kernel_context);

	i915_retire_requests(i915);

	for_each_engine(engine, i915, id) {
		struct intel_ring *ring;
		struct i915_request *rq;

		GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
		if (engine_has_kernel_context_barrier(engine))
			continue;

		GEM_TRACE("emit barrier on %s\n", engine->name);

		rq = i915_request_alloc(engine, i915->kernel_context);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		/* Queue this switch after all other activity */
		list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
			struct i915_request *prev;

			prev = last_request_on_engine(ring->timeline, engine);
			if (!prev)
				continue;

			if (prev->gem_context == i915->kernel_context)
				continue;

			GEM_TRACE("add barrier on %s for %llx:%lld\n",
				  engine->name,
				  prev->fence.context,
				  prev->fence.seqno);
			i915_sw_fence_await_sw_fence_gfp(&rq->submit,
							 &prev->submit,
							 I915_FENCE_GFP);
			i915_timeline_sync_set(rq->timeline, &prev->fence);
		}

		i915_request_add(rq);
	}

	return 0;
}

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (!DRIVER_CAPS(dev_priv)->has_logical_contexts)
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	if (client_is_banned(file_priv)) {
		DRM_DEBUG("client %s[%d] banned from creating ctx\n",
			  current->comm,
			  pid_nr(get_task_pid(current, PIDTYPE_PID)));

		return -EIO;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev_priv, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	__destroy_hw_context(ctx, file_priv);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_context_put(ctx);
	return ret; /* propagate -EINTR so userspace knows to retry */
}
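
/*
 * Illustrative userspace flow for the create/destroy ioctls above. This is
 * a sketch, not driver code: it assumes an open DRM fd for an i915 device
 * and libdrm's drmIoctl() wrapper; the ioctl numbers and structs come from
 * the i915 uAPI (include/uapi/drm/i915_drm.h).
 *
 *	struct drm_i915_gem_context_create create = {};
 *	struct drm_i915_gem_context_destroy destroy = {};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create) == 0) {
 *		// ... submit execbufs tagged with create.ctx_id ...
 *		destroy.ctx_id = create.ctx_id;
 *		drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 *	}
 */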

static int get_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_engine_cs *engine;
	struct intel_context *ce;
	int ret;

	if (args->size == 0)
		goto out;
	else if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.flags || user_sseu.rsvd)
		return -EINVAL;

	engine = intel_engine_lookup_user(ctx->i915,
					  user_sseu.engine_class,
					  user_sseu.engine_instance);
	if (!engine)
		return -EINVAL;

	/* The mutex here is only used to serialise get_param and set_param. */
	ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
	if (ret)
		return ret;

	ce = to_intel_context(ctx, engine);

	user_sseu.slice_mask = ce->sseu.slice_mask;
	user_sseu.subslice_mask = ce->sseu.subslice_mask;
	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;

	mutex_unlock(&ctx->i915->drm.struct_mutex);

	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
			 sizeof(user_sseu)))
		return -EFAULT;

out:
	args->size = sizeof(user_sseu);

	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		ret = -EINVAL;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->size = 0;
		args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		break;
	case I915_CONTEXT_PARAM_GTT_SIZE:
		args->size = 0;

		if (ctx->ppgtt)
			args->value = ctx->ppgtt->vm.total;
		else if (to_i915(dev)->mm.aliasing_ppgtt)
			args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
		else
			args->value = to_i915(dev)->ggtt.vm.total;
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->size = 0;
		args->value = i915_gem_context_no_error_capture(ctx);
		break;
	case I915_CONTEXT_PARAM_BANNABLE:
		args->size = 0;
		args->value = i915_gem_context_is_bannable(ctx);
		break;
	case I915_CONTEXT_PARAM_PRIORITY:
		args->size = 0;
		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
		break;
	case I915_CONTEXT_PARAM_SSEU:
		ret = get_sseu(ctx, args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}

static int gen8_emit_rpcs_config(struct i915_request *rq,
				 struct intel_context *ce,
				 struct intel_sseu sseu)
{
	u64 offset;
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	offset = i915_ggtt_offset(ce->state) +
		 LRC_STATE_PN * PAGE_SIZE +
		 (CTX_R_PWR_CLK_STATE + 1) * 4;

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);
	*cs++ = gen8_make_rpcs(rq->i915, &sseu);

	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen8_modify_rpcs_gpu(struct intel_context *ce,
		     struct intel_engine_cs *engine,
		     struct intel_sseu sseu)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_request *rq, *prev;
	intel_wakeref_t wakeref;
	int ret;

	GEM_BUG_ON(!ce->pin_count);

	lockdep_assert_held(&i915->drm.struct_mutex);

	/* Submitting requests etc needs the hw awake. */
	wakeref = intel_runtime_pm_get(i915);

	rq = i915_request_alloc(engine, i915->kernel_context);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
		goto out_put;
	}

	/* Queue this switch after all other activity by this context. */
	prev = i915_active_request_raw(&ce->ring->timeline->last_request,
				       &i915->drm.struct_mutex);
	if (prev && !i915_request_completed(prev)) {
		ret = i915_request_await_dma_fence(rq, &prev->fence);
		if (ret < 0)
			goto out_add;
	}

	/* Order all following requests to be after. */
	ret = i915_timeline_set_barrier(ce->ring->timeline, rq);
	if (ret)
		goto out_add;

	ret = gen8_emit_rpcs_config(rq, ce, sseu);
	if (ret)
		goto out_add;

	/*
	 * Guarantee that the context image and the timeline remain pinned
	 * until the modifying request is retired, by setting the ce activity
	 * tracker.
	 *
	 * But we only need to take one pin on account of it. Or in other
	 * words, transfer the pinned ce object to the tracked active request.
	 */
	if (!i915_active_request_isset(&ce->active_tracker))
		__intel_context_pin(ce);
	__i915_active_request_set(&ce->active_tracker, rq);

out_add:
	i915_request_add(rq);
out_put:
	intel_runtime_pm_put(i915, wakeref);

	return ret;
}

static int
__i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
				    struct intel_engine_cs *engine,
				    struct intel_sseu sseu)
{
	struct intel_context *ce = to_intel_context(ctx, engine);
	int ret = 0;

	GEM_BUG_ON(INTEL_GEN(ctx->i915) < 8);
	GEM_BUG_ON(engine->id != RCS);

	/* Nothing to do if unmodified. */
	if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
		return 0;

	/*
	 * If the context is not idle we have to submit an ordered request to
	 * modify its context image via the kernel context. Pristine and idle
	 * contexts will be configured on pinning.
	 */
	if (ce->pin_count)
		ret = gen8_modify_rpcs_gpu(ce, engine, sseu);

	if (!ret)
		ce->sseu = sseu;

	return ret;
}

static int
i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
				  struct intel_engine_cs *engine,
				  struct intel_sseu sseu)
{
	int ret;

	ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
	if (ret)
		return ret;

	ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu);

	mutex_unlock(&ctx->i915->drm.struct_mutex);

	return ret;
}

static int
user_to_context_sseu(struct drm_i915_private *i915,
		     const struct drm_i915_gem_context_param_sseu *user,
		     struct intel_sseu *context)
{
	const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;

	/* No zeros in any field. */
	if (!user->slice_mask || !user->subslice_mask ||
	    !user->min_eus_per_subslice || !user->max_eus_per_subslice)
		return -EINVAL;

	/* Max >= min. */
	if (user->max_eus_per_subslice < user->min_eus_per_subslice)
		return -EINVAL;

	/*
	 * Some future proofing on the types since the uAPI is wider than the
	 * current internal implementation.
	 */
	if (overflows_type(user->slice_mask, context->slice_mask) ||
	    overflows_type(user->subslice_mask, context->subslice_mask) ||
	    overflows_type(user->min_eus_per_subslice,
			   context->min_eus_per_subslice) ||
	    overflows_type(user->max_eus_per_subslice,
			   context->max_eus_per_subslice))
		return -EINVAL;

	/* Check validity against hardware. */
	if (user->slice_mask & ~device->slice_mask)
		return -EINVAL;

	if (user->subslice_mask & ~device->subslice_mask[0])
		return -EINVAL;

	if (user->max_eus_per_subslice > device->max_eus_per_subslice)
		return -EINVAL;

	context->slice_mask = user->slice_mask;
	context->subslice_mask = user->subslice_mask;
	context->min_eus_per_subslice = user->min_eus_per_subslice;
	context->max_eus_per_subslice = user->max_eus_per_subslice;

	/* Part specific restrictions. */
	if (IS_GEN(i915, 11)) {
		unsigned int hw_s = hweight8(device->slice_mask);
		unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
		unsigned int req_s = hweight8(context->slice_mask);
		unsigned int req_ss = hweight8(context->subslice_mask);

		/*
		 * Only full subslice enablement is possible if more than one
		 * slice is turned on.
		 */
		if (req_s > 1 && req_ss != hw_ss_per_s)
			return -EINVAL;

		/*
		 * If more than four (SScount bitfield limit) subslices are
		 * requested then the number has to be even.
		 */
		if (req_ss > 4 && (req_ss & 1))
			return -EINVAL;

		/*
		 * If only one slice is enabled and the subslice count is below
		 * the device's full enablement, it must be at most half of all
		 * the available subslices.
		 */
		if (req_s == 1 && req_ss < hw_ss_per_s &&
		    req_ss > (hw_ss_per_s / 2))
			return -EINVAL;

		/* ABI restriction - VME use case only. */

		/* All slices or one slice only. */
		if (req_s != 1 && req_s != hw_s)
			return -EINVAL;

		/*
		 * Half subslices or full enablement only when one slice is
		 * enabled.
		 */
		if (req_s == 1 &&
		    (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
			return -EINVAL;

		/* No EU configuration changes. */
		if ((user->min_eus_per_subslice !=
		     device->max_eus_per_subslice) ||
		    (user->max_eus_per_subslice !=
		     device->max_eus_per_subslice))
			return -EINVAL;
	}

	return 0;
}
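
/*
 * For illustration, with hypothetical Gen11-like numbers (one slice, eight
 * subslices, 8 EUs per subslice), the rules above admit a VME-style request
 * enabling a single slice and exactly half of its subslices:
 *
 *	struct drm_i915_gem_context_param_sseu user = {
 *		.slice_mask		= 0x1,	// one slice
 *		.subslice_mask		= 0x0f,	// half of 8 subslices
 *		.min_eus_per_subslice	= 8,	// EU config must be unchanged
 *		.max_eus_per_subslice	= 8,
 *	};
 *
 * whereas e.g. .subslice_mask = 0x07 (neither full nor half enablement with
 * a single slice) would be rejected with -EINVAL.
 */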

static int set_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_engine_cs *engine;
	struct intel_sseu sseu;
	int ret;

	if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (!IS_GEN(i915, 11))
		return -ENODEV;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.flags || user_sseu.rsvd)
		return -EINVAL;

	engine = intel_engine_lookup_user(i915,
					  user_sseu.engine_class,
					  user_sseu.engine_instance);
	if (!engine)
		return -EINVAL;

	/* Only the render engine supports RPCS configuration. */
	if (engine->class != RENDER_CLASS)
		return -ENODEV;

	ret = user_to_context_sseu(i915, &user_sseu, &sseu);
	if (ret)
		return ret;

	ret = i915_gem_context_reconfigure_sseu(ctx, engine, sseu);
	if (ret)
		return ret;

	args->size = sizeof(user_sseu);

	return 0;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		ret = -EINVAL;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		else
			clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_no_error_capture(ctx);
		else
			i915_gem_context_clear_no_error_capture(ctx);
		break;
	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			i915_gem_context_set_bannable(ctx);
		else
			i915_gem_context_clear_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		{
			s64 priority = args->value;

			if (args->size)
				ret = -EINVAL;
			else if (!(to_i915(dev)->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
				ret = -ENODEV;
			else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
				 priority < I915_CONTEXT_MIN_USER_PRIORITY)
				ret = -EINVAL;
			else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
				 !capable(CAP_SYS_NICE))
				ret = -EPERM;
			else
				ctx->sched.priority =
					I915_USER_PRIORITY(priority);
		}
		break;
	case I915_CONTEXT_PARAM_SSEU:
		ret = set_sseu(ctx, args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}
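
/*
 * Illustrative userspace use of the getparam/setparam ioctls above, e.g. to
 * raise a context's priority. A sketch only: assumes an open i915 DRM fd and
 * libdrm's drmIoctl(); raising priority above the default requires
 * CAP_SYS_NICE, and values must lie within the MIN/MAX user priority range.
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id	= ctx_id,
 *		.param	= I915_CONTEXT_PARAM_PRIORITY,
 *		.value	= 512,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p); // read back
 */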

int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	ret = -ENOENT;
	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
	if (!ctx)
		goto out;

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of protection,
	 * we should wrap the hangstats with a seqlock.
	 */

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
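
/*
 * Illustrative userspace query of the reset statistics above (a sketch;
 * assumes an open i915 DRM fd, libdrm's drmIoctl(), and recreate_context()
 * being whatever recovery path the application implements):
 *
 *	struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats) == 0 &&
 *	    stats.batch_active)
 *		recreate_context();	// context was guilty of a GPU hang
 */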

int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
{
	struct drm_i915_private *i915 = ctx->i915;
	int err = 0;

	mutex_lock(&i915->contexts.mutex);

	GEM_BUG_ON(i915_gem_context_is_closed(ctx));

	if (list_empty(&ctx->hw_id_link)) {
		GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));

		err = assign_hw_id(i915, &ctx->hw_id);
		if (err)
			goto out_unlock;

		list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
	}

	GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
	atomic_inc(&ctx->hw_id_pin_count);

out_unlock:
	mutex_unlock(&i915->contexts.mutex);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif