linux/drivers/gpu/drm/i915/gem/i915_gem_context.c
   1/*
   2 * SPDX-License-Identifier: MIT
   3 *
   4 * Copyright © 2011-2012 Intel Corporation
   5 */
   6
   7/*
   8 * This file implements HW context support. On gen5+ a HW context consists of an
   9 * opaque GPU object which is referenced at times of context saves and restores.
   10 * With RC6 enabled, the context is also referenced as the GPU enters and exits
   11 * from RC6 (the GPU has its own internal power context, except on gen5). Though
  12 * something like a context does exist for the media ring, the code only
  13 * supports contexts for the render ring.
  14 *
  15 * In software, there is a distinction between contexts created by the user,
  16 * and the default HW context. The default HW context is used by GPU clients
  17 * that do not request setup of their own hardware context. The default
  18 * context's state is never restored to help prevent programming errors. This
   19 * would happen if a client ran and piggy-backed off another client's GPU state.
  20 * The default context only exists to give the GPU some offset to load as the
   21 * current, which triggers a save of the context we actually care about. In fact, the
  22 * code could likely be constructed, albeit in a more complicated fashion, to
  23 * never use the default context, though that limits the driver's ability to
  24 * swap out, and/or destroy other contexts.
  25 *
  26 * All other contexts are created as a request by the GPU client. These contexts
  27 * store GPU state, and thus allow GPU clients to not re-emit state (and
  28 * potentially query certain state) at any time. The kernel driver makes
  29 * certain that the appropriate commands are inserted.
  30 *
  31 * The context life cycle is semi-complicated in that context BOs may live
   32 * longer than the context itself because of the way the hardware and object
   33 * tracking work. Below is a very crude representation of the state machine
  34 * describing the context life.
  35 *                                         refcount     pincount     active
  36 * S0: initial state                          0            0           0
  37 * S1: context created                        1            0           0
  38 * S2: context is currently running           2            1           X
  39 * S3: GPU referenced, but not current        2            0           1
  40 * S4: context is current, but destroyed      1            1           0
  41 * S5: like S3, but destroyed                 1            0           1
  42 *
  43 * The most common (but not all) transitions:
  44 * S0->S1: client creates a context
  45 * S1->S2: client submits execbuf with context
   46 * S2->S3: another client submits an execbuf with its own context
   47 * S3->S1: context object was retired
   48 * S3->S2: client submits another execbuf with the context
  49 * S2->S4: context destroy called with current context
  50 * S3->S5->S0: destroy path
  51 * S4->S5->S0: destroy path on current context
  52 *
  53 * There are two confusing terms used above:
  54 *  The "current context" means the context which is currently running on the
  55 *  GPU. The GPU has loaded its state already and has stored away the gtt
  56 *  offset of the BO. The GPU is not actively referencing the data at this
  57 *  offset, but it will on the next context switch. The only way to avoid this
  58 *  is to do a GPU reset.
  59 *
   60 *  An "active context" is one which was previously the "current context" and is
  61 *  on the active list waiting for the next context switch to occur. Until this
  62 *  happens, the object must remain at the same gtt offset. It is therefore
   63 *  possible for a context to be destroyed while it is still active.
  64 *
  65 */
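/*
 * Illustrative sketch (not part of this file): roughly how the lifecycle
 * above maps onto the uAPI from a userspace client's point of view. The
 * file descriptor "fd" is an assumption for the example; the ioctls,
 * structs and the i915_execbuffer2_set_context_id() helper are the
 * standard ones from include/uapi/drm/i915_drm.h and libdrm.
 *
 *	struct drm_i915_gem_context_create create = {};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);	S0->S1
 *
 *	struct drm_i915_gem_execbuffer2 eb = { ... buffers, batch ... };
 *	i915_execbuffer2_set_context_id(eb, create.ctx_id);
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb);		S1->S2
 *
 *	struct drm_i915_gem_context_destroy destroy = {
 *		.ctx_id = create.ctx_id,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);	S2->S4
 */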
  66
  67#include <linux/log2.h>
  68#include <linux/nospec.h>
  69
  70#include <drm/i915_drm.h>
  71
  72#include "gt/intel_lrc_reg.h"
  73#include "gt/intel_engine_user.h"
  74
  75#include "i915_gem_context.h"
  76#include "i915_globals.h"
  77#include "i915_trace.h"
  78#include "i915_user_extensions.h"
  79
   80#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
  81
  82static struct i915_global_gem_context {
  83        struct i915_global base;
  84        struct kmem_cache *slab_luts;
  85} global;
  86
  87struct i915_lut_handle *i915_lut_handle_alloc(void)
  88{
  89        return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
  90}
  91
  92void i915_lut_handle_free(struct i915_lut_handle *lut)
  93{
   94        kmem_cache_free(global.slab_luts, lut);
  95}
  96
  97static void lut_close(struct i915_gem_context *ctx)
  98{
  99        struct radix_tree_iter iter;
 100        void __rcu **slot;
 101
 102        lockdep_assert_held(&ctx->mutex);
 103
 104        rcu_read_lock();
 105        radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
 106                struct i915_vma *vma = rcu_dereference_raw(*slot);
 107                struct drm_i915_gem_object *obj = vma->obj;
 108                struct i915_lut_handle *lut;
 109
 110                if (!kref_get_unless_zero(&obj->base.refcount))
 111                        continue;
 112
 113                rcu_read_unlock();
 114                i915_gem_object_lock(obj);
 115                list_for_each_entry(lut, &obj->lut_list, obj_link) {
 116                        if (lut->ctx != ctx)
 117                                continue;
 118
 119                        if (lut->handle != iter.index)
 120                                continue;
 121
 122                        list_del(&lut->obj_link);
 123                        break;
 124                }
 125                i915_gem_object_unlock(obj);
 126                rcu_read_lock();
 127
 128                if (&lut->obj_link != &obj->lut_list) {
 129                        i915_lut_handle_free(lut);
 130                        radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
 131                        if (atomic_dec_and_test(&vma->open_count) &&
 132                            !i915_vma_is_ggtt(vma))
 133                                i915_vma_close(vma);
 134                        i915_gem_object_put(obj);
 135                }
 136
 137                i915_gem_object_put(obj);
 138        }
 139        rcu_read_unlock();
 140}
 141
 142static struct intel_context *
 143lookup_user_engine(struct i915_gem_context *ctx,
 144                   unsigned long flags,
 145                   const struct i915_engine_class_instance *ci)
 146#define LOOKUP_USER_INDEX BIT(0)
 147{
 148        int idx;
 149
 150        if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
 151                return ERR_PTR(-EINVAL);
 152
 153        if (!i915_gem_context_user_engines(ctx)) {
 154                struct intel_engine_cs *engine;
 155
 156                engine = intel_engine_lookup_user(ctx->i915,
 157                                                  ci->engine_class,
 158                                                  ci->engine_instance);
 159                if (!engine)
 160                        return ERR_PTR(-EINVAL);
 161
 162                idx = engine->legacy_idx;
 163        } else {
 164                idx = ci->engine_instance;
 165        }
 166
 167        return i915_gem_context_get_engine(ctx, idx);
 168}
 169
 170static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
 171{
 172        unsigned int max;
 173
 174        lockdep_assert_held(&i915->contexts.mutex);
 175
 176        if (INTEL_GEN(i915) >= 12)
 177                max = GEN12_MAX_CONTEXT_HW_ID;
 178        else if (INTEL_GEN(i915) >= 11)
 179                max = GEN11_MAX_CONTEXT_HW_ID;
 180        else if (USES_GUC_SUBMISSION(i915))
 181                /*
 182                 * When using GuC in proxy submission, GuC consumes the
 183                 * highest bit in the context id to indicate proxy submission.
 184                 */
 185                max = MAX_GUC_CONTEXT_HW_ID;
 186        else
 187                max = MAX_CONTEXT_HW_ID;
 188
 189        return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
 190}
 191
 192static int steal_hw_id(struct drm_i915_private *i915)
 193{
 194        struct i915_gem_context *ctx, *cn;
 195        LIST_HEAD(pinned);
 196        int id = -ENOSPC;
 197
 198        lockdep_assert_held(&i915->contexts.mutex);
 199
 200        list_for_each_entry_safe(ctx, cn,
 201                                 &i915->contexts.hw_id_list, hw_id_link) {
 202                if (atomic_read(&ctx->hw_id_pin_count)) {
 203                        list_move_tail(&ctx->hw_id_link, &pinned);
 204                        continue;
 205                }
 206
 207                GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
 208                list_del_init(&ctx->hw_id_link);
 209                id = ctx->hw_id;
 210                break;
 211        }
 212
 213        /*
  214         * Remember how far we got up on the last repossession scan, so the
 215         * list is kept in a "least recently scanned" order.
 216         */
 217        list_splice_tail(&pinned, &i915->contexts.hw_id_list);
 218        return id;
 219}
 220
 221static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
 222{
 223        int ret;
 224
 225        lockdep_assert_held(&i915->contexts.mutex);
 226
 227        /*
 228         * We prefer to steal/stall ourselves and our users over that of the
 229         * entire system. That may be a little unfair to our users, and
 230         * even hurt high priority clients. The choice is whether to oomkill
 231         * something else, or steal a context id.
 232         */
 233        ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 234        if (unlikely(ret < 0)) {
 235                ret = steal_hw_id(i915);
 236                if (ret < 0) /* once again for the correct errno code */
 237                        ret = new_hw_id(i915, GFP_KERNEL);
 238                if (ret < 0)
 239                        return ret;
 240        }
 241
 242        *out = ret;
 243        return 0;
 244}
 245
 246static void release_hw_id(struct i915_gem_context *ctx)
 247{
 248        struct drm_i915_private *i915 = ctx->i915;
 249
 250        if (list_empty(&ctx->hw_id_link))
 251                return;
 252
 253        mutex_lock(&i915->contexts.mutex);
 254        if (!list_empty(&ctx->hw_id_link)) {
 255                ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
 256                list_del_init(&ctx->hw_id_link);
 257        }
 258        mutex_unlock(&i915->contexts.mutex);
 259}
 260
 261static void __free_engines(struct i915_gem_engines *e, unsigned int count)
 262{
 263        while (count--) {
 264                if (!e->engines[count])
 265                        continue;
 266
 267                intel_context_put(e->engines[count]);
 268        }
 269        kfree(e);
 270}
 271
 272static void free_engines(struct i915_gem_engines *e)
 273{
 274        __free_engines(e, e->num_engines);
 275}
 276
 277static void free_engines_rcu(struct rcu_head *rcu)
 278{
 279        free_engines(container_of(rcu, struct i915_gem_engines, rcu));
 280}
 281
 282static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
 283{
 284        const struct intel_gt *gt = &ctx->i915->gt;
 285        struct intel_engine_cs *engine;
 286        struct i915_gem_engines *e;
 287        enum intel_engine_id id;
 288
 289        e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
 290        if (!e)
 291                return ERR_PTR(-ENOMEM);
 292
 293        init_rcu_head(&e->rcu);
 294        for_each_engine(engine, gt, id) {
 295                struct intel_context *ce;
 296
 297                ce = intel_context_create(ctx, engine);
 298                if (IS_ERR(ce)) {
 299                        __free_engines(e, id);
 300                        return ERR_CAST(ce);
 301                }
 302
 303                e->engines[id] = ce;
 304                e->num_engines = id + 1;
 305        }
 306
 307        return e;
 308}
 309
 310static void i915_gem_context_free(struct i915_gem_context *ctx)
 311{
 312        lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 313        GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 314
 315        release_hw_id(ctx);
 316        if (ctx->vm)
 317                i915_vm_put(ctx->vm);
 318
 319        free_engines(rcu_access_pointer(ctx->engines));
 320        mutex_destroy(&ctx->engines_mutex);
 321
 322        kfree(ctx->jump_whitelist);
 323
 324        if (ctx->timeline)
 325                intel_timeline_put(ctx->timeline);
 326
 327        kfree(ctx->name);
 328        put_pid(ctx->pid);
 329
 330        list_del(&ctx->link);
 331        mutex_destroy(&ctx->mutex);
 332
 333        kfree_rcu(ctx, rcu);
 334}
 335
 336static void contexts_free(struct drm_i915_private *i915)
 337{
 338        struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
 339        struct i915_gem_context *ctx, *cn;
 340
 341        lockdep_assert_held(&i915->drm.struct_mutex);
 342
 343        llist_for_each_entry_safe(ctx, cn, freed, free_link)
 344                i915_gem_context_free(ctx);
 345}
 346
 347static void contexts_free_first(struct drm_i915_private *i915)
 348{
 349        struct i915_gem_context *ctx;
 350        struct llist_node *freed;
 351
 352        lockdep_assert_held(&i915->drm.struct_mutex);
 353
 354        freed = llist_del_first(&i915->contexts.free_list);
 355        if (!freed)
 356                return;
 357
 358        ctx = container_of(freed, typeof(*ctx), free_link);
 359        i915_gem_context_free(ctx);
 360}
 361
 362static void contexts_free_worker(struct work_struct *work)
 363{
 364        struct drm_i915_private *i915 =
 365                container_of(work, typeof(*i915), contexts.free_work);
 366
 367        mutex_lock(&i915->drm.struct_mutex);
 368        contexts_free(i915);
 369        mutex_unlock(&i915->drm.struct_mutex);
 370}
 371
 372void i915_gem_context_release(struct kref *ref)
 373{
 374        struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
 375        struct drm_i915_private *i915 = ctx->i915;
 376
 377        trace_i915_context_free(ctx);
 378        if (llist_add(&ctx->free_link, &i915->contexts.free_list))
 379                queue_work(i915->wq, &i915->contexts.free_work);
 380}
 381
 382static void context_close(struct i915_gem_context *ctx)
 383{
 384        mutex_lock(&ctx->mutex);
 385
 386        i915_gem_context_set_closed(ctx);
 387        ctx->file_priv = ERR_PTR(-EBADF);
 388
 389        /*
  390         * This context will never again be assigned to HW, so we can
 391         * reuse its ID for the next context.
 392         */
 393        release_hw_id(ctx);
 394
 395        /*
 396         * The LUT uses the VMA as a backpointer to unref the object,
 397         * so we need to clear the LUT before we close all the VMA (inside
 398         * the ppgtt).
 399         */
 400        lut_close(ctx);
 401
 402        mutex_unlock(&ctx->mutex);
 403        i915_gem_context_put(ctx);
 404}
 405
 406static struct i915_gem_context *
 407__create_context(struct drm_i915_private *i915)
 408{
 409        struct i915_gem_context *ctx;
 410        struct i915_gem_engines *e;
 411        int err;
 412        int i;
 413
 414        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 415        if (!ctx)
 416                return ERR_PTR(-ENOMEM);
 417
 418        kref_init(&ctx->ref);
 419        list_add_tail(&ctx->link, &i915->contexts.list);
 420        ctx->i915 = i915;
 421        ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
 422        mutex_init(&ctx->mutex);
 423
 424        mutex_init(&ctx->engines_mutex);
 425        e = default_engines(ctx);
 426        if (IS_ERR(e)) {
 427                err = PTR_ERR(e);
 428                goto err_free;
 429        }
 430        RCU_INIT_POINTER(ctx->engines, e);
 431
 432        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
 433        INIT_LIST_HEAD(&ctx->hw_id_link);
 434
 435        /* NB: Mark all slices as needing a remap so that when the context first
 436         * loads it will restore whatever remap state already exists. If there
 437         * is no remap info, it will be a NOP. */
 438        ctx->remap_slice = ALL_L3_SLICES(i915);
 439
 440        i915_gem_context_set_bannable(ctx);
 441        i915_gem_context_set_recoverable(ctx);
 442
 443        for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
 444                ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
 445
 446        ctx->jump_whitelist = NULL;
 447        ctx->jump_whitelist_cmds = 0;
 448
 449        return ctx;
 450
 451err_free:
 452        kfree(ctx);
 453        return ERR_PTR(err);
 454}
 455
 456static void
 457context_apply_all(struct i915_gem_context *ctx,
 458                  void (*fn)(struct intel_context *ce, void *data),
 459                  void *data)
 460{
 461        struct i915_gem_engines_iter it;
 462        struct intel_context *ce;
 463
 464        for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
 465                fn(ce, data);
 466        i915_gem_context_unlock_engines(ctx);
 467}
 468
 469static void __apply_ppgtt(struct intel_context *ce, void *vm)
 470{
 471        i915_vm_put(ce->vm);
 472        ce->vm = i915_vm_get(vm);
 473}
 474
 475static struct i915_address_space *
 476__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
 477{
 478        struct i915_address_space *old = ctx->vm;
 479
 480        GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
 481
 482        ctx->vm = i915_vm_get(vm);
 483        context_apply_all(ctx, __apply_ppgtt, vm);
 484
 485        return old;
 486}
 487
 488static void __assign_ppgtt(struct i915_gem_context *ctx,
 489                           struct i915_address_space *vm)
 490{
 491        if (vm == ctx->vm)
 492                return;
 493
 494        vm = __set_ppgtt(ctx, vm);
 495        if (vm)
 496                i915_vm_put(vm);
 497}
 498
 499static void __set_timeline(struct intel_timeline **dst,
 500                           struct intel_timeline *src)
 501{
 502        struct intel_timeline *old = *dst;
 503
 504        *dst = src ? intel_timeline_get(src) : NULL;
 505
 506        if (old)
 507                intel_timeline_put(old);
 508}
 509
 510static void __apply_timeline(struct intel_context *ce, void *timeline)
 511{
 512        __set_timeline(&ce->timeline, timeline);
 513}
 514
 515static void __assign_timeline(struct i915_gem_context *ctx,
 516                              struct intel_timeline *timeline)
 517{
 518        __set_timeline(&ctx->timeline, timeline);
 519        context_apply_all(ctx, __apply_timeline, timeline);
 520}
 521
 522static struct i915_gem_context *
 523i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
 524{
 525        struct i915_gem_context *ctx;
 526
 527        lockdep_assert_held(&dev_priv->drm.struct_mutex);
 528
 529        if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
 530            !HAS_EXECLISTS(dev_priv))
 531                return ERR_PTR(-EINVAL);
 532
 533        /* Reap the most stale context */
 534        contexts_free_first(dev_priv);
 535
 536        ctx = __create_context(dev_priv);
 537        if (IS_ERR(ctx))
 538                return ctx;
 539
 540        if (HAS_FULL_PPGTT(dev_priv)) {
 541                struct i915_ppgtt *ppgtt;
 542
 543                ppgtt = i915_ppgtt_create(dev_priv);
 544                if (IS_ERR(ppgtt)) {
 545                        DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
 546                                         PTR_ERR(ppgtt));
 547                        context_close(ctx);
 548                        return ERR_CAST(ppgtt);
 549                }
 550
 551                __assign_ppgtt(ctx, &ppgtt->vm);
 552                i915_vm_put(&ppgtt->vm);
 553        }
 554
 555        if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
 556                struct intel_timeline *timeline;
 557
 558                timeline = intel_timeline_create(&dev_priv->gt, NULL);
 559                if (IS_ERR(timeline)) {
 560                        context_close(ctx);
 561                        return ERR_CAST(timeline);
 562                }
 563
 564                __assign_timeline(ctx, timeline);
 565                intel_timeline_put(timeline);
 566        }
 567
 568        trace_i915_context_create(ctx);
 569
 570        return ctx;
 571}
 572
 573static void
 574destroy_kernel_context(struct i915_gem_context **ctxp)
 575{
 576        struct i915_gem_context *ctx;
 577
 578        /* Keep the context ref so that we can free it immediately ourselves */
 579        ctx = i915_gem_context_get(fetch_and_zero(ctxp));
 580        GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
 581
 582        context_close(ctx);
 583        i915_gem_context_free(ctx);
 584}
 585
 586struct i915_gem_context *
 587i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 588{
 589        struct i915_gem_context *ctx;
 590        int err;
 591
 592        ctx = i915_gem_create_context(i915, 0);
 593        if (IS_ERR(ctx))
 594                return ctx;
 595
 596        err = i915_gem_context_pin_hw_id(ctx);
 597        if (err) {
 598                destroy_kernel_context(&ctx);
 599                return ERR_PTR(err);
 600        }
 601
 602        i915_gem_context_clear_bannable(ctx);
 603        ctx->sched.priority = I915_USER_PRIORITY(prio);
 604
 605        GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
 606
 607        return ctx;
 608}
 609
 610static void init_contexts(struct drm_i915_private *i915)
 611{
 612        mutex_init(&i915->contexts.mutex);
 613        INIT_LIST_HEAD(&i915->contexts.list);
 614
 615        /* Using the simple ida interface, the max is limited by sizeof(int) */
 616        BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
 617        BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
 618        ida_init(&i915->contexts.hw_ida);
 619        INIT_LIST_HEAD(&i915->contexts.hw_id_list);
 620
 621        INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
 622        init_llist_head(&i915->contexts.free_list);
 623}
 624
 625int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 626{
 627        struct i915_gem_context *ctx;
 628
 629        /* Reassure ourselves we are only called once */
 630        GEM_BUG_ON(dev_priv->kernel_context);
 631
 632        init_contexts(dev_priv);
 633
 634        /* lowest priority; idle task */
 635        ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
 636        if (IS_ERR(ctx)) {
 637                DRM_ERROR("Failed to create default global context\n");
 638                return PTR_ERR(ctx);
 639        }
 640        /*
  641         * For easy recognisability, we want the kernel context to be 0 and then
 642         * all user contexts will have non-zero hw_id. Kernel contexts are
 643         * permanently pinned, so that we never suffer a stall and can
 644         * use them from any allocation context (e.g. for evicting other
 645         * contexts and from inside the shrinker).
 646         */
 647        GEM_BUG_ON(ctx->hw_id);
 648        GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
 649        dev_priv->kernel_context = ctx;
 650
 651        DRM_DEBUG_DRIVER("%s context support initialized\n",
 652                         DRIVER_CAPS(dev_priv)->has_logical_contexts ?
 653                         "logical" : "fake");
 654        return 0;
 655}
 656
 657void i915_gem_contexts_fini(struct drm_i915_private *i915)
 658{
 659        lockdep_assert_held(&i915->drm.struct_mutex);
 660
 661        destroy_kernel_context(&i915->kernel_context);
 662
 663        /* Must free all deferred contexts (via flush_workqueue) first */
 664        GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
 665        ida_destroy(&i915->contexts.hw_ida);
 666}
 667
 668static int context_idr_cleanup(int id, void *p, void *data)
 669{
 670        context_close(p);
 671        return 0;
 672}
 673
 674static int vm_idr_cleanup(int id, void *p, void *data)
 675{
 676        i915_vm_put(p);
 677        return 0;
 678}
 679
 680static int gem_context_register(struct i915_gem_context *ctx,
 681                                struct drm_i915_file_private *fpriv)
 682{
 683        int ret;
 684
 685        ctx->file_priv = fpriv;
 686        if (ctx->vm)
 687                ctx->vm->file = fpriv;
 688
 689        ctx->pid = get_task_pid(current, PIDTYPE_PID);
 690        ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
 691                              current->comm, pid_nr(ctx->pid));
 692        if (!ctx->name) {
 693                ret = -ENOMEM;
 694                goto err_pid;
 695        }
 696
 697        /* And finally expose ourselves to userspace via the idr */
 698        mutex_lock(&fpriv->context_idr_lock);
 699        ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
 700        mutex_unlock(&fpriv->context_idr_lock);
 701        if (ret >= 0)
 702                goto out;
 703
 704        kfree(fetch_and_zero(&ctx->name));
 705err_pid:
 706        put_pid(fetch_and_zero(&ctx->pid));
 707out:
 708        return ret;
 709}
 710
 711int i915_gem_context_open(struct drm_i915_private *i915,
 712                          struct drm_file *file)
 713{
 714        struct drm_i915_file_private *file_priv = file->driver_priv;
 715        struct i915_gem_context *ctx;
 716        int err;
 717
 718        mutex_init(&file_priv->context_idr_lock);
 719        mutex_init(&file_priv->vm_idr_lock);
 720
 721        idr_init(&file_priv->context_idr);
 722        idr_init_base(&file_priv->vm_idr, 1);
 723
 724        mutex_lock(&i915->drm.struct_mutex);
 725        ctx = i915_gem_create_context(i915, 0);
 726        mutex_unlock(&i915->drm.struct_mutex);
 727        if (IS_ERR(ctx)) {
 728                err = PTR_ERR(ctx);
 729                goto err;
 730        }
 731
 732        err = gem_context_register(ctx, file_priv);
 733        if (err < 0)
 734                goto err_ctx;
 735
 736        GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
 737        GEM_BUG_ON(err > 0);
 738
 739        return 0;
 740
 741err_ctx:
 742        context_close(ctx);
 743err:
 744        idr_destroy(&file_priv->vm_idr);
 745        idr_destroy(&file_priv->context_idr);
 746        mutex_destroy(&file_priv->vm_idr_lock);
 747        mutex_destroy(&file_priv->context_idr_lock);
 748        return err;
 749}
 750
 751void i915_gem_context_close(struct drm_file *file)
 752{
 753        struct drm_i915_file_private *file_priv = file->driver_priv;
 754
 755        idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
 756        idr_destroy(&file_priv->context_idr);
 757        mutex_destroy(&file_priv->context_idr_lock);
 758
 759        idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
 760        idr_destroy(&file_priv->vm_idr);
 761        mutex_destroy(&file_priv->vm_idr_lock);
 762}
 763
 764int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
 765                             struct drm_file *file)
 766{
 767        struct drm_i915_private *i915 = to_i915(dev);
 768        struct drm_i915_gem_vm_control *args = data;
 769        struct drm_i915_file_private *file_priv = file->driver_priv;
 770        struct i915_ppgtt *ppgtt;
 771        int err;
 772
 773        if (!HAS_FULL_PPGTT(i915))
 774                return -ENODEV;
 775
 776        if (args->flags)
 777                return -EINVAL;
 778
 779        ppgtt = i915_ppgtt_create(i915);
 780        if (IS_ERR(ppgtt))
 781                return PTR_ERR(ppgtt);
 782
 783        ppgtt->vm.file = file_priv;
 784
 785        if (args->extensions) {
 786                err = i915_user_extensions(u64_to_user_ptr(args->extensions),
 787                                           NULL, 0,
 788                                           ppgtt);
 789                if (err)
 790                        goto err_put;
 791        }
 792
 793        err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
 794        if (err)
 795                goto err_put;
 796
 797        err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
 798        if (err < 0)
 799                goto err_unlock;
 800
 801        GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */
 802
 803        mutex_unlock(&file_priv->vm_idr_lock);
 804
 805        args->vm_id = err;
 806        return 0;
 807
 808err_unlock:
 809        mutex_unlock(&file_priv->vm_idr_lock);
 810err_put:
 811        i915_vm_put(&ppgtt->vm);
 812        return err;
 813}
 814
 815int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
 816                              struct drm_file *file)
 817{
 818        struct drm_i915_file_private *file_priv = file->driver_priv;
 819        struct drm_i915_gem_vm_control *args = data;
 820        struct i915_address_space *vm;
 821        int err;
 822        u32 id;
 823
 824        if (args->flags)
 825                return -EINVAL;
 826
 827        if (args->extensions)
 828                return -EINVAL;
 829
 830        id = args->vm_id;
 831        if (!id)
 832                return -ENOENT;
 833
 834        err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
 835        if (err)
 836                return err;
 837
 838        vm = idr_remove(&file_priv->vm_idr, id);
 839
 840        mutex_unlock(&file_priv->vm_idr_lock);
 841        if (!vm)
 842                return -ENOENT;
 843
 844        i915_vm_put(vm);
 845        return 0;
 846}
 847
 848struct context_barrier_task {
 849        struct i915_active base;
 850        void (*task)(void *data);
 851        void *data;
 852};
 853
 854static void cb_retire(struct i915_active *base)
 855{
 856        struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
 857
 858        if (cb->task)
 859                cb->task(cb->data);
 860
 861        i915_active_fini(&cb->base);
 862        kfree(cb);
 863}
 864
 865I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
 866static int context_barrier_task(struct i915_gem_context *ctx,
 867                                intel_engine_mask_t engines,
 868                                bool (*skip)(struct intel_context *ce, void *data),
 869                                int (*emit)(struct i915_request *rq, void *data),
 870                                void (*task)(void *data),
 871                                void *data)
 872{
 873        struct drm_i915_private *i915 = ctx->i915;
 874        struct context_barrier_task *cb;
 875        struct i915_gem_engines_iter it;
 876        struct intel_context *ce;
 877        int err = 0;
 878
 879        lockdep_assert_held(&i915->drm.struct_mutex);
 880        GEM_BUG_ON(!task);
 881
 882        cb = kmalloc(sizeof(*cb), GFP_KERNEL);
 883        if (!cb)
 884                return -ENOMEM;
 885
 886        i915_active_init(i915, &cb->base, NULL, cb_retire);
 887        err = i915_active_acquire(&cb->base);
 888        if (err) {
 889                kfree(cb);
 890                return err;
 891        }
 892
 893        for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
 894                struct i915_request *rq;
 895
 896                if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
 897                                       ce->engine->mask)) {
 898                        err = -ENXIO;
 899                        break;
 900                }
 901
 902                if (!(ce->engine->mask & engines))
 903                        continue;
 904
 905                if (skip && skip(ce, data))
 906                        continue;
 907
 908                rq = intel_context_create_request(ce);
 909                if (IS_ERR(rq)) {
 910                        err = PTR_ERR(rq);
 911                        break;
 912                }
 913
 914                err = 0;
 915                if (emit)
 916                        err = emit(rq, data);
 917                if (err == 0)
 918                        err = i915_active_ref(&cb->base, rq->timeline, rq);
 919
 920                i915_request_add(rq);
 921                if (err)
 922                        break;
 923        }
 924        i915_gem_context_unlock_engines(ctx);
 925
 926        cb->task = err ? NULL : task; /* caller needs to unwind instead */
 927        cb->data = data;
 928
 929        i915_active_release(&cb->base);
 930
 931        return err;
 932}
 933
 934static int get_ppgtt(struct drm_i915_file_private *file_priv,
 935                     struct i915_gem_context *ctx,
 936                     struct drm_i915_gem_context_param *args)
 937{
 938        struct i915_address_space *vm;
 939        int ret;
 940
 941        if (!ctx->vm)
 942                return -ENODEV;
 943
 944        /* XXX rcu acquire? */
 945        ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
 946        if (ret)
 947                return ret;
 948
 949        vm = i915_vm_get(ctx->vm);
 950        mutex_unlock(&ctx->i915->drm.struct_mutex);
 951
 952        ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
 953        if (ret)
 954                goto err_put;
 955
 956        ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
 957        GEM_BUG_ON(!ret);
 958        if (ret < 0)
 959                goto err_unlock;
 960
 961        i915_vm_get(vm);
 962
 963        args->size = 0;
 964        args->value = ret;
 965
 966        ret = 0;
 967err_unlock:
 968        mutex_unlock(&file_priv->vm_idr_lock);
 969err_put:
 970        i915_vm_put(vm);
 971        return ret;
 972}
 973
 974static void set_ppgtt_barrier(void *data)
 975{
 976        struct i915_address_space *old = data;
 977
 978        if (INTEL_GEN(old->i915) < 8)
 979                gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
 980
 981        i915_vm_put(old);
 982}
 983
 984static int emit_ppgtt_update(struct i915_request *rq, void *data)
 985{
 986        struct i915_address_space *vm = rq->hw_context->vm;
 987        struct intel_engine_cs *engine = rq->engine;
 988        u32 base = engine->mmio_base;
 989        u32 *cs;
 990        int i;
 991
 992        if (i915_vm_is_4lvl(vm)) {
 993                struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 994                const dma_addr_t pd_daddr = px_dma(ppgtt->pd);
 995
 996                cs = intel_ring_begin(rq, 6);
 997                if (IS_ERR(cs))
 998                        return PTR_ERR(cs);
 999
1000                *cs++ = MI_LOAD_REGISTER_IMM(2);
1001
1002                *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
1003                *cs++ = upper_32_bits(pd_daddr);
1004                *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
1005                *cs++ = lower_32_bits(pd_daddr);
1006
1007                *cs++ = MI_NOOP;
1008                intel_ring_advance(rq, cs);
1009        } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
1010                struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1011
1012                cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
1013                if (IS_ERR(cs))
1014                        return PTR_ERR(cs);
1015
1016                *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
1017                for (i = GEN8_3LVL_PDPES; i--; ) {
1018                        const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1019
1020                        *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
1021                        *cs++ = upper_32_bits(pd_daddr);
1022                        *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
1023                        *cs++ = lower_32_bits(pd_daddr);
1024                }
1025                *cs++ = MI_NOOP;
1026                intel_ring_advance(rq, cs);
1027        } else {
1028                /* ppGTT is not part of the legacy context image */
1029                gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));
1030        }
1031
1032        return 0;
1033}
1034
1035static bool skip_ppgtt_update(struct intel_context *ce, void *data)
1036{
1037        if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
1038                return !ce->state;
1039        else
1040                return !atomic_read(&ce->pin_count);
1041}
1042
1043static int set_ppgtt(struct drm_i915_file_private *file_priv,
1044                     struct i915_gem_context *ctx,
1045                     struct drm_i915_gem_context_param *args)
1046{
1047        struct i915_address_space *vm, *old;
1048        int err;
1049
1050        if (args->size)
1051                return -EINVAL;
1052
1053        if (!ctx->vm)
1054                return -ENODEV;
1055
1056        if (upper_32_bits(args->value))
1057                return -ENOENT;
1058
1059        err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
1060        if (err)
1061                return err;
1062
1063        vm = idr_find(&file_priv->vm_idr, args->value);
1064        if (vm)
1065                i915_vm_get(vm);
1066        mutex_unlock(&file_priv->vm_idr_lock);
1067        if (!vm)
1068                return -ENOENT;
1069
1070        err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
1071        if (err)
1072                goto out;
1073
1074        if (vm == ctx->vm)
1075                goto unlock;
1076
1077        /* Teardown the existing obj:vma cache, it will have to be rebuilt. */
1078        mutex_lock(&ctx->mutex);
1079        lut_close(ctx);
1080        mutex_unlock(&ctx->mutex);
1081
1082        old = __set_ppgtt(ctx, vm);
1083
1084        /*
1085         * We need to flush any requests using the current ppgtt before
1086         * we release it as the requests do not hold a reference themselves,
1087         * only indirectly through the context.
1088         */
1089        err = context_barrier_task(ctx, ALL_ENGINES,
1090                                   skip_ppgtt_update,
1091                                   emit_ppgtt_update,
1092                                   set_ppgtt_barrier,
1093                                   old);
1094        if (err) {
1095                i915_vm_put(__set_ppgtt(ctx, old));
1096                i915_vm_put(old);
1097        }
1098
1099unlock:
1100        mutex_unlock(&ctx->i915->drm.struct_mutex);
1101
1102out:
1103        i915_vm_put(vm);
1104        return err;
1105}
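/*
 * Illustrative sketch (not part of this file): the userspace side of the
 * get_ppgtt()/set_ppgtt() paths above, e.g. creating one VM and sharing it
 * between two contexts. "fd", "ctx_a" and "ctx_b" are assumptions for the
 * example; the ioctls and structs are the standard ones from
 * include/uapi/drm/i915_drm.h.
 *
 *	struct drm_i915_gem_vm_control vm = {};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm);
 *
 *	struct drm_i915_gem_context_param p = {
 *		.param = I915_CONTEXT_PARAM_VM,
 *		.value = vm.vm_id,
 *	};
 *	p.ctx_id = ctx_a;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *	p.ctx_id = ctx_b;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */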
1106
1107static int gen8_emit_rpcs_config(struct i915_request *rq,
1108                                 struct intel_context *ce,
1109                                 struct intel_sseu sseu)
1110{
1111        u64 offset;
1112        u32 *cs;
1113
1114        cs = intel_ring_begin(rq, 4);
1115        if (IS_ERR(cs))
1116                return PTR_ERR(cs);
1117
1118        offset = i915_ggtt_offset(ce->state) +
1119                 LRC_STATE_PN * PAGE_SIZE +
1120                 (CTX_R_PWR_CLK_STATE + 1) * 4;
1121
1122        *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1123        *cs++ = lower_32_bits(offset);
1124        *cs++ = upper_32_bits(offset);
1125        *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
1126
1127        intel_ring_advance(rq, cs);
1128
1129        return 0;
1130}
1131
1132static int
1133gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
1134{
1135        struct i915_request *rq;
1136        int ret;
1137
1138        lockdep_assert_held(&ce->pin_mutex);
1139
1140        /*
1141         * If the context is not idle, we have to submit an ordered request to
1142         * modify its context image via the kernel context (writing to our own
 1143         * image, or into the registers directly, does not stick). Pristine
1144         * and idle contexts will be configured on pinning.
1145         */
1146        if (!intel_context_is_pinned(ce))
1147                return 0;
1148
1149        rq = i915_request_create(ce->engine->kernel_context);
1150        if (IS_ERR(rq))
1151                return PTR_ERR(rq);
1152
1153        /* Serialise with the remote context */
1154        ret = intel_context_prepare_remote_request(ce, rq);
1155        if (ret == 0)
1156                ret = gen8_emit_rpcs_config(rq, ce, sseu);
1157
1158        i915_request_add(rq);
1159        return ret;
1160}
1161
1162static int
1163__intel_context_reconfigure_sseu(struct intel_context *ce,
1164                                 struct intel_sseu sseu)
1165{
1166        int ret;
1167
1168        GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
1169
1170        ret = intel_context_lock_pinned(ce);
1171        if (ret)
1172                return ret;
1173
1174        /* Nothing to do if unmodified. */
1175        if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
1176                goto unlock;
1177
1178        ret = gen8_modify_rpcs(ce, sseu);
1179        if (!ret)
1180                ce->sseu = sseu;
1181
1182unlock:
1183        intel_context_unlock_pinned(ce);
1184        return ret;
1185}
1186
1187static int
1188intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
1189{
1190        struct drm_i915_private *i915 = ce->engine->i915;
1191        int ret;
1192
1193        ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1194        if (ret)
1195                return ret;
1196
1197        ret = __intel_context_reconfigure_sseu(ce, sseu);
1198
1199        mutex_unlock(&i915->drm.struct_mutex);
1200
1201        return ret;
1202}
1203
1204static int
1205user_to_context_sseu(struct drm_i915_private *i915,
1206                     const struct drm_i915_gem_context_param_sseu *user,
1207                     struct intel_sseu *context)
1208{
1209        const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
1210
1211        /* No zeros in any field. */
1212        if (!user->slice_mask || !user->subslice_mask ||
1213            !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1214                return -EINVAL;
1215
 1216        /* Max >= min. */
1217        if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1218                return -EINVAL;
1219
1220        /*
1221         * Some future proofing on the types since the uAPI is wider than the
1222         * current internal implementation.
1223         */
1224        if (overflows_type(user->slice_mask, context->slice_mask) ||
1225            overflows_type(user->subslice_mask, context->subslice_mask) ||
1226            overflows_type(user->min_eus_per_subslice,
1227                           context->min_eus_per_subslice) ||
1228            overflows_type(user->max_eus_per_subslice,
1229                           context->max_eus_per_subslice))
1230                return -EINVAL;
1231
1232        /* Check validity against hardware. */
1233        if (user->slice_mask & ~device->slice_mask)
1234                return -EINVAL;
1235
1236        if (user->subslice_mask & ~device->subslice_mask[0])
1237                return -EINVAL;
1238
1239        if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1240                return -EINVAL;
1241
1242        context->slice_mask = user->slice_mask;
1243        context->subslice_mask = user->subslice_mask;
1244        context->min_eus_per_subslice = user->min_eus_per_subslice;
1245        context->max_eus_per_subslice = user->max_eus_per_subslice;
1246
1247        /* Part specific restrictions. */
1248        if (IS_GEN(i915, 11)) {
1249                unsigned int hw_s = hweight8(device->slice_mask);
1250                unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1251                unsigned int req_s = hweight8(context->slice_mask);
1252                unsigned int req_ss = hweight8(context->subslice_mask);
1253
1254                /*
1255                 * Only full subslice enablement is possible if more than one
1256                 * slice is turned on.
1257                 */
1258                if (req_s > 1 && req_ss != hw_ss_per_s)
1259                        return -EINVAL;
1260
1261                /*
1262                 * If more than four (SScount bitfield limit) subslices are
1263                 * requested then the number has to be even.
1264                 */
1265                if (req_ss > 4 && (req_ss & 1))
1266                        return -EINVAL;
1267
1268                /*
1269                 * If only one slice is enabled and subslice count is below the
 1270                 * device full enablement, it must be at most half of all the
 1271                 * available subslices.
1272                 */
1273                if (req_s == 1 && req_ss < hw_ss_per_s &&
1274                    req_ss > (hw_ss_per_s / 2))
1275                        return -EINVAL;
1276
1277                /* ABI restriction - VME use case only. */
1278
1279                /* All slices or one slice only. */
1280                if (req_s != 1 && req_s != hw_s)
1281                        return -EINVAL;
1282
1283                /*
1284                 * Half subslices or full enablement only when one slice is
1285                 * enabled.
1286                 */
1287                if (req_s == 1 &&
1288                    (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1289                        return -EINVAL;
1290
1291                /* No EU configuration changes. */
1292                if ((user->min_eus_per_subslice !=
1293                     device->max_eus_per_subslice) ||
1294                    (user->max_eus_per_subslice !=
1295                     device->max_eus_per_subslice))
1296                        return -EINVAL;
1297        }
1298
1299        return 0;
1300}
1301
1302static int set_sseu(struct i915_gem_context *ctx,
1303                    struct drm_i915_gem_context_param *args)
1304{
1305        struct drm_i915_private *i915 = ctx->i915;
1306        struct drm_i915_gem_context_param_sseu user_sseu;
1307        struct intel_context *ce;
1308        struct intel_sseu sseu;
1309        unsigned long lookup;
1310        int ret;
1311
1312        if (args->size < sizeof(user_sseu))
1313                return -EINVAL;
1314
1315        if (!IS_GEN(i915, 11))
1316                return -ENODEV;
1317
1318        if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1319                           sizeof(user_sseu)))
1320                return -EFAULT;
1321
1322        if (user_sseu.rsvd)
1323                return -EINVAL;
1324
1325        if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1326                return -EINVAL;
1327
1328        lookup = 0;
1329        if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1330                lookup |= LOOKUP_USER_INDEX;
1331
1332        ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1333        if (IS_ERR(ce))
1334                return PTR_ERR(ce);
1335
1336        /* Only render engine supports RPCS configuration. */
1337        if (ce->engine->class != RENDER_CLASS) {
1338                ret = -ENODEV;
1339                goto out_ce;
1340        }
1341
1342        ret = user_to_context_sseu(i915, &user_sseu, &sseu);
1343        if (ret)
1344                goto out_ce;
1345
1346        ret = intel_context_reconfigure_sseu(ce, sseu);
1347        if (ret)
1348                goto out_ce;
1349
1350        args->size = sizeof(user_sseu);
1351
1352out_ce:
1353        intel_context_put(ce);
1354        return ret;
1355}
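/*
 * Illustrative sketch (not part of this file): the userspace call that
 * lands in set_sseu() above, restricting the render engine of a context to
 * a reduced slice/subslice configuration on gen11 (the VME use case). "fd",
 * "ctx_id" and the mask values are assumptions for the example; the param
 * and structs are the standard ones from include/uapi/drm/i915_drm.h.
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = {
 *			.engine_class = I915_ENGINE_CLASS_RENDER,
 *			.engine_instance = 0,
 *		},
 *		.slice_mask = 0x1,
 *		.subslice_mask = 0xf,
 *		.min_eus_per_subslice = 8,
 *		.max_eus_per_subslice = 8,
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (uintptr_t)&sseu,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */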
1356
1357struct set_engines {
1358        struct i915_gem_context *ctx;
1359        struct i915_gem_engines *engines;
1360};
1361
1362static int
1363set_engines__load_balance(struct i915_user_extension __user *base, void *data)
1364{
1365        struct i915_context_engines_load_balance __user *ext =
1366                container_of_user(base, typeof(*ext), base);
1367        const struct set_engines *set = data;
1368        struct intel_engine_cs *stack[16];
1369        struct intel_engine_cs **siblings;
1370        struct intel_context *ce;
1371        u16 num_siblings, idx;
1372        unsigned int n;
1373        int err;
1374
1375        if (!HAS_EXECLISTS(set->ctx->i915))
1376                return -ENODEV;
1377
1378        if (USES_GUC_SUBMISSION(set->ctx->i915))
 1379                return -ENODEV; /* not implemented yet */
1380
1381        if (get_user(idx, &ext->engine_index))
1382                return -EFAULT;
1383
1384        if (idx >= set->engines->num_engines) {
1385                DRM_DEBUG("Invalid placement value, %d >= %d\n",
1386                          idx, set->engines->num_engines);
1387                return -EINVAL;
1388        }
1389
1390        idx = array_index_nospec(idx, set->engines->num_engines);
1391        if (set->engines->engines[idx]) {
1392                DRM_DEBUG("Invalid placement[%d], already occupied\n", idx);
1393                return -EEXIST;
1394        }
1395
1396        if (get_user(num_siblings, &ext->num_siblings))
1397                return -EFAULT;
1398
1399        err = check_user_mbz(&ext->flags);
1400        if (err)
1401                return err;
1402
1403        err = check_user_mbz(&ext->mbz64);
1404        if (err)
1405                return err;
1406
1407        siblings = stack;
1408        if (num_siblings > ARRAY_SIZE(stack)) {
1409                siblings = kmalloc_array(num_siblings,
1410                                         sizeof(*siblings),
1411                                         GFP_KERNEL);
1412                if (!siblings)
1413                        return -ENOMEM;
1414        }
1415
1416        for (n = 0; n < num_siblings; n++) {
1417                struct i915_engine_class_instance ci;
1418
1419                if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
1420                        err = -EFAULT;
1421                        goto out_siblings;
1422                }
1423
1424                siblings[n] = intel_engine_lookup_user(set->ctx->i915,
1425                                                       ci.engine_class,
1426                                                       ci.engine_instance);
1427                if (!siblings[n]) {
1428                        DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n",
1429                                  n, ci.engine_class, ci.engine_instance);
1430                        err = -EINVAL;
1431                        goto out_siblings;
1432                }
1433        }
1434
1435        ce = intel_execlists_create_virtual(set->ctx, siblings, n);
1436        if (IS_ERR(ce)) {
1437                err = PTR_ERR(ce);
1438                goto out_siblings;
1439        }
1440
1441        if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
1442                intel_context_put(ce);
1443                err = -EEXIST;
1444                goto out_siblings;
1445        }
1446
1447out_siblings:
1448        if (siblings != stack)
1449                kfree(siblings);
1450
1451        return err;
1452}
1453
1454static int
1455set_engines__bond(struct i915_user_extension __user *base, void *data)
1456{
1457        struct i915_context_engines_bond __user *ext =
1458                container_of_user(base, typeof(*ext), base);
1459        const struct set_engines *set = data;
1460        struct i915_engine_class_instance ci;
1461        struct intel_engine_cs *virtual;
1462        struct intel_engine_cs *master;
1463        u16 idx, num_bonds;
1464        int err, n;
1465
1466        if (get_user(idx, &ext->virtual_index))
1467                return -EFAULT;
1468
1469        if (idx >= set->engines->num_engines) {
1470                DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n",
1471                          idx, set->engines->num_engines);
1472                return -EINVAL;
1473        }
1474
1475        idx = array_index_nospec(idx, set->engines->num_engines);
1476        if (!set->engines->engines[idx]) {
1477                DRM_DEBUG("Invalid engine at %d\n", idx);
1478                return -EINVAL;
1479        }
1480        virtual = set->engines->engines[idx]->engine;
1481
1482        err = check_user_mbz(&ext->flags);
1483        if (err)
1484                return err;
1485
1486        for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
1487                err = check_user_mbz(&ext->mbz64[n]);
1488                if (err)
1489                        return err;
1490        }
1491
1492        if (copy_from_user(&ci, &ext->master, sizeof(ci)))
1493                return -EFAULT;
1494
1495        master = intel_engine_lookup_user(set->ctx->i915,
1496                                          ci.engine_class, ci.engine_instance);
1497        if (!master) {
1498                DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n",
1499                          ci.engine_class, ci.engine_instance);
1500                return -EINVAL;
1501        }
1502
1503        if (get_user(num_bonds, &ext->num_bonds))
1504                return -EFAULT;
1505
1506        for (n = 0; n < num_bonds; n++) {
1507                struct intel_engine_cs *bond;
1508
1509                if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
1510                        return -EFAULT;
1511
1512                bond = intel_engine_lookup_user(set->ctx->i915,
1513                                                ci.engine_class,
1514                                                ci.engine_instance);
1515                if (!bond) {
1516                        DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
1517                                  n, ci.engine_class, ci.engine_instance);
1518                        return -EINVAL;
1519                }
1520
1521                /*
1522                 * A non-virtual engine has no siblings to choose between; and
1523                 * a submit fence will always be directed to the one engine.
1524                 */
1525                if (intel_engine_is_virtual(virtual)) {
1526                        err = intel_virtual_engine_attach_bond(virtual,
1527                                                               master,
1528                                                               bond);
1529                        if (err)
1530                                return err;
1531                }
1532        }
1533
1534        return 0;
1535}
1536
1537static const i915_user_extension_fn set_engines__extensions[] = {
1538        [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
1539        [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
1540};
1541
1542static int
1543set_engines(struct i915_gem_context *ctx,
1544            const struct drm_i915_gem_context_param *args)
1545{
1546        struct i915_context_param_engines __user *user =
1547                u64_to_user_ptr(args->value);
1548        struct set_engines set = { .ctx = ctx };
1549        unsigned int num_engines, n;
1550        u64 extensions;
1551        int err;
1552
1553        if (!args->size) { /* switch back to legacy user_ring_map */
1554                if (!i915_gem_context_user_engines(ctx))
1555                        return 0;
1556
1557                set.engines = default_engines(ctx);
1558                if (IS_ERR(set.engines))
1559                        return PTR_ERR(set.engines);
1560
1561                goto replace;
1562        }
1563
1564        BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
1565        if (args->size < sizeof(*user) ||
1566            !IS_ALIGNED(args->size, sizeof(*user->engines))) {
1567                DRM_DEBUG("Invalid size for engine array: %d\n",
1568                          args->size);
1569                return -EINVAL;
1570        }
1571
1572        /*
1573         * Note that I915_EXEC_RING_MASK limits execbuf to only using the
1574         * first 64 engines defined here.
1575         */
1576        num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
1577
1578        set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
1579                              GFP_KERNEL);
1580        if (!set.engines)
1581                return -ENOMEM;
1582
1583        init_rcu_head(&set.engines->rcu);
1584        for (n = 0; n < num_engines; n++) {
1585                struct i915_engine_class_instance ci;
1586                struct intel_engine_cs *engine;
1587                struct intel_context *ce;
1588
1589                if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
1590                        __free_engines(set.engines, n);
1591                        return -EFAULT;
1592                }
1593
1594                if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
1595                    ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
1596                        set.engines->engines[n] = NULL;
1597                        continue;
1598                }
1599
1600                engine = intel_engine_lookup_user(ctx->i915,
1601                                                  ci.engine_class,
1602                                                  ci.engine_instance);
1603                if (!engine) {
1604                        DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n",
1605                                  n, ci.engine_class, ci.engine_instance);
1606                        __free_engines(set.engines, n);
1607                        return -ENOENT;
1608                }
1609
1610                ce = intel_context_create(ctx, engine);
1611                if (IS_ERR(ce)) {
1612                        __free_engines(set.engines, n);
1613                        return PTR_ERR(ce);
1614                }
1615
1616                set.engines->engines[n] = ce;
1617        }
1618        set.engines->num_engines = num_engines;
1619
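        /*
         * Read the head of the user's extension chain; a fault on the
         * extensions pointer itself reports -EFAULT, otherwise each linked
         * extension (load-balance, bond) is applied to the new engine map.
         */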
1620        err = -EFAULT;
1621        if (!get_user(extensions, &user->extensions))
1622                err = i915_user_extensions(u64_to_user_ptr(extensions),
1623                                           set_engines__extensions,
1624                                           ARRAY_SIZE(set_engines__extensions),
1625                                           &set);
1626        if (err) {
1627                free_engines(set.engines);
1628                return err;
1629        }
1630
1631replace:
1632        mutex_lock(&ctx->engines_mutex);
1633        if (args->size)
1634                i915_gem_context_set_user_engines(ctx);
1635        else
1636                i915_gem_context_clear_user_engines(ctx);
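        /*
         * Publish the new map; readers under rcu_read_lock() see either the
         * old or the new engines, and the old map (now left in set.engines)
         * is released below after an RCU grace period.
         */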
1637        rcu_swap_protected(ctx->engines, set.engines, 1);
1638        mutex_unlock(&ctx->engines_mutex);
1639
1640        call_rcu(&set.engines->rcu, free_engines_rcu);
1641
1642        return 0;
1643}
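/*
 * From userspace, the engine map above is installed with the SETPARAM ioctl.
 * A minimal sketch, assuming an open i915 DRM fd, an existing context handle
 * in ctx_id, libdrm's drmIoctl() and no error handling:
 *
 *        I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
 *                .engines = {
 *                        { .engine_class = I915_ENGINE_CLASS_RENDER,
 *                          .engine_instance = 0 },
 *                        { .engine_class = I915_ENGINE_CLASS_COPY,
 *                          .engine_instance = 0 },
 *                },
 *        };
 *        struct drm_i915_gem_context_param p = {
 *                .ctx_id = ctx_id,
 *                .param = I915_CONTEXT_PARAM_ENGINES,
 *                .size = sizeof(engines),
 *                .value = (uintptr_t)&engines,
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *
 * Passing size == 0 instead reverts the context to the legacy ring map, as
 * handled at the top of set_engines().
 */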
1644
1645static struct i915_gem_engines *
1646__copy_engines(struct i915_gem_engines *e)
1647{
1648        struct i915_gem_engines *copy;
1649        unsigned int n;
1650
1651        copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1652        if (!copy)
1653                return ERR_PTR(-ENOMEM);
1654
1655        init_rcu_head(&copy->rcu);
1656        for (n = 0; n < e->num_engines; n++) {
1657                if (e->engines[n])
1658                        copy->engines[n] = intel_context_get(e->engines[n]);
1659                else
1660                        copy->engines[n] = NULL;
1661        }
1662        copy->num_engines = n;
1663
1664        return copy;
1665}
1666
1667static int
1668get_engines(struct i915_gem_context *ctx,
1669            struct drm_i915_gem_context_param *args)
1670{
1671        struct i915_context_param_engines __user *user;
1672        struct i915_gem_engines *e;
1673        size_t n, count, size;
1674        int err = 0;
1675
1676        err = mutex_lock_interruptible(&ctx->engines_mutex);
1677        if (err)
1678                return err;
1679
1680        e = NULL;
1681        if (i915_gem_context_user_engines(ctx))
1682                e = __copy_engines(i915_gem_context_engines(ctx));
1683        mutex_unlock(&ctx->engines_mutex);
1684        if (IS_ERR_OR_NULL(e)) {
1685                args->size = 0;
1686                return PTR_ERR_OR_ZERO(e);
1687        }
1688
1689        count = e->num_engines;
1690
1691        /* Be paranoid in case the size computed for the uAPI array overflows */
1692        if (!check_struct_size(user, engines, count, &size)) {
1693                err = -EINVAL;
1694                goto err_free;
1695        }
1696        if (overflows_type(size, args->size)) {
1697                err = -EINVAL;
1698                goto err_free;
1699        }
1700
1701        if (!args->size) {
1702                args->size = size;
1703                goto err_free;
1704        }
1705
1706        if (args->size < size) {
1707                err = -EINVAL;
1708                goto err_free;
1709        }
1710
1711        user = u64_to_user_ptr(args->value);
1712        if (!access_ok(user, size)) {
1713                err = -EFAULT;
1714                goto err_free;
1715        }
1716
1717        if (put_user(0, &user->extensions)) {
1718                err = -EFAULT;
1719                goto err_free;
1720        }
1721
1722        for (n = 0; n < count; n++) {
1723                struct i915_engine_class_instance ci = {
1724                        .engine_class = I915_ENGINE_CLASS_INVALID,
1725                        .engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
1726                };
1727
1728                if (e->engines[n]) {
1729                        ci.engine_class = e->engines[n]->engine->uabi_class;
1730                        ci.engine_instance = e->engines[n]->engine->uabi_instance;
1731                }
1732
1733                if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
1734                        err = -EFAULT;
1735                        goto err_free;
1736                }
1737        }
1738
1739        args->size = size;
1740
1741err_free:
1742        free_engines(e);
1743        return err;
1744}
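/*
 * Reading the map back follows the usual two-step query pattern: call
 * GETPARAM with size == 0 to learn how large the engines array is, then call
 * it again with a suitably sized buffer. A hedged sketch (libdrm assumed,
 * error handling elided; size stays 0 if no user map has been installed):
 *
 *        struct drm_i915_gem_context_param p = {
 *                .ctx_id = ctx_id,
 *                .param = I915_CONTEXT_PARAM_ENGINES,
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *        p.value = (uintptr_t)calloc(1, p.size);
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 */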
1745
1746static int ctx_setparam(struct drm_i915_file_private *fpriv,
1747                        struct i915_gem_context *ctx,
1748                        struct drm_i915_gem_context_param *args)
1749{
1750        int ret = 0;
1751
1752        switch (args->param) {
1753        case I915_CONTEXT_PARAM_NO_ZEROMAP:
1754                if (args->size)
1755                        ret = -EINVAL;
1756                else if (args->value)
1757                        set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1758                else
1759                        clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1760                break;
1761
1762        case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
1763                if (args->size)
1764                        ret = -EINVAL;
1765                else if (args->value)
1766                        i915_gem_context_set_no_error_capture(ctx);
1767                else
1768                        i915_gem_context_clear_no_error_capture(ctx);
1769                break;
1770
1771        case I915_CONTEXT_PARAM_BANNABLE:
1772                if (args->size)
1773                        ret = -EINVAL;
1774                else if (!capable(CAP_SYS_ADMIN) && !args->value)
1775                        ret = -EPERM;
1776                else if (args->value)
1777                        i915_gem_context_set_bannable(ctx);
1778                else
1779                        i915_gem_context_clear_bannable(ctx);
1780                break;
1781
1782        case I915_CONTEXT_PARAM_RECOVERABLE:
1783                if (args->size)
1784                        ret = -EINVAL;
1785                else if (args->value)
1786                        i915_gem_context_set_recoverable(ctx);
1787                else
1788                        i915_gem_context_clear_recoverable(ctx);
1789                break;
1790
1791        case I915_CONTEXT_PARAM_PRIORITY:
1792                {
1793                        s64 priority = args->value;
1794
1795                        if (args->size)
1796                                ret = -EINVAL;
1797                        else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
1798                                ret = -ENODEV;
1799                        else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
1800                                 priority < I915_CONTEXT_MIN_USER_PRIORITY)
1801                                ret = -EINVAL;
1802                        else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
1803                                 !capable(CAP_SYS_NICE))
1804                                ret = -EPERM;
1805                        else
1806                                ctx->sched.priority =
1807                                        I915_USER_PRIORITY(priority);
1808                }
1809                break;
1810
1811        case I915_CONTEXT_PARAM_SSEU:
1812                ret = set_sseu(ctx, args);
1813                break;
1814
1815        case I915_CONTEXT_PARAM_VM:
1816                ret = set_ppgtt(fpriv, ctx, args);
1817                break;
1818
1819        case I915_CONTEXT_PARAM_ENGINES:
1820                ret = set_engines(ctx, args);
1821                break;
1822
1823        case I915_CONTEXT_PARAM_BAN_PERIOD:
1824        default:
1825                ret = -EINVAL;
1826                break;
1827        }
1828
1829        return ret;
1830}
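/*
 * Each case above maps onto a plain SETPARAM call. A sketch of raising the
 * context priority (values must lie between I915_CONTEXT_MIN_USER_PRIORITY
 * and I915_CONTEXT_MAX_USER_PRIORITY, anything above the default needs
 * CAP_SYS_NICE, and the scheduler must advertise priority support):
 *
 *        struct drm_i915_gem_context_param p = {
 *                .ctx_id = ctx_id,
 *                .param = I915_CONTEXT_PARAM_PRIORITY,
 *                .value = 512,
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */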
1831
1832struct create_ext {
1833        struct i915_gem_context *ctx;
1834        struct drm_i915_file_private *fpriv;
1835};
1836
1837static int create_setparam(struct i915_user_extension __user *ext, void *data)
1838{
1839        struct drm_i915_gem_context_create_ext_setparam local;
1840        const struct create_ext *arg = data;
1841
1842        if (copy_from_user(&local, ext, sizeof(local)))
1843                return -EFAULT;
1844
1845        if (local.param.ctx_id)
1846                return -EINVAL;
1847
1848        return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
1849}
1850
1851static int clone_engines(struct i915_gem_context *dst,
1852                         struct i915_gem_context *src)
1853{
1854        struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1855        struct i915_gem_engines *clone;
1856        bool user_engines;
1857        unsigned long n;
1858
1859        clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1860        if (!clone)
1861                goto err_unlock;
1862
1863        init_rcu_head(&clone->rcu);
1864        for (n = 0; n < e->num_engines; n++) {
1865                struct intel_engine_cs *engine;
1866
1867                if (!e->engines[n]) {
1868                        clone->engines[n] = NULL;
1869                        continue;
1870                }
1871                engine = e->engines[n]->engine;
1872
1873                /*
1874                 * Virtual engines are singletons; they can only exist
1875                 * inside a single context, because they embed their
1876                 * HW context... As each virtual context implies a single
1877                 * timeline (each engine can only dequeue a single request
1878                 * at any time), it would be surprising for two contexts
1879                 * to use the same engine. So let's create a copy of
1880                 * the virtual engine instead.
1881                 */
1882                if (intel_engine_is_virtual(engine))
1883                        clone->engines[n] =
1884                                intel_execlists_clone_virtual(dst, engine);
1885                else
1886                        clone->engines[n] = intel_context_create(dst, engine);
1887                if (IS_ERR_OR_NULL(clone->engines[n])) {
1888                        __free_engines(clone, n);
1889                        goto err_unlock;
1890                }
1891        }
1892        clone->num_engines = n;
1893
1894        user_engines = i915_gem_context_user_engines(src);
1895        i915_gem_context_unlock_engines(src);
1896
1897        free_engines(dst->engines);
1898        RCU_INIT_POINTER(dst->engines, clone);
1899        if (user_engines)
1900                i915_gem_context_set_user_engines(dst);
1901        else
1902                i915_gem_context_clear_user_engines(dst);
1903        return 0;
1904
1905err_unlock:
1906        i915_gem_context_unlock_engines(src);
1907        return -ENOMEM;
1908}
1909
1910static int clone_flags(struct i915_gem_context *dst,
1911                       struct i915_gem_context *src)
1912{
1913        dst->user_flags = src->user_flags;
1914        return 0;
1915}
1916
1917static int clone_schedattr(struct i915_gem_context *dst,
1918                           struct i915_gem_context *src)
1919{
1920        dst->sched = src->sched;
1921        return 0;
1922}
1923
1924static int clone_sseu(struct i915_gem_context *dst,
1925                      struct i915_gem_context *src)
1926{
1927        struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1928        struct i915_gem_engines *clone;
1929        unsigned long n;
1930        int err;
1931
1932        clone = dst->engines; /* no locking required; sole access */
1933        if (e->num_engines != clone->num_engines) {
1934                err = -EINVAL;
1935                goto unlock;
1936        }
1937
1938        for (n = 0; n < e->num_engines; n++) {
1939                struct intel_context *ce = e->engines[n];
1940
1941                if (clone->engines[n]->engine->class != ce->engine->class) {
1942                        /* Must have compatible engine maps! */
1943                        err = -EINVAL;
1944                        goto unlock;
1945                }
1946
1947                /* serialises with set_sseu */
1948                err = intel_context_lock_pinned(ce);
1949                if (err)
1950                        goto unlock;
1951
1952                clone->engines[n]->sseu = ce->sseu;
1953                intel_context_unlock_pinned(ce);
1954        }
1955
1956        err = 0;
1957unlock:
1958        i915_gem_context_unlock_engines(src);
1959        return err;
1960}
1961
1962static int clone_timeline(struct i915_gem_context *dst,
1963                          struct i915_gem_context *src)
1964{
1965        if (src->timeline)
1966                __assign_timeline(dst, src->timeline);
1967
1968        return 0;
1969}
1970
1971static int clone_vm(struct i915_gem_context *dst,
1972                    struct i915_gem_context *src)
1973{
1974        struct i915_address_space *vm;
1975
1976        rcu_read_lock();
1977        do {
1978                vm = READ_ONCE(src->vm);
1979                if (!vm)
1980                        break;
1981
1982                if (!kref_get_unless_zero(&vm->ref))
1983                        continue;
1984
1985                /*
1986                 * This ppgtt may have been reallocated between
1987                 * the read and the kref, and reassigned to a third
1988                 * context. In order to avoid inadvertent sharing
1989                 * of this ppgtt with that third context (and not
1990                 * src), we have to confirm that we have the same
1991                 * ppgtt after passing through the strong memory
1992                 * barrier implied by a successful
1993                 * kref_get_unless_zero().
1994                 *
1995                 * Once we have acquired the current ppgtt of src,
1996                 * we no longer care if it is released from src, as
1997                 * it cannot be reallocated elsewhere.
1998                 */
1999
2000                if (vm == READ_ONCE(src->vm))
2001                        break;
2002
2003                i915_vm_put(vm);
2004        } while (1);
2005        rcu_read_unlock();
2006
2007        if (vm) {
2008                __assign_ppgtt(dst, vm);
2009                i915_vm_put(vm);
2010        }
2011
2012        return 0;
2013}
2014
2015static int create_clone(struct i915_user_extension __user *ext, void *data)
2016{
2017        static int (* const fn[])(struct i915_gem_context *dst,
2018                                  struct i915_gem_context *src) = {
2019#define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
2020                MAP(ENGINES, clone_engines),
2021                MAP(FLAGS, clone_flags),
2022                MAP(SCHEDATTR, clone_schedattr),
2023                MAP(SSEU, clone_sseu),
2024                MAP(TIMELINE, clone_timeline),
2025                MAP(VM, clone_vm),
2026#undef MAP
2027        };
2028        struct drm_i915_gem_context_create_ext_clone local;
2029        const struct create_ext *arg = data;
2030        struct i915_gem_context *dst = arg->ctx;
2031        struct i915_gem_context *src;
2032        int err, bit;
2033
2034        if (copy_from_user(&local, ext, sizeof(local)))
2035                return -EFAULT;
2036
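        /*
         * Build-time check: every flag bit without a handler in fn[] must be
         * covered by I915_CONTEXT_CLONE_UNKNOWN, so a new clone flag cannot
         * be added to the uAPI without also extending the table above.
         */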
2037        BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
2038                     I915_CONTEXT_CLONE_UNKNOWN);
2039
2040        if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
2041                return -EINVAL;
2042
2043        if (local.rsvd)
2044                return -EINVAL;
2045
2046        rcu_read_lock();
2047        src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
2048        rcu_read_unlock();
2049        if (!src)
2050                return -ENOENT;
2051
2052        GEM_BUG_ON(src == dst);
2053
2054        for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
2055                if (!(local.flags & BIT(bit)))
2056                        continue;
2057
2058                err = fn[bit](dst, src);
2059                if (err)
2060                        return err;
2061        }
2062
2063        return 0;
2064}
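/*
 * A userspace sketch of cloning at create time (illustrative, not from a real
 * client; parent_ctx_id is an existing context handle, errors are ignored):
 *
 *        struct drm_i915_gem_context_create_ext_clone clone = {
 *                .base = { .name = I915_CONTEXT_CREATE_EXT_CLONE },
 *                .clone_id = parent_ctx_id,
 *                .flags = I915_CONTEXT_CLONE_ENGINES | I915_CONTEXT_CLONE_VM,
 *        };
 *        struct drm_i915_gem_context_create_ext create = {
 *                .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *                .extensions = (uintptr_t)&clone,
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *
 * On success create.ctx_id holds the new context, sharing the parent's engine
 * map and address space.
 */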
2065
2066static const i915_user_extension_fn create_extensions[] = {
2067        [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
2068        [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
2069};
2070
2071static bool client_is_banned(struct drm_i915_file_private *file_priv)
2072{
2073        return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
2074}
2075
2076int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2077                                  struct drm_file *file)
2078{
2079        struct drm_i915_private *i915 = to_i915(dev);
2080        struct drm_i915_gem_context_create_ext *args = data;
2081        struct create_ext ext_data;
2082        int ret;
2083
2084        if (!DRIVER_CAPS(i915)->has_logical_contexts)
2085                return -ENODEV;
2086
2087        if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
2088                return -EINVAL;
2089
2090        ret = intel_gt_terminally_wedged(&i915->gt);
2091        if (ret)
2092                return ret;
2093
2094        ext_data.fpriv = file->driver_priv;
2095        if (client_is_banned(ext_data.fpriv)) {
2096                DRM_DEBUG("client %s[%d] banned from creating ctx\n",
2097                          current->comm,
2098                          pid_nr(get_task_pid(current, PIDTYPE_PID)));
2099                return -EIO;
2100        }
2101
2102        ret = i915_mutex_lock_interruptible(dev);
2103        if (ret)
2104                return ret;
2105
2106        ext_data.ctx = i915_gem_create_context(i915, args->flags);
2107        mutex_unlock(&dev->struct_mutex);
2108        if (IS_ERR(ext_data.ctx))
2109                return PTR_ERR(ext_data.ctx);
2110
2111        if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
2112                ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
2113                                           create_extensions,
2114                                           ARRAY_SIZE(create_extensions),
2115                                           &ext_data);
2116                if (ret)
2117                        goto err_ctx;
2118        }
2119
2120        ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
2121        if (ret < 0)
2122                goto err_ctx;
2123
2124        args->ctx_id = ret;
2125        DRM_DEBUG("HW context %d created\n", args->ctx_id);
2126
2127        return 0;
2128
2129err_ctx:
2130        context_close(ext_data.ctx);
2131        return ret;
2132}
2133
2134int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2135                                   struct drm_file *file)
2136{
2137        struct drm_i915_gem_context_destroy *args = data;
2138        struct drm_i915_file_private *file_priv = file->driver_priv;
2139        struct i915_gem_context *ctx;
2140
2141        if (args->pad != 0)
2142                return -EINVAL;
2143
2144        if (!args->ctx_id)
2145                return -ENOENT;
2146
2147        if (mutex_lock_interruptible(&file_priv->context_idr_lock))
2148                return -EINTR;
2149
2150        ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
2151        mutex_unlock(&file_priv->context_idr_lock);
2152        if (!ctx)
2153                return -ENOENT;
2154
2155        context_close(ctx);
2156        return 0;
2157}
2158
2159static int get_sseu(struct i915_gem_context *ctx,
2160                    struct drm_i915_gem_context_param *args)
2161{
2162        struct drm_i915_gem_context_param_sseu user_sseu;
2163        struct intel_context *ce;
2164        unsigned long lookup;
2165        int err;
2166
2167        if (args->size == 0)
2168                goto out;
2169        else if (args->size < sizeof(user_sseu))
2170                return -EINVAL;
2171
2172        if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2173                           sizeof(user_sseu)))
2174                return -EFAULT;
2175
2176        if (user_sseu.rsvd)
2177                return -EINVAL;
2178
2179        if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2180                return -EINVAL;
2181
2182        lookup = 0;
2183        if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2184                lookup |= LOOKUP_USER_INDEX;
2185
2186        ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2187        if (IS_ERR(ce))
2188                return PTR_ERR(ce);
2189
2190        err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2191        if (err) {
2192                intel_context_put(ce);
2193                return err;
2194        }
2195
2196        user_sseu.slice_mask = ce->sseu.slice_mask;
2197        user_sseu.subslice_mask = ce->sseu.subslice_mask;
2198        user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2199        user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2200
2201        intel_context_unlock_pinned(ce);
2202        intel_context_put(ce);
2203
2204        if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2205                         sizeof(user_sseu)))
2206                return -EFAULT;
2207
2208out:
2209        args->size = sizeof(user_sseu);
2210
2211        return 0;
2212}
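/*
 * A sketch of querying the SSEU configuration of the render engine from
 * userspace (assumes libdrm and a valid ctx_id; errors elided):
 *
 *        struct drm_i915_gem_context_param_sseu sseu = {
 *                .engine = {
 *                        .engine_class = I915_ENGINE_CLASS_RENDER,
 *                        .engine_instance = 0,
 *                },
 *        };
 *        struct drm_i915_gem_context_param p = {
 *                .ctx_id = ctx_id,
 *                .param = I915_CONTEXT_PARAM_SSEU,
 *                .size = sizeof(sseu),
 *                .value = (uintptr_t)&sseu,
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *
 * On return sseu.slice_mask and the other mask/EU fields reflect the
 * context's current configuration for that engine.
 */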
2213
2214int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2215                                    struct drm_file *file)
2216{
2217        struct drm_i915_file_private *file_priv = file->driver_priv;
2218        struct drm_i915_gem_context_param *args = data;
2219        struct i915_gem_context *ctx;
2220        int ret = 0;
2221
2222        ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2223        if (!ctx)
2224                return -ENOENT;
2225
2226        switch (args->param) {
2227        case I915_CONTEXT_PARAM_NO_ZEROMAP:
2228                args->size = 0;
2229                args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
2230                break;
2231
2232        case I915_CONTEXT_PARAM_GTT_SIZE:
2233                args->size = 0;
2234                if (ctx->vm)
2235                        args->value = ctx->vm->total;
2236                else if (to_i915(dev)->ggtt.alias)
2237                        args->value = to_i915(dev)->ggtt.alias->vm.total;
2238                else
2239                        args->value = to_i915(dev)->ggtt.vm.total;
2240                break;
2241
2242        case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2243                args->size = 0;
2244                args->value = i915_gem_context_no_error_capture(ctx);
2245                break;
2246
2247        case I915_CONTEXT_PARAM_BANNABLE:
2248                args->size = 0;
2249                args->value = i915_gem_context_is_bannable(ctx);
2250                break;
2251
2252        case I915_CONTEXT_PARAM_RECOVERABLE:
2253                args->size = 0;
2254                args->value = i915_gem_context_is_recoverable(ctx);
2255                break;
2256
2257        case I915_CONTEXT_PARAM_PRIORITY:
2258                args->size = 0;
2259                args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
2260                break;
2261
2262        case I915_CONTEXT_PARAM_SSEU:
2263                ret = get_sseu(ctx, args);
2264                break;
2265
2266        case I915_CONTEXT_PARAM_VM:
2267                ret = get_ppgtt(file_priv, ctx, args);
2268                break;
2269
2270        case I915_CONTEXT_PARAM_ENGINES:
2271                ret = get_engines(ctx, args);
2272                break;
2273
2274        case I915_CONTEXT_PARAM_BAN_PERIOD:
2275        default:
2276                ret = -EINVAL;
2277                break;
2278        }
2279
2280        i915_gem_context_put(ctx);
2281        return ret;
2282}
2283
2284int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2285                                    struct drm_file *file)
2286{
2287        struct drm_i915_file_private *file_priv = file->driver_priv;
2288        struct drm_i915_gem_context_param *args = data;
2289        struct i915_gem_context *ctx;
2290        int ret;
2291
2292        ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2293        if (!ctx)
2294                return -ENOENT;
2295
2296        ret = ctx_setparam(file_priv, ctx, args);
2297
2298        i915_gem_context_put(ctx);
2299        return ret;
2300}
2301
2302int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2303                                       void *data, struct drm_file *file)
2304{
2305        struct drm_i915_private *dev_priv = to_i915(dev);
2306        struct drm_i915_reset_stats *args = data;
2307        struct i915_gem_context *ctx;
2308        int ret;
2309
2310        if (args->flags || args->pad)
2311                return -EINVAL;
2312
2313        ret = -ENOENT;
2314        rcu_read_lock();
2315        ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
2316        if (!ctx)
2317                goto out;
2318
2319        /*
2320         * We opt for unserialised reads here. This may result in tearing
2321         * in the extremely unlikely event of a GPU hang on this context
2322         * while we are querying the stats. If we need that extra layer of protection,
2323         * we should wrap the hangstats with a seqlock.
2324         */
2325
2326        if (capable(CAP_SYS_ADMIN))
2327                args->reset_count = i915_reset_count(&dev_priv->gpu_error);
2328        else
2329                args->reset_count = 0;
2330
2331        args->batch_active = atomic_read(&ctx->guilty_count);
2332        args->batch_pending = atomic_read(&ctx->active_count);
2333
2334        ret = 0;
2335out:
2336        rcu_read_unlock();
2337        return ret;
2338}
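/*
 * A sketch of the corresponding userspace query (libdrm assumed, errors
 * elided): batch_active counts hangs where this context was blamed,
 * batch_pending counts hangs where it was merely active, and reset_count is
 * only reported to CAP_SYS_ADMIN.
 *
 *        struct drm_i915_reset_stats rs = { .ctx_id = ctx_id };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &rs);
 */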
2339
2340int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
2341{
2342        struct drm_i915_private *i915 = ctx->i915;
2343        int err = 0;
2344
2345        mutex_lock(&i915->contexts.mutex);
2346
2347        GEM_BUG_ON(i915_gem_context_is_closed(ctx));
2348
2349        if (list_empty(&ctx->hw_id_link)) {
2350                GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));
2351
2352                err = assign_hw_id(i915, &ctx->hw_id);
2353                if (err)
2354                        goto out_unlock;
2355
2356                list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
2357        }
2358
2359        GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
2360        atomic_inc(&ctx->hw_id_pin_count);
2361
2362out_unlock:
2363        mutex_unlock(&i915->contexts.mutex);
2364        return err;
2365}
2366
2367/* GEM context-engines iterator: for_each_gem_engine() */
2368struct intel_context *
2369i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2370{
2371        const struct i915_gem_engines *e = it->engines;
2372        struct intel_context *ctx;
2373
2374        do {
2375                if (it->idx >= e->num_engines)
2376                        return NULL;
2377
2378                ctx = e->engines[it->idx++];
2379        } while (!ctx);
2380
2381        return ctx;
2382}
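/*
 * In-kernel usage sketch of the iterator via the for_each_gem_engine()
 * helper from i915_gem_context.h, which wraps iter_init/iter_next; NULL
 * slots in the map are skipped, and use() stands in for whatever per-engine
 * work is needed:
 *
 *        struct i915_gem_engines_iter it;
 *        struct intel_context *ce;
 *
 *        for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
 *                use(ce);
 *        i915_gem_context_unlock_engines(ctx);
 */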
2383
2384#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2385#include "selftests/mock_context.c"
2386#include "selftests/i915_gem_context.c"
2387#endif
2388
2389static void i915_global_gem_context_shrink(void)
2390{
2391        kmem_cache_shrink(global.slab_luts);
2392}
2393
2394static void i915_global_gem_context_exit(void)
2395{
2396        kmem_cache_destroy(global.slab_luts);
2397}
2398
2399static struct i915_global_gem_context global = { {
2400        .shrink = i915_global_gem_context_shrink,
2401        .exit = i915_global_gem_context_exit,
2402} };
2403
2404int __init i915_global_gem_context_init(void)
2405{
2406        global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2407        if (!global.slab_luts)
2408                return -ENOMEM;
2409
2410        i915_global_register(&global.base);
2411        return 0;
2412}
2413