linux/drivers/gpu/drm/i915/gem/i915_gem_context.c
   1/*
   2 * SPDX-License-Identifier: MIT
   3 *
   4 * Copyright © 2011-2012 Intel Corporation
   5 */
   6
   7/*
   8 * This file implements HW context support. On gen5+ a HW context consists of an
   9 * opaque GPU object which is referenced at times of context saves and restores.
   10 * With RC6 enabled, the context is also referenced as the GPU enters and exits
   11 * from RC6 (the GPU has its own internal power context, except on gen5). Though
  12 * something like a context does exist for the media ring, the code only
  13 * supports contexts for the render ring.
  14 *
  15 * In software, there is a distinction between contexts created by the user,
  16 * and the default HW context. The default HW context is used by GPU clients
  17 * that do not request setup of their own hardware context. The default
  18 * context's state is never restored to help prevent programming errors. This
   19 * would happen if a client ran and piggy-backed off another client's GPU state.
   20 * The default context only exists to give the GPU some offset to load as the
   21 * current context to invoke a save of the context we actually care about. In fact, the
  22 * code could likely be constructed, albeit in a more complicated fashion, to
  23 * never use the default context, though that limits the driver's ability to
  24 * swap out, and/or destroy other contexts.
  25 *
  26 * All other contexts are created as a request by the GPU client. These contexts
  27 * store GPU state, and thus allow GPU clients to not re-emit state (and
  28 * potentially query certain state) at any time. The kernel driver makes
  29 * certain that the appropriate commands are inserted.
  30 *
   31 * The context life cycle is semi-complicated in that context BOs may live
   32 * longer than the context itself because of the way the hardware and object
   33 * tracking work. Below is a very crude representation of the state machine
  34 * describing the context life.
  35 *                                         refcount     pincount     active
  36 * S0: initial state                          0            0           0
  37 * S1: context created                        1            0           0
  38 * S2: context is currently running           2            1           X
  39 * S3: GPU referenced, but not current        2            0           1
  40 * S4: context is current, but destroyed      1            1           0
  41 * S5: like S3, but destroyed                 1            0           1
  42 *
  43 * The most common (but not all) transitions:
  44 * S0->S1: client creates a context
  45 * S1->S2: client submits execbuf with context
   46 * S2->S3: another client submits an execbuf with a different context
  47 * S3->S1: context object was retired
   48 * S3->S2: client submits another execbuf
  49 * S2->S4: context destroy called with current context
  50 * S3->S5->S0: destroy path
  51 * S4->S5->S0: destroy path on current context
  52 *
  53 * There are two confusing terms used above:
  54 *  The "current context" means the context which is currently running on the
  55 *  GPU. The GPU has loaded its state already and has stored away the gtt
  56 *  offset of the BO. The GPU is not actively referencing the data at this
  57 *  offset, but it will on the next context switch. The only way to avoid this
  58 *  is to do a GPU reset.
  59 *
   60 *  An "active context" is one which was previously the "current context" and is
   61 *  on the active list waiting for the next context switch to occur. Until this
   62 *  happens, the object must remain at the same gtt offset. It is therefore
   63 *  possible to destroy a context while it is still active.
  64 *
  65 */
  66
  67#include <linux/log2.h>
  68#include <linux/nospec.h>
  69
  70#include <drm/drm_syncobj.h>
  71
  72#include "gt/gen6_ppgtt.h"
  73#include "gt/intel_context.h"
  74#include "gt/intel_context_param.h"
  75#include "gt/intel_engine_heartbeat.h"
  76#include "gt/intel_engine_user.h"
  77#include "gt/intel_gpu_commands.h"
  78#include "gt/intel_ring.h"
  79
  80#include "i915_gem_context.h"
  81#include "i915_trace.h"
  82#include "i915_user_extensions.h"
  83
   84#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
  85
  86static struct kmem_cache *slab_luts;
  87
  88struct i915_lut_handle *i915_lut_handle_alloc(void)
  89{
  90        return kmem_cache_alloc(slab_luts, GFP_KERNEL);
  91}
  92
  93void i915_lut_handle_free(struct i915_lut_handle *lut)
  94{
   95        kmem_cache_free(slab_luts, lut);
  96}
  97
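     /*
      * Tear down the handle->vma lookup table for a context being closed:
      * each matching i915_lut_handle is unlinked from its object's lut_list
      * under obj->lut_lock, the radix-tree slot is removed and the vma is
      * closed before the object references are dropped.
      */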
  98static void lut_close(struct i915_gem_context *ctx)
  99{
 100        struct radix_tree_iter iter;
 101        void __rcu **slot;
 102
 103        mutex_lock(&ctx->lut_mutex);
 104        rcu_read_lock();
 105        radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
 106                struct i915_vma *vma = rcu_dereference_raw(*slot);
 107                struct drm_i915_gem_object *obj = vma->obj;
 108                struct i915_lut_handle *lut;
 109
 110                if (!kref_get_unless_zero(&obj->base.refcount))
 111                        continue;
 112
 113                spin_lock(&obj->lut_lock);
 114                list_for_each_entry(lut, &obj->lut_list, obj_link) {
 115                        if (lut->ctx != ctx)
 116                                continue;
 117
 118                        if (lut->handle != iter.index)
 119                                continue;
 120
 121                        list_del(&lut->obj_link);
 122                        break;
 123                }
 124                spin_unlock(&obj->lut_lock);
 125
 126                if (&lut->obj_link != &obj->lut_list) {
 127                        i915_lut_handle_free(lut);
 128                        radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
 129                        i915_vma_close(vma);
 130                        i915_gem_object_put(obj);
 131                }
 132
 133                i915_gem_object_put(obj);
 134        }
 135        rcu_read_unlock();
 136        mutex_unlock(&ctx->lut_mutex);
 137}
 138
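     /*
      * Resolve a user-supplied engine reference to an intel_context: with a
      * user engine map (LOOKUP_USER_INDEX) the instance field is an index
      * into that map, otherwise the class/instance pair is looked up and
      * mapped through the engine's legacy index.
      */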
 139static struct intel_context *
 140lookup_user_engine(struct i915_gem_context *ctx,
 141                   unsigned long flags,
 142                   const struct i915_engine_class_instance *ci)
 143#define LOOKUP_USER_INDEX BIT(0)
 144{
 145        int idx;
 146
 147        if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
 148                return ERR_PTR(-EINVAL);
 149
 150        if (!i915_gem_context_user_engines(ctx)) {
 151                struct intel_engine_cs *engine;
 152
 153                engine = intel_engine_lookup_user(ctx->i915,
 154                                                  ci->engine_class,
 155                                                  ci->engine_instance);
 156                if (!engine)
 157                        return ERR_PTR(-EINVAL);
 158
 159                idx = engine->legacy_idx;
 160        } else {
 161                idx = ci->engine_instance;
 162        }
 163
 164        return i915_gem_context_get_engine(ctx, idx);
 165}
 166
 167static int validate_priority(struct drm_i915_private *i915,
 168                             const struct drm_i915_gem_context_param *args)
 169{
 170        s64 priority = args->value;
 171
 172        if (args->size)
 173                return -EINVAL;
 174
 175        if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
 176                return -ENODEV;
 177
 178        if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
 179            priority < I915_CONTEXT_MIN_USER_PRIORITY)
 180                return -EINVAL;
 181
 182        if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
 183            !capable(CAP_SYS_NICE))
 184                return -EPERM;
 185
 186        return 0;
 187}
 188
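     /* Free a proto-context, dropping its VM and any per-engine sibling arrays. */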
 189static void proto_context_close(struct i915_gem_proto_context *pc)
 190{
 191        int i;
 192
 193        if (pc->vm)
 194                i915_vm_put(pc->vm);
 195        if (pc->user_engines) {
 196                for (i = 0; i < pc->num_user_engines; i++)
 197                        kfree(pc->user_engines[i].siblings);
 198                kfree(pc->user_engines);
 199        }
 200        kfree(pc);
 201}
 202
 203static int proto_context_set_persistence(struct drm_i915_private *i915,
 204                                         struct i915_gem_proto_context *pc,
 205                                         bool persist)
 206{
 207        if (persist) {
 208                /*
 209                 * Only contexts that are short-lived [that will expire or be
 210                 * reset] are allowed to survive past termination. We require
 211                 * hangcheck to ensure that the persistent requests are healthy.
 212                 */
 213                if (!i915->params.enable_hangcheck)
 214                        return -EINVAL;
 215
 216                pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
 217        } else {
 218                /* To cancel a context we use "preempt-to-idle" */
 219                if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
 220                        return -ENODEV;
 221
 222                /*
 223                 * If the cancel fails, we then need to reset, cleanly!
 224                 *
 225                 * If the per-engine reset fails, all hope is lost! We resort
 226                 * to a full GPU reset in that unlikely case, but realistically
 227                 * if the engine could not reset, the full reset does not fare
 228                 * much better. The damage has been done.
 229                 *
 230                 * However, if we cannot reset an engine by itself, we cannot
 231                 * cleanup a hanging persistent context without causing
  232                 * collateral damage, and we should not pretend we can by
 233                 * exposing the interface.
 234                 */
 235                if (!intel_has_reset_engine(&i915->gt))
 236                        return -ENODEV;
 237
 238                pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
 239        }
 240
 241        return 0;
 242}
 243
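     /*
      * Allocate a proto-context with the default user flags (bannable,
      * recoverable, and persistent whenever hangcheck is enabled) and normal
      * priority; the SINGLE_TIMELINE flag is only honoured on platforms with
      * execlists.
      */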
 244static struct i915_gem_proto_context *
 245proto_context_create(struct drm_i915_private *i915, unsigned int flags)
 246{
 247        struct i915_gem_proto_context *pc, *err;
 248
 249        pc = kzalloc(sizeof(*pc), GFP_KERNEL);
 250        if (!pc)
 251                return ERR_PTR(-ENOMEM);
 252
 253        pc->num_user_engines = -1;
 254        pc->user_engines = NULL;
 255        pc->user_flags = BIT(UCONTEXT_BANNABLE) |
 256                         BIT(UCONTEXT_RECOVERABLE);
 257        if (i915->params.enable_hangcheck)
 258                pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
 259        pc->sched.priority = I915_PRIORITY_NORMAL;
 260
 261        if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
 262                if (!HAS_EXECLISTS(i915)) {
 263                        err = ERR_PTR(-EINVAL);
 264                        goto proto_close;
 265                }
 266                pc->single_timeline = true;
 267        }
 268
 269        return pc;
 270
 271proto_close:
 272        proto_context_close(pc);
 273        return err;
 274}
 275
 276static int proto_context_register_locked(struct drm_i915_file_private *fpriv,
 277                                         struct i915_gem_proto_context *pc,
 278                                         u32 *id)
 279{
 280        int ret;
 281        void *old;
 282
 283        lockdep_assert_held(&fpriv->proto_context_lock);
 284
 285        ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL);
 286        if (ret)
 287                return ret;
 288
 289        old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL);
 290        if (xa_is_err(old)) {
 291                xa_erase(&fpriv->context_xa, *id);
 292                return xa_err(old);
 293        }
 294        WARN_ON(old);
 295
 296        return 0;
 297}
 298
 299static int proto_context_register(struct drm_i915_file_private *fpriv,
 300                                  struct i915_gem_proto_context *pc,
 301                                  u32 *id)
 302{
 303        int ret;
 304
 305        mutex_lock(&fpriv->proto_context_lock);
 306        ret = proto_context_register_locked(fpriv, pc, id);
 307        mutex_unlock(&fpriv->proto_context_lock);
 308
 309        return ret;
 310}
 311
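     /*
      * I915_CONTEXT_PARAM_VM: look up the VM id supplied by userspace and
      * attach it to the proto-context, replacing any previously set VM.
      */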
 312static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
 313                            struct i915_gem_proto_context *pc,
 314                            const struct drm_i915_gem_context_param *args)
 315{
 316        struct drm_i915_private *i915 = fpriv->dev_priv;
 317        struct i915_address_space *vm;
 318
 319        if (args->size)
 320                return -EINVAL;
 321
 322        if (!HAS_FULL_PPGTT(i915))
 323                return -ENODEV;
 324
 325        if (upper_32_bits(args->value))
 326                return -ENOENT;
 327
 328        vm = i915_gem_vm_lookup(fpriv, args->value);
 329        if (!vm)
 330                return -ENOENT;
 331
 332        if (pc->vm)
 333                i915_vm_put(pc->vm);
 334        pc->vm = vm;
 335
 336        return 0;
 337}
 338
 339struct set_proto_ctx_engines {
 340        struct drm_i915_private *i915;
 341        unsigned num_engines;
 342        struct i915_gem_proto_engine *engines;
 343};
 344
 345static int
 346set_proto_ctx_engines_balance(struct i915_user_extension __user *base,
 347                              void *data)
 348{
 349        struct i915_context_engines_load_balance __user *ext =
 350                container_of_user(base, typeof(*ext), base);
 351        const struct set_proto_ctx_engines *set = data;
 352        struct drm_i915_private *i915 = set->i915;
 353        struct intel_engine_cs **siblings;
 354        u16 num_siblings, idx;
 355        unsigned int n;
 356        int err;
 357
 358        if (!HAS_EXECLISTS(i915))
 359                return -ENODEV;
 360
 361        if (get_user(idx, &ext->engine_index))
 362                return -EFAULT;
 363
 364        if (idx >= set->num_engines) {
 365                drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
 366                        idx, set->num_engines);
 367                return -EINVAL;
 368        }
 369
 370        idx = array_index_nospec(idx, set->num_engines);
 371        if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) {
 372                drm_dbg(&i915->drm,
 373                        "Invalid placement[%d], already occupied\n", idx);
 374                return -EEXIST;
 375        }
 376
 377        if (get_user(num_siblings, &ext->num_siblings))
 378                return -EFAULT;
 379
 380        err = check_user_mbz(&ext->flags);
 381        if (err)
 382                return err;
 383
 384        err = check_user_mbz(&ext->mbz64);
 385        if (err)
 386                return err;
 387
 388        if (num_siblings == 0)
 389                return 0;
 390
 391        siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL);
 392        if (!siblings)
 393                return -ENOMEM;
 394
 395        for (n = 0; n < num_siblings; n++) {
 396                struct i915_engine_class_instance ci;
 397
 398                if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
 399                        err = -EFAULT;
 400                        goto err_siblings;
 401                }
 402
 403                siblings[n] = intel_engine_lookup_user(i915,
 404                                                       ci.engine_class,
 405                                                       ci.engine_instance);
 406                if (!siblings[n]) {
 407                        drm_dbg(&i915->drm,
 408                                "Invalid sibling[%d]: { class:%d, inst:%d }\n",
 409                                n, ci.engine_class, ci.engine_instance);
 410                        err = -EINVAL;
 411                        goto err_siblings;
 412                }
 413        }
 414
 415        if (num_siblings == 1) {
 416                set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
 417                set->engines[idx].engine = siblings[0];
 418                kfree(siblings);
 419        } else {
 420                set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED;
 421                set->engines[idx].num_siblings = num_siblings;
 422                set->engines[idx].siblings = siblings;
 423        }
 424
 425        return 0;
 426
 427err_siblings:
 428        kfree(siblings);
 429
 430        return err;
 431}
 432
 433static int
 434set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
 435{
 436        struct i915_context_engines_bond __user *ext =
 437                container_of_user(base, typeof(*ext), base);
 438        const struct set_proto_ctx_engines *set = data;
 439        struct drm_i915_private *i915 = set->i915;
 440        struct i915_engine_class_instance ci;
 441        struct intel_engine_cs *master;
 442        u16 idx, num_bonds;
 443        int err, n;
 444
 445        if (get_user(idx, &ext->virtual_index))
 446                return -EFAULT;
 447
 448        if (idx >= set->num_engines) {
 449                drm_dbg(&i915->drm,
 450                        "Invalid index for virtual engine: %d >= %d\n",
 451                        idx, set->num_engines);
 452                return -EINVAL;
 453        }
 454
 455        idx = array_index_nospec(idx, set->num_engines);
 456        if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) {
 457                drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
 458                return -EINVAL;
 459        }
 460
 461        if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) {
 462                drm_dbg(&i915->drm,
 463                        "Bonding with virtual engines not allowed\n");
 464                return -EINVAL;
 465        }
 466
 467        err = check_user_mbz(&ext->flags);
 468        if (err)
 469                return err;
 470
 471        for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
 472                err = check_user_mbz(&ext->mbz64[n]);
 473                if (err)
 474                        return err;
 475        }
 476
 477        if (copy_from_user(&ci, &ext->master, sizeof(ci)))
 478                return -EFAULT;
 479
 480        master = intel_engine_lookup_user(i915,
 481                                          ci.engine_class,
 482                                          ci.engine_instance);
 483        if (!master) {
 484                drm_dbg(&i915->drm,
 485                        "Unrecognised master engine: { class:%u, instance:%u }\n",
 486                        ci.engine_class, ci.engine_instance);
 487                return -EINVAL;
 488        }
 489
 490        if (intel_engine_uses_guc(master)) {
 491                DRM_DEBUG("bonding extension not supported with GuC submission");
 492                return -ENODEV;
 493        }
 494
 495        if (get_user(num_bonds, &ext->num_bonds))
 496                return -EFAULT;
 497
 498        for (n = 0; n < num_bonds; n++) {
 499                struct intel_engine_cs *bond;
 500
 501                if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
 502                        return -EFAULT;
 503
 504                bond = intel_engine_lookup_user(i915,
 505                                                ci.engine_class,
 506                                                ci.engine_instance);
 507                if (!bond) {
 508                        drm_dbg(&i915->drm,
 509                                "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
 510                                n, ci.engine_class, ci.engine_instance);
 511                        return -EINVAL;
 512                }
 513        }
 514
 515        return 0;
 516}
 517
 518static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
 519        [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance,
 520        [I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond,
 521};
 522
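     /*
      * I915_CONTEXT_PARAM_ENGINES: copy the user's engine map into an array
      * of proto-engines and then apply the LOAD_BALANCE/BOND extensions.
      * The engine set may only be provided once per proto-context.
      */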
 523static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
 524                                 struct i915_gem_proto_context *pc,
 525                                 const struct drm_i915_gem_context_param *args)
 526{
 527        struct drm_i915_private *i915 = fpriv->dev_priv;
 528        struct set_proto_ctx_engines set = { .i915 = i915 };
 529        struct i915_context_param_engines __user *user =
 530                u64_to_user_ptr(args->value);
 531        unsigned int n;
 532        u64 extensions;
 533        int err;
 534
 535        if (pc->num_user_engines >= 0) {
 536                drm_dbg(&i915->drm, "Cannot set engines twice");
 537                return -EINVAL;
 538        }
 539
 540        if (args->size < sizeof(*user) ||
 541            !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) {
 542                drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
 543                        args->size);
 544                return -EINVAL;
 545        }
 546
 547        set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
 548        /* RING_MASK has no shift so we can use it directly here */
 549        if (set.num_engines > I915_EXEC_RING_MASK + 1)
 550                return -EINVAL;
 551
 552        set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL);
 553        if (!set.engines)
 554                return -ENOMEM;
 555
 556        for (n = 0; n < set.num_engines; n++) {
 557                struct i915_engine_class_instance ci;
 558                struct intel_engine_cs *engine;
 559
 560                if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
 561                        kfree(set.engines);
 562                        return -EFAULT;
 563                }
 564
 565                memset(&set.engines[n], 0, sizeof(set.engines[n]));
 566
 567                if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
 568                    ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE)
 569                        continue;
 570
 571                engine = intel_engine_lookup_user(i915,
 572                                                  ci.engine_class,
 573                                                  ci.engine_instance);
 574                if (!engine) {
 575                        drm_dbg(&i915->drm,
 576                                "Invalid engine[%d]: { class:%d, instance:%d }\n",
 577                                n, ci.engine_class, ci.engine_instance);
 578                        kfree(set.engines);
 579                        return -ENOENT;
 580                }
 581
 582                set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
 583                set.engines[n].engine = engine;
 584        }
 585
 586        err = -EFAULT;
 587        if (!get_user(extensions, &user->extensions))
 588                err = i915_user_extensions(u64_to_user_ptr(extensions),
 589                                           set_proto_ctx_engines_extensions,
 590                                           ARRAY_SIZE(set_proto_ctx_engines_extensions),
 591                                           &set);
 592        if (err) {
 593                kfree(set.engines);
 594                return err;
 595        }
 596
 597        pc->num_user_engines = set.num_engines;
 598        pc->user_engines = set.engines;
 599
 600        return 0;
 601}
 602
 603static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
 604                              struct i915_gem_proto_context *pc,
 605                              struct drm_i915_gem_context_param *args)
 606{
 607        struct drm_i915_private *i915 = fpriv->dev_priv;
 608        struct drm_i915_gem_context_param_sseu user_sseu;
 609        struct intel_sseu *sseu;
 610        int ret;
 611
 612        if (args->size < sizeof(user_sseu))
 613                return -EINVAL;
 614
 615        if (GRAPHICS_VER(i915) != 11)
 616                return -ENODEV;
 617
 618        if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
 619                           sizeof(user_sseu)))
 620                return -EFAULT;
 621
 622        if (user_sseu.rsvd)
 623                return -EINVAL;
 624
 625        if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
 626                return -EINVAL;
 627
 628        if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0))
 629                return -EINVAL;
 630
 631        if (pc->num_user_engines >= 0) {
 632                int idx = user_sseu.engine.engine_instance;
 633                struct i915_gem_proto_engine *pe;
 634
 635                if (idx >= pc->num_user_engines)
 636                        return -EINVAL;
 637
 638                pe = &pc->user_engines[idx];
 639
 640                /* Only render engine supports RPCS configuration. */
 641                if (pe->engine->class != RENDER_CLASS)
 642                        return -EINVAL;
 643
 644                sseu = &pe->sseu;
 645        } else {
 646                /* Only render engine supports RPCS configuration. */
 647                if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER)
 648                        return -EINVAL;
 649
 650                /* There is only one render engine */
 651                if (user_sseu.engine.engine_instance != 0)
 652                        return -EINVAL;
 653
 654                sseu = &pc->legacy_rcs_sseu;
 655        }
 656
 657        ret = i915_gem_user_to_context_sseu(&i915->gt, &user_sseu, sseu);
 658        if (ret)
 659                return ret;
 660
 661        args->size = sizeof(user_sseu);
 662
 663        return 0;
 664}
 665
 666static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
 667                               struct i915_gem_proto_context *pc,
 668                               struct drm_i915_gem_context_param *args)
 669{
 670        int ret = 0;
 671
 672        switch (args->param) {
 673        case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
 674                if (args->size)
 675                        ret = -EINVAL;
 676                else if (args->value)
 677                        pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE);
 678                else
 679                        pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE);
 680                break;
 681
 682        case I915_CONTEXT_PARAM_BANNABLE:
 683                if (args->size)
 684                        ret = -EINVAL;
 685                else if (!capable(CAP_SYS_ADMIN) && !args->value)
 686                        ret = -EPERM;
 687                else if (args->value)
 688                        pc->user_flags |= BIT(UCONTEXT_BANNABLE);
 689                else
 690                        pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
 691                break;
 692
 693        case I915_CONTEXT_PARAM_RECOVERABLE:
 694                if (args->size)
 695                        ret = -EINVAL;
 696                else if (args->value)
 697                        pc->user_flags |= BIT(UCONTEXT_RECOVERABLE);
 698                else
 699                        pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE);
 700                break;
 701
 702        case I915_CONTEXT_PARAM_PRIORITY:
 703                ret = validate_priority(fpriv->dev_priv, args);
 704                if (!ret)
 705                        pc->sched.priority = args->value;
 706                break;
 707
 708        case I915_CONTEXT_PARAM_SSEU:
 709                ret = set_proto_ctx_sseu(fpriv, pc, args);
 710                break;
 711
 712        case I915_CONTEXT_PARAM_VM:
 713                ret = set_proto_ctx_vm(fpriv, pc, args);
 714                break;
 715
 716        case I915_CONTEXT_PARAM_ENGINES:
 717                ret = set_proto_ctx_engines(fpriv, pc, args);
 718                break;
 719
 720        case I915_CONTEXT_PARAM_PERSISTENCE:
 721                if (args->size)
 722                        ret = -EINVAL;
  723                else
  724                        ret = proto_context_set_persistence(fpriv->dev_priv, pc, args->value);
 725                break;
 726
 727        case I915_CONTEXT_PARAM_NO_ZEROMAP:
 728        case I915_CONTEXT_PARAM_BAN_PERIOD:
 729        case I915_CONTEXT_PARAM_RINGSIZE:
 730        default:
 731                ret = -EINVAL;
 732                break;
 733        }
 734
 735        return ret;
 736}
 737
 738static struct i915_address_space *
 739context_get_vm_rcu(struct i915_gem_context *ctx)
 740{
 741        GEM_BUG_ON(!rcu_access_pointer(ctx->vm));
 742
 743        do {
 744                struct i915_address_space *vm;
 745
 746                /*
 747                 * We do not allow downgrading from full-ppgtt [to a shared
 748                 * global gtt], so ctx->vm cannot become NULL.
 749                 */
 750                vm = rcu_dereference(ctx->vm);
 751                if (!kref_get_unless_zero(&vm->ref))
 752                        continue;
 753
 754                /*
  755                 * This ppgtt may have been reallocated between
 756                 * the read and the kref, and reassigned to a third
 757                 * context. In order to avoid inadvertent sharing
 758                 * of this ppgtt with that third context (and not
 759                 * src), we have to confirm that we have the same
 760                 * ppgtt after passing through the strong memory
 761                 * barrier implied by a successful
 762                 * kref_get_unless_zero().
 763                 *
 764                 * Once we have acquired the current ppgtt of ctx,
 765                 * we no longer care if it is released from ctx, as
 766                 * it cannot be reallocated elsewhere.
 767                 */
 768
 769                if (vm == rcu_access_pointer(ctx->vm))
 770                        return rcu_pointer_handoff(vm);
 771
 772                i915_vm_put(vm);
 773        } while (1);
 774}
 775
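     /*
      * Couple a freshly created intel_context to its GEM context: inherit
      * the context's VM, enable semaphores on engines that support
      * timeslicing, arm the request watchdog and apply any non-default SSEU
      * configuration.
      */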
 776static int intel_context_set_gem(struct intel_context *ce,
 777                                 struct i915_gem_context *ctx,
 778                                 struct intel_sseu sseu)
 779{
 780        int ret = 0;
 781
 782        GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
 783        RCU_INIT_POINTER(ce->gem_context, ctx);
 784
 785        ce->ring_size = SZ_16K;
 786
 787        if (rcu_access_pointer(ctx->vm)) {
 788                struct i915_address_space *vm;
 789
 790                rcu_read_lock();
 791                vm = context_get_vm_rcu(ctx); /* hmm */
 792                rcu_read_unlock();
 793
 794                i915_vm_put(ce->vm);
 795                ce->vm = vm;
 796        }
 797
 798        if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
 799            intel_engine_has_timeslices(ce->engine) &&
 800            intel_engine_has_semaphores(ce->engine))
 801                __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
 802
 803        if (IS_ACTIVE(CONFIG_DRM_I915_REQUEST_TIMEOUT) &&
 804            ctx->i915->params.request_timeout_ms) {
 805                unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;
 806
 807                intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
 808        }
 809
 810        /* A valid SSEU has no zero fields */
 811        if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
 812                ret = intel_context_reconfigure_sseu(ce, sseu);
 813
 814        return ret;
 815}
 816
 817static void __free_engines(struct i915_gem_engines *e, unsigned int count)
 818{
 819        while (count--) {
 820                if (!e->engines[count])
 821                        continue;
 822
 823                intel_context_put(e->engines[count]);
 824        }
 825        kfree(e);
 826}
 827
 828static void free_engines(struct i915_gem_engines *e)
 829{
 830        __free_engines(e, e->num_engines);
 831}
 832
 833static void free_engines_rcu(struct rcu_head *rcu)
 834{
 835        struct i915_gem_engines *engines =
 836                container_of(rcu, struct i915_gem_engines, rcu);
 837
 838        i915_sw_fence_fini(&engines->fence);
 839        free_engines(engines);
 840}
 841
 842static int __i915_sw_fence_call
 843engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 844{
 845        struct i915_gem_engines *engines =
 846                container_of(fence, typeof(*engines), fence);
 847
 848        switch (state) {
 849        case FENCE_COMPLETE:
 850                if (!list_empty(&engines->link)) {
 851                        struct i915_gem_context *ctx = engines->ctx;
 852                        unsigned long flags;
 853
 854                        spin_lock_irqsave(&ctx->stale.lock, flags);
 855                        list_del(&engines->link);
 856                        spin_unlock_irqrestore(&ctx->stale.lock, flags);
 857                }
 858                i915_gem_context_put(engines->ctx);
 859                break;
 860
 861        case FENCE_FREE:
 862                init_rcu_head(&engines->rcu);
 863                call_rcu(&engines->rcu, free_engines_rcu);
 864                break;
 865        }
 866
 867        return NOTIFY_DONE;
 868}
 869
 870static struct i915_gem_engines *alloc_engines(unsigned int count)
 871{
 872        struct i915_gem_engines *e;
 873
 874        e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
 875        if (!e)
 876                return NULL;
 877
 878        i915_sw_fence_init(&e->fence, engines_notify);
 879        return e;
 880}
 881
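     /*
      * Build the legacy engine map used when userspace has not supplied its
      * own set: one context per physical engine, indexed by legacy_idx, with
      * the requested SSEU applied to the render engine.
      */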
 882static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
 883                                                struct intel_sseu rcs_sseu)
 884{
 885        const struct intel_gt *gt = &ctx->i915->gt;
 886        struct intel_engine_cs *engine;
 887        struct i915_gem_engines *e, *err;
 888        enum intel_engine_id id;
 889
 890        e = alloc_engines(I915_NUM_ENGINES);
 891        if (!e)
 892                return ERR_PTR(-ENOMEM);
 893
 894        for_each_engine(engine, gt, id) {
 895                struct intel_context *ce;
 896                struct intel_sseu sseu = {};
 897                int ret;
 898
 899                if (engine->legacy_idx == INVALID_ENGINE)
 900                        continue;
 901
 902                GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
 903                GEM_BUG_ON(e->engines[engine->legacy_idx]);
 904
 905                ce = intel_context_create(engine);
 906                if (IS_ERR(ce)) {
 907                        err = ERR_CAST(ce);
 908                        goto free_engines;
 909                }
 910
 911                e->engines[engine->legacy_idx] = ce;
 912                e->num_engines = max(e->num_engines, engine->legacy_idx + 1);
 913
 914                if (engine->class == RENDER_CLASS)
 915                        sseu = rcs_sseu;
 916
 917                ret = intel_context_set_gem(ce, ctx, sseu);
 918                if (ret) {
 919                        err = ERR_PTR(ret);
 920                        goto free_engines;
 921                }
 922
 923        }
 924
 925        return e;
 926
 927free_engines:
 928        free_engines(e);
 929        return err;
 930}
 931
 932static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
 933                                             unsigned int num_engines,
 934                                             struct i915_gem_proto_engine *pe)
 935{
 936        struct i915_gem_engines *e, *err;
 937        unsigned int n;
 938
 939        e = alloc_engines(num_engines);
 940        if (!e)
 941                return ERR_PTR(-ENOMEM);
 942        e->num_engines = num_engines;
 943
 944        for (n = 0; n < num_engines; n++) {
 945                struct intel_context *ce;
 946                int ret;
 947
 948                switch (pe[n].type) {
 949                case I915_GEM_ENGINE_TYPE_PHYSICAL:
 950                        ce = intel_context_create(pe[n].engine);
 951                        break;
 952
 953                case I915_GEM_ENGINE_TYPE_BALANCED:
 954                        ce = intel_engine_create_virtual(pe[n].siblings,
 955                                                         pe[n].num_siblings);
 956                        break;
 957
 958                case I915_GEM_ENGINE_TYPE_INVALID:
 959                default:
 960                        GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID);
 961                        continue;
 962                }
 963
 964                if (IS_ERR(ce)) {
 965                        err = ERR_CAST(ce);
 966                        goto free_engines;
 967                }
 968
 969                e->engines[n] = ce;
 970
  971                ret = intel_context_set_gem(ce, ctx, pe[n].sseu);
 972                if (ret) {
 973                        err = ERR_PTR(ret);
 974                        goto free_engines;
 975                }
 976        }
 977
 978        return e;
 979
 980free_engines:
 981        free_engines(e);
 982        return err;
 983}
 984
 985void i915_gem_context_release(struct kref *ref)
 986{
 987        struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
 988
 989        trace_i915_context_free(ctx);
 990        GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 991
 992        if (ctx->syncobj)
 993                drm_syncobj_put(ctx->syncobj);
 994
 995        mutex_destroy(&ctx->engines_mutex);
 996        mutex_destroy(&ctx->lut_mutex);
 997
 998        put_pid(ctx->pid);
 999        mutex_destroy(&ctx->mutex);
1000
1001        kfree_rcu(ctx, rcu);
1002}
1003
1004static inline struct i915_gem_engines *
1005__context_engines_static(const struct i915_gem_context *ctx)
1006{
1007        return rcu_dereference_protected(ctx->engines, true);
1008}
1009
1010static void __reset_context(struct i915_gem_context *ctx,
1011                            struct intel_engine_cs *engine)
1012{
1013        intel_gt_handle_error(engine->gt, engine->mask, 0,
1014                              "context closure in %s", ctx->name);
1015}
1016
1017static bool __cancel_engine(struct intel_engine_cs *engine)
1018{
1019        /*
1020         * Send a "high priority pulse" down the engine to cause the
1021         * current request to be momentarily preempted. (If it fails to
1022         * be preempted, it will be reset). As we have marked our context
1023         * as banned, any incomplete request, including any running, will
 1024         * as banned, any incomplete request, including any that are running, will
1025         *
1026         * If there is no hangchecking (one of the reasons why we try to
1027         * cancel the context) and no forced preemption, there may be no
1028         * means by which we reset the GPU and evict the persistent hog.
1029         * Ergo if we are unable to inject a preemptive pulse that can
 1030         * kill the banned context, we fall back to doing a local reset
1031         * instead.
1032         */
1033        return intel_engine_pulse(engine) == 0;
1034}
1035
1036static struct intel_engine_cs *active_engine(struct intel_context *ce)
1037{
1038        struct intel_engine_cs *engine = NULL;
1039        struct i915_request *rq;
1040
1041        if (intel_context_has_inflight(ce))
1042                return intel_context_inflight(ce);
1043
1044        if (!ce->timeline)
1045                return NULL;
1046
1047        /*
1048         * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
1049         * to the request to prevent it being transferred to a new timeline
1050         * (and onto a new timeline->requests list).
1051         */
1052        rcu_read_lock();
1053        list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
1054                bool found;
1055
 1056                /* timeline is already completed up to this point? */
1057                if (!i915_request_get_rcu(rq))
1058                        break;
1059
1060                /* Check with the backend if the request is inflight */
1061                found = true;
1062                if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
1063                        found = i915_request_active_engine(rq, &engine);
1064
1065                i915_request_put(rq);
1066                if (found)
1067                        break;
1068        }
1069        rcu_read_unlock();
1070
1071        return engine;
1072}
1073
1074static void kill_engines(struct i915_gem_engines *engines, bool ban)
1075{
1076        struct i915_gem_engines_iter it;
1077        struct intel_context *ce;
1078
1079        /*
1080         * Map the user's engine back to the actual engines; one virtual
1081         * engine will be mapped to multiple engines, and using ctx->engine[]
 1082         * the same engine may have multiple instances in the user's map.
1083         * However, we only care about pending requests, so only include
1084         * engines on which there are incomplete requests.
1085         */
1086        for_each_gem_engine(ce, engines, it) {
1087                struct intel_engine_cs *engine;
1088
1089                if (ban && intel_context_ban(ce, NULL))
1090                        continue;
1091
1092                /*
1093                 * Check the current active state of this context; if we
1094                 * are currently executing on the GPU we need to evict
1095                 * ourselves. On the other hand, if we haven't yet been
1096                 * submitted to the GPU or if everything is complete,
1097                 * we have nothing to do.
1098                 */
1099                engine = active_engine(ce);
1100
1101                /* First attempt to gracefully cancel the context */
1102                if (engine && !__cancel_engine(engine) && ban)
1103                        /*
1104                         * If we are unable to send a preemptive pulse to bump
1105                         * the context from the GPU, we have to resort to a full
1106                         * reset. We hope the collateral damage is worth it.
1107                         */
1108                        __reset_context(engines->ctx, engine);
1109        }
1110}
1111
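     /*
      * Walk the stale engine sets of a closed context and kill off incomplete
      * requests: non-persistent (or hangcheck-disabled) contexts are banned
      * and, if a preempting pulse cannot evict them, reset.
      */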
1112static void kill_context(struct i915_gem_context *ctx)
1113{
1114        bool ban = (!i915_gem_context_is_persistent(ctx) ||
1115                    !ctx->i915->params.enable_hangcheck);
1116        struct i915_gem_engines *pos, *next;
1117
1118        spin_lock_irq(&ctx->stale.lock);
1119        GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
1120        list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
1121                if (!i915_sw_fence_await(&pos->fence)) {
1122                        list_del_init(&pos->link);
1123                        continue;
1124                }
1125
1126                spin_unlock_irq(&ctx->stale.lock);
1127
1128                kill_engines(pos, ban);
1129
1130                spin_lock_irq(&ctx->stale.lock);
1131                GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
1132                list_safe_reset_next(pos, next, link);
1133                list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
1134
1135                i915_sw_fence_complete(&pos->fence);
1136        }
1137        spin_unlock_irq(&ctx->stale.lock);
1138}
1139
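     /*
      * Queue the old engine set for release: mark every context as closed,
      * track the still-active ones on the set's fence so that it is only
      * freed once they are retired, and park the set on ctx->stale.engines
      * (or kill it immediately if the context already raced to close).
      */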
1140static void engines_idle_release(struct i915_gem_context *ctx,
1141                                 struct i915_gem_engines *engines)
1142{
1143        struct i915_gem_engines_iter it;
1144        struct intel_context *ce;
1145
1146        INIT_LIST_HEAD(&engines->link);
1147
1148        engines->ctx = i915_gem_context_get(ctx);
1149
1150        for_each_gem_engine(ce, engines, it) {
1151                int err;
1152
1153                /* serialises with execbuf */
1154                set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
1155                if (!intel_context_pin_if_active(ce))
1156                        continue;
1157
1158                /* Wait until context is finally scheduled out and retired */
1159                err = i915_sw_fence_await_active(&engines->fence,
1160                                                 &ce->active,
1161                                                 I915_ACTIVE_AWAIT_BARRIER);
1162                intel_context_unpin(ce);
1163                if (err)
1164                        goto kill;
1165        }
1166
1167        spin_lock_irq(&ctx->stale.lock);
1168        if (!i915_gem_context_is_closed(ctx))
1169                list_add_tail(&engines->link, &ctx->stale.engines);
1170        spin_unlock_irq(&ctx->stale.lock);
1171
1172kill:
1173        if (list_empty(&engines->link)) /* raced, already closed */
1174                kill_engines(engines, true);
1175
1176        i915_sw_fence_commit(&engines->fence);
1177}
1178
1179static void set_closed_name(struct i915_gem_context *ctx)
1180{
1181        char *s;
1182
1183        /* Replace '[]' with '<>' to indicate closed in debug prints */
1184
1185        s = strrchr(ctx->name, '[');
1186        if (!s)
1187                return;
1188
1189        *s = '<';
1190
1191        s = strchr(s + 1, ']');
1192        if (s)
1193                *s = '>';
1194}
1195
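     /*
      * Final closure of a GEM context: release the engine set, close the VM
      * and the handle LUT, unlink the context from the global list and, for
      * non-persistent contexts, kill any requests still outstanding.
      */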
1196static void context_close(struct i915_gem_context *ctx)
1197{
1198        struct i915_address_space *vm;
1199
1200        /* Flush any concurrent set_engines() */
1201        mutex_lock(&ctx->engines_mutex);
1202        engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
1203        i915_gem_context_set_closed(ctx);
1204        mutex_unlock(&ctx->engines_mutex);
1205
1206        mutex_lock(&ctx->mutex);
1207
1208        set_closed_name(ctx);
1209
1210        vm = i915_gem_context_vm(ctx);
1211        if (vm)
1212                i915_vm_close(vm);
1213
1214        ctx->file_priv = ERR_PTR(-EBADF);
1215
1216        /*
1217         * The LUT uses the VMA as a backpointer to unref the object,
1218         * so we need to clear the LUT before we close all the VMA (inside
1219         * the ppgtt).
1220         */
1221        lut_close(ctx);
1222
1223        spin_lock(&ctx->i915->gem.contexts.lock);
1224        list_del(&ctx->link);
1225        spin_unlock(&ctx->i915->gem.contexts.lock);
1226
1227        mutex_unlock(&ctx->mutex);
1228
1229        /*
 1230         * If the user has disabled hangchecking, we cannot be sure that
1231         * the batches will ever complete after the context is closed,
1232         * keeping the context and all resources pinned forever. So in this
1233         * case we opt to forcibly kill off all remaining requests on
1234         * context close.
1235         */
1236        kill_context(ctx);
1237
1238        i915_gem_context_put(ctx);
1239}
1240
1241static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
1242{
1243        if (i915_gem_context_is_persistent(ctx) == state)
1244                return 0;
1245
1246        if (state) {
1247                /*
1248                 * Only contexts that are short-lived [that will expire or be
1249                 * reset] are allowed to survive past termination. We require
1250                 * hangcheck to ensure that the persistent requests are healthy.
1251                 */
1252                if (!ctx->i915->params.enable_hangcheck)
1253                        return -EINVAL;
1254
1255                i915_gem_context_set_persistence(ctx);
1256        } else {
1257                /* To cancel a context we use "preempt-to-idle" */
1258                if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
1259                        return -ENODEV;
1260
1261                /*
1262                 * If the cancel fails, we then need to reset, cleanly!
1263                 *
1264                 * If the per-engine reset fails, all hope is lost! We resort
1265                 * to a full GPU reset in that unlikely case, but realistically
1266                 * if the engine could not reset, the full reset does not fare
1267                 * much better. The damage has been done.
1268                 *
1269                 * However, if we cannot reset an engine by itself, we cannot
1270                 * cleanup a hanging persistent context without causing
 1271                 * collateral damage, and we should not pretend we can by
1272                 * exposing the interface.
1273                 */
1274                if (!intel_has_reset_engine(&ctx->i915->gt))
1275                        return -ENODEV;
1276
1277                i915_gem_context_clear_persistence(ctx);
1278        }
1279
1280        return 0;
1281}
1282
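     /*
      * Acquire a stable hold on ctx->engines by awaiting its fence and
      * re-checking the pointer, so that a concurrent replacement of the
      * engine set cannot be freed while it is in use.
      */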
1283static inline struct i915_gem_engines *
1284__context_engines_await(const struct i915_gem_context *ctx,
1285                        bool *user_engines)
1286{
1287        struct i915_gem_engines *engines;
1288
1289        rcu_read_lock();
1290        do {
1291                engines = rcu_dereference(ctx->engines);
1292                GEM_BUG_ON(!engines);
1293
1294                if (user_engines)
1295                        *user_engines = i915_gem_context_user_engines(ctx);
1296
1297                /* successful await => strong mb */
1298                if (unlikely(!i915_sw_fence_await(&engines->fence)))
1299                        continue;
1300
1301                if (likely(engines == rcu_access_pointer(ctx->engines)))
1302                        break;
1303
1304                i915_sw_fence_complete(&engines->fence);
1305        } while (1);
1306        rcu_read_unlock();
1307
1308        return engines;
1309}
1310
1311static void
1312context_apply_all(struct i915_gem_context *ctx,
1313                  void (*fn)(struct intel_context *ce, void *data),
1314                  void *data)
1315{
1316        struct i915_gem_engines_iter it;
1317        struct i915_gem_engines *e;
1318        struct intel_context *ce;
1319
1320        e = __context_engines_await(ctx, NULL);
1321        for_each_gem_engine(ce, e, it)
1322                fn(ce, data);
1323        i915_sw_fence_complete(&e->fence);
1324}
1325
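     /*
      * Turn a proto-context into a real GEM context: take or create the
      * ppgtt, instantiate the engine map (user-supplied or legacy default),
      * copy the user flags and scheduling priority, and create the syncobj
      * backing a single timeline when requested.
      */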
1326static struct i915_gem_context *
1327i915_gem_create_context(struct drm_i915_private *i915,
1328                        const struct i915_gem_proto_context *pc)
1329{
1330        struct i915_gem_context *ctx;
1331        struct i915_address_space *vm = NULL;
1332        struct i915_gem_engines *e;
1333        int err;
1334        int i;
1335
1336        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1337        if (!ctx)
1338                return ERR_PTR(-ENOMEM);
1339
1340        kref_init(&ctx->ref);
1341        ctx->i915 = i915;
1342        ctx->sched = pc->sched;
1343        mutex_init(&ctx->mutex);
1344        INIT_LIST_HEAD(&ctx->link);
1345
1346        spin_lock_init(&ctx->stale.lock);
1347        INIT_LIST_HEAD(&ctx->stale.engines);
1348
1349        if (pc->vm) {
1350                vm = i915_vm_get(pc->vm);
1351        } else if (HAS_FULL_PPGTT(i915)) {
1352                struct i915_ppgtt *ppgtt;
1353
1354                ppgtt = i915_ppgtt_create(&i915->gt);
1355                if (IS_ERR(ppgtt)) {
1356                        drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
1357                                PTR_ERR(ppgtt));
1358                        err = PTR_ERR(ppgtt);
1359                        goto err_ctx;
1360                }
1361                vm = &ppgtt->vm;
1362        }
1363        if (vm) {
1364                RCU_INIT_POINTER(ctx->vm, i915_vm_open(vm));
1365
1366                /* i915_vm_open() takes a reference */
1367                i915_vm_put(vm);
1368        }
1369
1370        mutex_init(&ctx->engines_mutex);
1371        if (pc->num_user_engines >= 0) {
1372                i915_gem_context_set_user_engines(ctx);
1373                e = user_engines(ctx, pc->num_user_engines, pc->user_engines);
1374        } else {
1375                i915_gem_context_clear_user_engines(ctx);
1376                e = default_engines(ctx, pc->legacy_rcs_sseu);
1377        }
1378        if (IS_ERR(e)) {
1379                err = PTR_ERR(e);
1380                goto err_vm;
1381        }
1382        RCU_INIT_POINTER(ctx->engines, e);
1383
1384        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
1385        mutex_init(&ctx->lut_mutex);
1386
1387        /* NB: Mark all slices as needing a remap so that when the context first
1388         * loads it will restore whatever remap state already exists. If there
1389         * is no remap info, it will be a NOP. */
1390        ctx->remap_slice = ALL_L3_SLICES(i915);
1391
1392        ctx->user_flags = pc->user_flags;
1393
1394        for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
1395                ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
1396
1397        if (pc->single_timeline) {
1398                err = drm_syncobj_create(&ctx->syncobj,
1399                                         DRM_SYNCOBJ_CREATE_SIGNALED,
1400                                         NULL);
1401                if (err)
1402                        goto err_engines;
1403        }
1404
1405        trace_i915_context_create(ctx);
1406
1407        return ctx;
1408
1409err_engines:
1410        free_engines(e);
1411err_vm:
1412        if (ctx->vm)
1413                i915_vm_close(ctx->vm);
1414err_ctx:
1415        kfree(ctx);
1416        return ERR_PTR(err);
1417}
1418
1419static void init_contexts(struct i915_gem_contexts *gc)
1420{
1421        spin_lock_init(&gc->lock);
1422        INIT_LIST_HEAD(&gc->list);
1423}
1424
1425void i915_gem_init__contexts(struct drm_i915_private *i915)
1426{
1427        init_contexts(&i915->gem.contexts);
1428}
1429
1430static void gem_context_register(struct i915_gem_context *ctx,
1431                                 struct drm_i915_file_private *fpriv,
1432                                 u32 id)
1433{
1434        struct drm_i915_private *i915 = ctx->i915;
1435        void *old;
1436
1437        ctx->file_priv = fpriv;
1438
1439        ctx->pid = get_task_pid(current, PIDTYPE_PID);
1440        snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
1441                 current->comm, pid_nr(ctx->pid));
1442
 1443        /* And finally expose ourselves to userspace via the xarray */
1444        old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
1445        WARN_ON(old);
1446
1447        spin_lock(&i915->gem.contexts.lock);
1448        list_add_tail(&ctx->link, &i915->gem.contexts.list);
1449        spin_unlock(&i915->gem.contexts.lock);
1450}
1451
1452int i915_gem_context_open(struct drm_i915_private *i915,
1453                          struct drm_file *file)
1454{
1455        struct drm_i915_file_private *file_priv = file->driver_priv;
1456        struct i915_gem_proto_context *pc;
1457        struct i915_gem_context *ctx;
1458        int err;
1459
1460        mutex_init(&file_priv->proto_context_lock);
1461        xa_init_flags(&file_priv->proto_context_xa, XA_FLAGS_ALLOC);
1462
1463        /* 0 reserved for the default context */
1464        xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC1);
1465
1466        /* 0 reserved for invalid/unassigned ppgtt */
1467        xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
1468
1469        pc = proto_context_create(i915, 0);
1470        if (IS_ERR(pc)) {
1471                err = PTR_ERR(pc);
1472                goto err;
1473        }
1474
1475        ctx = i915_gem_create_context(i915, pc);
1476        proto_context_close(pc);
1477        if (IS_ERR(ctx)) {
1478                err = PTR_ERR(ctx);
1479                goto err;
1480        }
1481
1482        gem_context_register(ctx, file_priv, 0);
1483
1484        return 0;
1485
1486err:
1487        xa_destroy(&file_priv->vm_xa);
1488        xa_destroy(&file_priv->context_xa);
1489        xa_destroy(&file_priv->proto_context_xa);
1490        mutex_destroy(&file_priv->proto_context_lock);
1491        return err;
1492}
1493
1494void i915_gem_context_close(struct drm_file *file)
1495{
1496        struct drm_i915_file_private *file_priv = file->driver_priv;
1497        struct i915_gem_proto_context *pc;
1498        struct i915_address_space *vm;
1499        struct i915_gem_context *ctx;
1500        unsigned long idx;
1501
1502        xa_for_each(&file_priv->proto_context_xa, idx, pc)
1503                proto_context_close(pc);
1504        xa_destroy(&file_priv->proto_context_xa);
1505        mutex_destroy(&file_priv->proto_context_lock);
1506
1507        xa_for_each(&file_priv->context_xa, idx, ctx)
1508                context_close(ctx);
1509        xa_destroy(&file_priv->context_xa);
1510
1511        xa_for_each(&file_priv->vm_xa, idx, vm)
1512                i915_vm_put(vm);
1513        xa_destroy(&file_priv->vm_xa);
1514}
1515
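/*
 * Illustrative userspace sketch for the VM create/destroy uAPI serviced by
 * the two ioctls below (not part of this file; libdrm's drmIoctl() is
 * assumed and error handling is omitted):
 *
 *        struct drm_i915_gem_vm_control ctl = { };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &ctl);
 *        // ctl.vm_id is now a non-zero handle which can be attached to a
 *        // new context via I915_CONTEXT_PARAM_VM at creation time.
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &ctl);
 */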
1516int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
1517                             struct drm_file *file)
1518{
1519        struct drm_i915_private *i915 = to_i915(dev);
1520        struct drm_i915_gem_vm_control *args = data;
1521        struct drm_i915_file_private *file_priv = file->driver_priv;
1522        struct i915_ppgtt *ppgtt;
1523        u32 id;
1524        int err;
1525
1526        if (!HAS_FULL_PPGTT(i915))
1527                return -ENODEV;
1528
1529        if (args->flags)
1530                return -EINVAL;
1531
1532        ppgtt = i915_ppgtt_create(&i915->gt);
1533        if (IS_ERR(ppgtt))
1534                return PTR_ERR(ppgtt);
1535
1536        if (args->extensions) {
1537                err = i915_user_extensions(u64_to_user_ptr(args->extensions),
1538                                           NULL, 0,
1539                                           ppgtt);
1540                if (err)
1541                        goto err_put;
1542        }
1543
1544        err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
1545                       xa_limit_32b, GFP_KERNEL);
1546        if (err)
1547                goto err_put;
1548
1549        GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1550        args->vm_id = id;
1551        return 0;
1552
1553err_put:
1554        i915_vm_put(&ppgtt->vm);
1555        return err;
1556}
1557
1558int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
1559                              struct drm_file *file)
1560{
1561        struct drm_i915_file_private *file_priv = file->driver_priv;
1562        struct drm_i915_gem_vm_control *args = data;
1563        struct i915_address_space *vm;
1564
1565        if (args->flags)
1566                return -EINVAL;
1567
1568        if (args->extensions)
1569                return -EINVAL;
1570
1571        vm = xa_erase(&file_priv->vm_xa, args->vm_id);
1572        if (!vm)
1573                return -ENOENT;
1574
1575        i915_vm_put(vm);
1576        return 0;
1577}
1578
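/*
 * get_ppgtt() exports the context's address space back to userspace as a
 * fresh vm_id: it takes a temporary reference to ctx->vm under RCU,
 * reserves a slot in the per-file vm_xa, converts that slot into a
 * long-lived reference with i915_vm_open() and then drops its temporary
 * reference with i915_vm_put() on both the success and error paths.
 */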
1579static int get_ppgtt(struct drm_i915_file_private *file_priv,
1580                     struct i915_gem_context *ctx,
1581                     struct drm_i915_gem_context_param *args)
1582{
1583        struct i915_address_space *vm;
1584        int err;
1585        u32 id;
1586
1587        if (!rcu_access_pointer(ctx->vm))
1588                return -ENODEV;
1589
1590        rcu_read_lock();
1591        vm = context_get_vm_rcu(ctx);
1592        rcu_read_unlock();
1593        if (!vm)
1594                return -ENODEV;
1595
1596        err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1597        if (err)
1598                goto err_put;
1599
1600        i915_vm_open(vm);
1601
1602        GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1603        args->value = id;
1604        args->size = 0;
1605
1606err_put:
1607        i915_vm_put(vm);
1608        return err;
1609}
1610
1611int
1612i915_gem_user_to_context_sseu(struct intel_gt *gt,
1613                              const struct drm_i915_gem_context_param_sseu *user,
1614                              struct intel_sseu *context)
1615{
1616        const struct sseu_dev_info *device = &gt->info.sseu;
1617        struct drm_i915_private *i915 = gt->i915;
1618
1619        /* No zeros in any field. */
1620        if (!user->slice_mask || !user->subslice_mask ||
1621            !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1622                return -EINVAL;
1623
1624        /* Max >= min. */
1625        if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1626                return -EINVAL;
1627
1628        /*
1629         * Some future proofing on the types since the uAPI is wider than the
1630         * current internal implementation.
1631         */
1632        if (overflows_type(user->slice_mask, context->slice_mask) ||
1633            overflows_type(user->subslice_mask, context->subslice_mask) ||
1634            overflows_type(user->min_eus_per_subslice,
1635                           context->min_eus_per_subslice) ||
1636            overflows_type(user->max_eus_per_subslice,
1637                           context->max_eus_per_subslice))
1638                return -EINVAL;
1639
1640        /* Check validity against hardware. */
1641        if (user->slice_mask & ~device->slice_mask)
1642                return -EINVAL;
1643
1644        if (user->subslice_mask & ~device->subslice_mask[0])
1645                return -EINVAL;
1646
1647        if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1648                return -EINVAL;
1649
1650        context->slice_mask = user->slice_mask;
1651        context->subslice_mask = user->subslice_mask;
1652        context->min_eus_per_subslice = user->min_eus_per_subslice;
1653        context->max_eus_per_subslice = user->max_eus_per_subslice;
1654
1655        /* Part specific restrictions. */
1656        if (GRAPHICS_VER(i915) == 11) {
1657                unsigned int hw_s = hweight8(device->slice_mask);
1658                unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1659                unsigned int req_s = hweight8(context->slice_mask);
1660                unsigned int req_ss = hweight8(context->subslice_mask);
1661
1662                /*
1663                 * Only full subslice enablement is possible if more than one
1664                 * slice is turned on.
1665                 */
1666                if (req_s > 1 && req_ss != hw_ss_per_s)
1667                        return -EINVAL;
1668
1669                /*
1670                 * If more than four (SScount bitfield limit) subslices are
1671                 * requested then the number has to be even.
1672                 */
1673                if (req_ss > 4 && (req_ss & 1))
1674                        return -EINVAL;
1675
1676                /*
1677                 * If only one slice is enabled and subslice count is below the
1678         * device full enablement, it must be at most half of all the
1679                 * available subslices.
1680                 */
1681                if (req_s == 1 && req_ss < hw_ss_per_s &&
1682                    req_ss > (hw_ss_per_s / 2))
1683                        return -EINVAL;
1684
1685                /* ABI restriction - VME use case only. */
1686
1687                /* All slices or one slice only. */
1688                if (req_s != 1 && req_s != hw_s)
1689                        return -EINVAL;
1690
1691                /*
1692                 * Half subslices or full enablement only when one slice is
1693                 * enabled.
1694                 */
1695                if (req_s == 1 &&
1696                    (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1697                        return -EINVAL;
1698
1699                /* No EU configuration changes. */
1700                if ((user->min_eus_per_subslice !=
1701                     device->max_eus_per_subslice) ||
1702                    (user->max_eus_per_subslice !=
1703                     device->max_eus_per_subslice))
1704                        return -EINVAL;
1705        }
1706
1707        return 0;
1708}
1709
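/*
 * Illustrative userspace sketch for reconfiguring the render engine SSEU
 * through CONTEXT_SETPARAM, which lands in set_sseu() below; the path is
 * only accepted on graphics version 11. Not part of this file; the values
 * assume a part with one slice and eight subslices, and error handling is
 * omitted:
 *
 *        struct drm_i915_gem_context_param_sseu sseu = {
 *                .engine = { I915_ENGINE_CLASS_RENDER, 0 },
 *                .slice_mask = 0x1,
 *                .subslice_mask = 0xf,        // half of the subslices
 *                .min_eus_per_subslice = 8,
 *                .max_eus_per_subslice = 8,
 *        };
 *        struct drm_i915_gem_context_param arg = {
 *                .ctx_id = ctx_id,
 *                .param = I915_CONTEXT_PARAM_SSEU,
 *                .size = sizeof(sseu),
 *                .value = (uintptr_t)&sseu,
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
 */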
1710static int set_sseu(struct i915_gem_context *ctx,
1711                    struct drm_i915_gem_context_param *args)
1712{
1713        struct drm_i915_private *i915 = ctx->i915;
1714        struct drm_i915_gem_context_param_sseu user_sseu;
1715        struct intel_context *ce;
1716        struct intel_sseu sseu;
1717        unsigned long lookup;
1718        int ret;
1719
1720        if (args->size < sizeof(user_sseu))
1721                return -EINVAL;
1722
1723        if (GRAPHICS_VER(i915) != 11)
1724                return -ENODEV;
1725
1726        if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1727                           sizeof(user_sseu)))
1728                return -EFAULT;
1729
1730        if (user_sseu.rsvd)
1731                return -EINVAL;
1732
1733        if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1734                return -EINVAL;
1735
1736        lookup = 0;
1737        if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1738                lookup |= LOOKUP_USER_INDEX;
1739
1740        ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1741        if (IS_ERR(ce))
1742                return PTR_ERR(ce);
1743
1744        /* Only the render engine supports RPCS configuration. */
1745        if (ce->engine->class != RENDER_CLASS) {
1746                ret = -ENODEV;
1747                goto out_ce;
1748        }
1749
1750        ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
1751        if (ret)
1752                goto out_ce;
1753
1754        ret = intel_context_reconfigure_sseu(ce, sseu);
1755        if (ret)
1756                goto out_ce;
1757
1758        args->size = sizeof(user_sseu);
1759
1760out_ce:
1761        intel_context_put(ce);
1762        return ret;
1763}
1764
1765static int
1766set_persistence(struct i915_gem_context *ctx,
1767                const struct drm_i915_gem_context_param *args)
1768{
1769        if (args->size)
1770                return -EINVAL;
1771
1772        return __context_set_persistence(ctx, args->value);
1773}
1774
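/*
 * __apply_priority() adjusts semaphore usage to the context's new priority:
 * on timeslicing-capable engines, inter-engine semaphores are only used
 * when the context runs at or above normal priority (and the engine
 * supports them); otherwise semaphore use is cleared.
 */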
1775static void __apply_priority(struct intel_context *ce, void *arg)
1776{
1777        struct i915_gem_context *ctx = arg;
1778
1779        if (!intel_engine_has_timeslices(ce->engine))
1780                return;
1781
1782        if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
1783            intel_engine_has_semaphores(ce->engine))
1784                intel_context_set_use_semaphores(ce);
1785        else
1786                intel_context_clear_use_semaphores(ce);
1787}
1788
1789static int set_priority(struct i915_gem_context *ctx,
1790                        const struct drm_i915_gem_context_param *args)
1791{
1792        int err;
1793
1794        err = validate_priority(ctx->i915, args);
1795        if (err)
1796                return err;
1797
1798        ctx->sched.priority = args->value;
1799        context_apply_all(ctx, __apply_priority, ctx);
1800
1801        return 0;
1802}
1803
1804static int ctx_setparam(struct drm_i915_file_private *fpriv,
1805                        struct i915_gem_context *ctx,
1806                        struct drm_i915_gem_context_param *args)
1807{
1808        int ret = 0;
1809
1810        switch (args->param) {
1811        case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
1812                if (args->size)
1813                        ret = -EINVAL;
1814                else if (args->value)
1815                        i915_gem_context_set_no_error_capture(ctx);
1816                else
1817                        i915_gem_context_clear_no_error_capture(ctx);
1818                break;
1819
1820        case I915_CONTEXT_PARAM_BANNABLE:
1821                if (args->size)
1822                        ret = -EINVAL;
1823                else if (!capable(CAP_SYS_ADMIN) && !args->value)
1824                        ret = -EPERM;
1825                else if (args->value)
1826                        i915_gem_context_set_bannable(ctx);
1827                else
1828                        i915_gem_context_clear_bannable(ctx);
1829                break;
1830
1831        case I915_CONTEXT_PARAM_RECOVERABLE:
1832                if (args->size)
1833                        ret = -EINVAL;
1834                else if (args->value)
1835                        i915_gem_context_set_recoverable(ctx);
1836                else
1837                        i915_gem_context_clear_recoverable(ctx);
1838                break;
1839
1840        case I915_CONTEXT_PARAM_PRIORITY:
1841                ret = set_priority(ctx, args);
1842                break;
1843
1844        case I915_CONTEXT_PARAM_SSEU:
1845                ret = set_sseu(ctx, args);
1846                break;
1847
1848        case I915_CONTEXT_PARAM_PERSISTENCE:
1849                ret = set_persistence(ctx, args);
1850                break;
1851
1852        case I915_CONTEXT_PARAM_NO_ZEROMAP:
1853        case I915_CONTEXT_PARAM_BAN_PERIOD:
1854        case I915_CONTEXT_PARAM_RINGSIZE:
1855        case I915_CONTEXT_PARAM_VM:
1856        case I915_CONTEXT_PARAM_ENGINES:
1857        default:
1858                ret = -EINVAL;
1859                break;
1860        }
1861
1862        return ret;
1863}
1864
1865struct create_ext {
1866        struct i915_gem_proto_context *pc;
1867        struct drm_i915_file_private *fpriv;
1868};
1869
1870static int create_setparam(struct i915_user_extension __user *ext, void *data)
1871{
1872        struct drm_i915_gem_context_create_ext_setparam local;
1873        const struct create_ext *arg = data;
1874
1875        if (copy_from_user(&local, ext, sizeof(local)))
1876                return -EFAULT;
1877
1878        if (local.param.ctx_id)
1879                return -EINVAL;
1880
1881        return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param);
1882}
1883
1884static int invalid_ext(struct i915_user_extension __user *ext, void *data)
1885{
1886        return -EINVAL;
1887}
1888
1889static const i915_user_extension_fn create_extensions[] = {
1890        [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
1891        [I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext,
1892};
1893
1894static bool client_is_banned(struct drm_i915_file_private *file_priv)
1895{
1896        return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
1897}
1898
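/*
 * Lockless lookup of a context id: the xarray is walked under RCU and the
 * context is only returned if a reference can still be taken with
 * kref_get_unless_zero(), i.e. if it has not already begun to be freed.
 */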
1899static inline struct i915_gem_context *
1900__context_lookup(struct drm_i915_file_private *file_priv, u32 id)
1901{
1902        struct i915_gem_context *ctx;
1903
1904        rcu_read_lock();
1905        ctx = xa_load(&file_priv->context_xa, id);
1906        if (ctx && !kref_get_unless_zero(&ctx->ref))
1907                ctx = NULL;
1908        rcu_read_unlock();
1909
1910        return ctx;
1911}
1912
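/*
 * Turn a proto-context into a real context on first use: create the
 * context from the proto-context's parameters, register it under the same
 * id, drop the proto-context entry, and return the new context with an
 * extra reference for the caller (the xarray holds the other one).
 */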
1913static struct i915_gem_context *
1914finalize_create_context_locked(struct drm_i915_file_private *file_priv,
1915                               struct i915_gem_proto_context *pc, u32 id)
1916{
1917        struct i915_gem_context *ctx;
1918        void *old;
1919
1920        lockdep_assert_held(&file_priv->proto_context_lock);
1921
1922        ctx = i915_gem_create_context(file_priv->dev_priv, pc);
1923        if (IS_ERR(ctx))
1924                return ctx;
1925
1926        gem_context_register(ctx, file_priv, id);
1927
1928        old = xa_erase(&file_priv->proto_context_xa, id);
1929        GEM_BUG_ON(old != pc);
1930        proto_context_close(pc);
1931
1932        /* One for the xarray and one for the caller */
1933        return i915_gem_context_get(ctx);
1934}
1935
1936struct i915_gem_context *
1937i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
1938{
1939        struct i915_gem_proto_context *pc;
1940        struct i915_gem_context *ctx;
1941
1942        ctx = __context_lookup(file_priv, id);
1943        if (ctx)
1944                return ctx;
1945
1946        mutex_lock(&file_priv->proto_context_lock);
1947        /* Try one more time under the lock */
1948        ctx = __context_lookup(file_priv, id);
1949        if (!ctx) {
1950                pc = xa_load(&file_priv->proto_context_xa, id);
1951                if (!pc)
1952                        ctx = ERR_PTR(-ENOENT);
1953                else
1954                        ctx = finalize_create_context_locked(file_priv, pc, id);
1955        }
1956        mutex_unlock(&file_priv->proto_context_lock);
1957
1958        return ctx;
1959}
1960
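/*
 * Illustrative userspace sketch for GEM_CONTEXT_CREATE_EXT with a chained
 * SETPARAM extension, as parsed by the ioctl below; here the new context
 * is marked non-recoverable. Not part of this file; error handling is
 * omitted:
 *
 *        struct drm_i915_gem_context_create_ext_setparam p = {
 *                .base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *                .param = {
 *                        .param = I915_CONTEXT_PARAM_RECOVERABLE,
 *                        .value = 0,
 *                },
 *        };
 *        struct drm_i915_gem_context_create_ext create = {
 *                .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *                .extensions = (uintptr_t)&p,
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *        // create.ctx_id now names the new context.
 */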
1961int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
1962                                  struct drm_file *file)
1963{
1964        struct drm_i915_private *i915 = to_i915(dev);
1965        struct drm_i915_gem_context_create_ext *args = data;
1966        struct create_ext ext_data;
1967        int ret;
1968        u32 id;
1969
1970        if (!DRIVER_CAPS(i915)->has_logical_contexts)
1971                return -ENODEV;
1972
1973        if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
1974                return -EINVAL;
1975
1976        ret = intel_gt_terminally_wedged(&i915->gt);
1977        if (ret)
1978                return ret;
1979
1980        ext_data.fpriv = file->driver_priv;
1981        if (client_is_banned(ext_data.fpriv)) {
1982                drm_dbg(&i915->drm,
1983                        "client %s[%d] banned from creating ctx\n",
1984                        current->comm, task_pid_nr(current));
1985                return -EIO;
1986        }
1987
1988        ext_data.pc = proto_context_create(i915, args->flags);
1989        if (IS_ERR(ext_data.pc))
1990                return PTR_ERR(ext_data.pc);
1991
1992        if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
1993                ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
1994                                           create_extensions,
1995                                           ARRAY_SIZE(create_extensions),
1996                                           &ext_data);
1997                if (ret)
1998                        goto err_pc;
1999        }
2000
2001        if (GRAPHICS_VER(i915) > 12) {
2002                struct i915_gem_context *ctx;
2003
2004                /* Get ourselves a context ID */
2005                ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL,
2006                               xa_limit_32b, GFP_KERNEL);
2007                if (ret)
2008                        goto err_pc;
2009
2010                ctx = i915_gem_create_context(i915, ext_data.pc);
2011                if (IS_ERR(ctx)) {
2012                        ret = PTR_ERR(ctx);
2013                        goto err_pc;
2014                }
2015
2016                proto_context_close(ext_data.pc);
2017                gem_context_register(ctx, ext_data.fpriv, id);
2018        } else {
2019                ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id);
2020                if (ret < 0)
2021                        goto err_pc;
2022        }
2023
2024        args->ctx_id = id;
2025        drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);
2026
2027        return 0;
2028
2029err_pc:
2030        proto_context_close(ext_data.pc);
2031        return ret;
2032}
2033
2034int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2035                                   struct drm_file *file)
2036{
2037        struct drm_i915_gem_context_destroy *args = data;
2038        struct drm_i915_file_private *file_priv = file->driver_priv;
2039        struct i915_gem_proto_context *pc;
2040        struct i915_gem_context *ctx;
2041
2042        if (args->pad != 0)
2043                return -EINVAL;
2044
2045        if (!args->ctx_id)
2046                return -ENOENT;
2047
2048        /* We need to hold the proto-context lock here to prevent races
2049         * with finalize_create_context_locked().
2050         */
2051        mutex_lock(&file_priv->proto_context_lock);
2052        ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
2053        pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id);
2054        mutex_unlock(&file_priv->proto_context_lock);
2055
2056        if (!ctx && !pc)
2057                return -ENOENT;
2058        GEM_WARN_ON(ctx && pc);
2059
2060        if (pc)
2061                proto_context_close(pc);
2062
2063        if (ctx)
2064                context_close(ctx);
2065
2066        return 0;
2067}
2068
2069static int get_sseu(struct i915_gem_context *ctx,
2070                    struct drm_i915_gem_context_param *args)
2071{
2072        struct drm_i915_gem_context_param_sseu user_sseu;
2073        struct intel_context *ce;
2074        unsigned long lookup;
2075        int err;
2076
2077        if (args->size == 0)
2078                goto out;
2079        else if (args->size < sizeof(user_sseu))
2080                return -EINVAL;
2081
2082        if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2083                           sizeof(user_sseu)))
2084                return -EFAULT;
2085
2086        if (user_sseu.rsvd)
2087                return -EINVAL;
2088
2089        if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2090                return -EINVAL;
2091
2092        lookup = 0;
2093        if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2094                lookup |= LOOKUP_USER_INDEX;
2095
2096        ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2097        if (IS_ERR(ce))
2098                return PTR_ERR(ce);
2099
2100        err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2101        if (err) {
2102                intel_context_put(ce);
2103                return err;
2104        }
2105
2106        user_sseu.slice_mask = ce->sseu.slice_mask;
2107        user_sseu.subslice_mask = ce->sseu.subslice_mask;
2108        user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2109        user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2110
2111        intel_context_unlock_pinned(ce);
2112        intel_context_put(ce);
2113
2114        if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2115                         sizeof(user_sseu)))
2116                return -EFAULT;
2117
2118out:
2119        args->size = sizeof(user_sseu);
2120
2121        return 0;
2122}
2123
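/*
 * Illustrative userspace sketch for querying a context parameter via the
 * getparam ioctl below, here the amount of addressable GTT (not part of
 * this file; error handling is omitted):
 *
 *        struct drm_i915_gem_context_param arg = {
 *                .ctx_id = ctx_id,
 *                .param = I915_CONTEXT_PARAM_GTT_SIZE,
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
 *        // arg.value now holds the total size of the context's VM in bytes.
 */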
2124int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2125                                    struct drm_file *file)
2126{
2127        struct drm_i915_file_private *file_priv = file->driver_priv;
2128        struct drm_i915_gem_context_param *args = data;
2129        struct i915_gem_context *ctx;
2130        int ret = 0;
2131
2132        ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2133        if (IS_ERR(ctx))
2134                return PTR_ERR(ctx);
2135
2136        switch (args->param) {
2137        case I915_CONTEXT_PARAM_GTT_SIZE:
2138                args->size = 0;
2139                rcu_read_lock();
2140                if (rcu_access_pointer(ctx->vm))
2141                        args->value = rcu_dereference(ctx->vm)->total;
2142                else
2143                        args->value = to_i915(dev)->ggtt.vm.total;
2144                rcu_read_unlock();
2145                break;
2146
2147        case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2148                args->size = 0;
2149                args->value = i915_gem_context_no_error_capture(ctx);
2150                break;
2151
2152        case I915_CONTEXT_PARAM_BANNABLE:
2153                args->size = 0;
2154                args->value = i915_gem_context_is_bannable(ctx);
2155                break;
2156
2157        case I915_CONTEXT_PARAM_RECOVERABLE:
2158                args->size = 0;
2159                args->value = i915_gem_context_is_recoverable(ctx);
2160                break;
2161
2162        case I915_CONTEXT_PARAM_PRIORITY:
2163                args->size = 0;
2164                args->value = ctx->sched.priority;
2165                break;
2166
2167        case I915_CONTEXT_PARAM_SSEU:
2168                ret = get_sseu(ctx, args);
2169                break;
2170
2171        case I915_CONTEXT_PARAM_VM:
2172                ret = get_ppgtt(file_priv, ctx, args);
2173                break;
2174
2175        case I915_CONTEXT_PARAM_PERSISTENCE:
2176                args->size = 0;
2177                args->value = i915_gem_context_is_persistent(ctx);
2178                break;
2179
2180        case I915_CONTEXT_PARAM_NO_ZEROMAP:
2181        case I915_CONTEXT_PARAM_BAN_PERIOD:
2182        case I915_CONTEXT_PARAM_ENGINES:
2183        case I915_CONTEXT_PARAM_RINGSIZE:
2184        default:
2185                ret = -EINVAL;
2186                break;
2187        }
2188
2189        i915_gem_context_put(ctx);
2190        return ret;
2191}
2192
2193int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2194                                    struct drm_file *file)
2195{
2196        struct drm_i915_file_private *file_priv = file->driver_priv;
2197        struct drm_i915_gem_context_param *args = data;
2198        struct i915_gem_proto_context *pc;
2199        struct i915_gem_context *ctx;
2200        int ret = 0;
2201
2202        mutex_lock(&file_priv->proto_context_lock);
2203        ctx = __context_lookup(file_priv, args->ctx_id);
2204        if (!ctx) {
2205                pc = xa_load(&file_priv->proto_context_xa, args->ctx_id);
2206                if (pc) {
2207                        /* Contexts should be finalized inside
2208                         * GEM_CONTEXT_CREATE starting with graphics
2209                         * version 13.
2210                         */
2211                        WARN_ON(GRAPHICS_VER(file_priv->dev_priv) > 12);
2212                        ret = set_proto_ctx_param(file_priv, pc, args);
2213                } else {
2214                        ret = -ENOENT;
2215                }
2216        }
2217        mutex_unlock(&file_priv->proto_context_lock);
2218
2219        if (ctx) {
2220                ret = ctx_setparam(file_priv, ctx, args);
2221                i915_gem_context_put(ctx);
2222        }
2223
2224        return ret;
2225}
2226
2227int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2228                                       void *data, struct drm_file *file)
2229{
2230        struct drm_i915_private *i915 = to_i915(dev);
2231        struct drm_i915_reset_stats *args = data;
2232        struct i915_gem_context *ctx;
2233
2234        if (args->flags || args->pad)
2235                return -EINVAL;
2236
2237        ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
2238        if (IS_ERR(ctx))
2239                return PTR_ERR(ctx);
2240
2241        /*
2242         * We opt for unserialised reads here. This may result in tearing
2243         * in the extremely unlikely event of a GPU hang on this context
2244         * while we are querying the stats. If we need that extra layer of
2245         * protection, we should wrap the hangstats with a seqlock.
2246         */
2247
2248        if (capable(CAP_SYS_ADMIN))
2249                args->reset_count = i915_reset_count(&i915->gpu_error);
2250        else
2251                args->reset_count = 0;
2252
2253        args->batch_active = atomic_read(&ctx->guilty_count);
2254        args->batch_pending = atomic_read(&ctx->active_count);
2255
2256        i915_gem_context_put(ctx);
2257        return 0;
2258}
2259
2260/* GEM context-engines iterator: for_each_gem_engine() */
2261struct intel_context *
2262i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2263{
2264        const struct i915_gem_engines *e = it->engines;
2265        struct intel_context *ctx;
2266
2267        if (unlikely(!e))
2268                return NULL;
2269
2270        do {
2271                if (it->idx >= e->num_engines)
2272                        return NULL;
2273
2274                ctx = e->engines[it->idx++];
2275        } while (!ctx);
2276
2277        return ctx;
2278}
2279
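/*
 * Illustrative sketch of walking a context's engines with the iterator
 * above via for_each_gem_engine() (not part of this file):
 *
 *        struct i915_gem_engines_iter it;
 *        struct intel_context *ce;
 *
 *        for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
 *                // use ce
 *        }
 *        i915_gem_context_unlock_engines(ctx);
 */
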
2280#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2281#include "selftests/mock_context.c"
2282#include "selftests/i915_gem_context.c"
2283#endif
2284
2285void i915_gem_context_module_exit(void)
2286{
2287        kmem_cache_destroy(slab_luts);
2288}
2289
2290int __init i915_gem_context_module_init(void)
2291{
2292        slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2293        if (!slab_luts)
2294                return -ENOMEM;
2295
2296        return 0;
2297}
2298