/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2011-2012 Intel Corporation
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists
 * of an opaque GPU object which is referenced at times of context saves
 * and restores. With RC6 enabled, the context is also referenced as the
 * GPU enters and exits from RC6 (the GPU has its own internal power
 * context, except on gen5).
 *
 * There is a distinction between contexts created explicitly by the user
 * and the default HW context used by clients that do not request one. The
 * default context's state is never restored, to help prevent programming
 * errors from one client leaking GPU state into another; it exists to give
 * the GPU something to load when switching away from the context we
 * actually care about.
 *
 * All other contexts are created at the request of a GPU client. These
 * contexts store GPU state so that clients do not need to re-emit (and may
 * query) that state at any time. Contexts are reference counted: the file
 * that created a context holds a reference, and further references are
 * taken for as long as requests using the context remain outstanding.
 */
#include <linux/log2.h>
#include <linux/nospec.h>

#include <drm/i915_drm.h>

#include "gt/intel_lrc_reg.h"

#include "i915_gem_context.h"
#include "i915_globals.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

static struct i915_global_gem_context {
        struct i915_global base;
        struct kmem_cache *slab_luts;
} global;

struct i915_lut_handle *i915_lut_handle_alloc(void)
{
        return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
}

void i915_lut_handle_free(struct i915_lut_handle *lut)
{
        kmem_cache_free(global.slab_luts, lut);
}

static void lut_close(struct i915_gem_context *ctx)
{
        struct radix_tree_iter iter;
        void __rcu **slot;

        lockdep_assert_held(&ctx->mutex);

        rcu_read_lock();
        radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
                struct i915_vma *vma = rcu_dereference_raw(*slot);
                struct drm_i915_gem_object *obj = vma->obj;
                struct i915_lut_handle *lut;

                if (!kref_get_unless_zero(&obj->base.refcount))
                        continue;

                rcu_read_unlock();
                i915_gem_object_lock(obj);
                list_for_each_entry(lut, &obj->lut_list, obj_link) {
                        if (lut->ctx != ctx)
                                continue;

                        if (lut->handle != iter.index)
                                continue;

                        list_del(&lut->obj_link);
                        break;
                }
                i915_gem_object_unlock(obj);
                rcu_read_lock();

                /* If the loop broke early, we found and unlinked the lut */
                if (&lut->obj_link != &obj->lut_list) {
                        i915_lut_handle_free(lut);
                        radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
                        if (atomic_dec_and_test(&vma->open_count) &&
                            !i915_vma_is_ggtt(vma))
                                i915_vma_close(vma);
                        i915_gem_object_put(obj); /* the lut's reference */
                }

                i915_gem_object_put(obj); /* our kref_get_unless_zero() */
        }
        rcu_read_unlock();
}

static struct intel_context *
lookup_user_engine(struct i915_gem_context *ctx,
                   unsigned long flags,
                   const struct i915_engine_class_instance *ci)
#define LOOKUP_USER_INDEX BIT(0)
{
        int idx;

        if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
                return ERR_PTR(-EINVAL);

        if (!i915_gem_context_user_engines(ctx)) {
                struct intel_engine_cs *engine;

                engine = intel_engine_lookup_user(ctx->i915,
                                                  ci->engine_class,
                                                  ci->engine_instance);
                if (!engine)
                        return ERR_PTR(-EINVAL);

                idx = engine->id;
        } else {
                idx = ci->engine_instance;
        }

        return i915_gem_context_get_engine(ctx, idx);
}

static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
{
        unsigned int max;

        lockdep_assert_held(&i915->contexts.mutex);

        if (INTEL_GEN(i915) >= 11)
                max = GEN11_MAX_CONTEXT_HW_ID;
        else if (USES_GUC_SUBMISSION(i915))
                /*
                 * When using GuC in proxy submission, GuC consumes the
                 * highest bit in the context id to indicate proxy
                 * submission.
                 */
                max = MAX_GUC_CONTEXT_HW_ID;
        else
                max = MAX_CONTEXT_HW_ID;

        return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
}

static int steal_hw_id(struct drm_i915_private *i915)
{
        struct i915_gem_context *ctx, *cn;
        LIST_HEAD(pinned);
        int id = -ENOSPC;

        lockdep_assert_held(&i915->contexts.mutex);

        list_for_each_entry_safe(ctx, cn,
                                 &i915->contexts.hw_id_list, hw_id_link) {
                if (atomic_read(&ctx->hw_id_pin_count)) {
                        list_move_tail(&ctx->hw_id_link, &pinned);
                        continue;
                }

                GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
                list_del_init(&ctx->hw_id_link);
                id = ctx->hw_id;
                break;
        }

        /*
         * Remember how far we got up on the last repossession scan, so the
         * list is kept in a "least recently scanned" order.
         */
        list_splice_tail(&pinned, &i915->contexts.hw_id_list);
        return id;
}

static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
{
        int ret;

        lockdep_assert_held(&i915->contexts.mutex);

        /*
         * We prefer to steal/stall ourselves and our users over that of
         * the entire active system. That is, we are willing to
         * instantaneously pause a process such that it has to wait for the
         * lock to be dropped, rather than stall the whole system until a
         * request comes in that would steal a hw_id.
         */
        ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
        if (unlikely(ret < 0)) {
                ret = steal_hw_id(i915);
                if (ret < 0) /* once again for the correct error code */
                        ret = new_hw_id(i915, GFP_KERNEL);
                if (ret < 0)
                        return ret;
        }

        *out = ret;
        return 0;
}

static void release_hw_id(struct i915_gem_context *ctx)
{
        struct drm_i915_private *i915 = ctx->i915;

        if (list_empty(&ctx->hw_id_link))
                return;

        mutex_lock(&i915->contexts.mutex);
        if (!list_empty(&ctx->hw_id_link)) {
                ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
                list_del_init(&ctx->hw_id_link);
        }
        mutex_unlock(&i915->contexts.mutex);
}

static void __free_engines(struct i915_gem_engines *e, unsigned int count)
{
        while (count--) {
                if (!e->engines[count])
                        continue;

                intel_context_put(e->engines[count]);
        }
        kfree(e);
}

static void free_engines(struct i915_gem_engines *e)
{
        __free_engines(e, e->num_engines);
}

static void free_engines_rcu(struct rcu_head *rcu)
{
        free_engines(container_of(rcu, struct i915_gem_engines, rcu));
}

static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
{
        struct intel_engine_cs *engine;
        struct i915_gem_engines *e;
        enum intel_engine_id id;

        e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
        if (!e)
                return ERR_PTR(-ENOMEM);

        init_rcu_head(&e->rcu);
        for_each_engine(engine, ctx->i915, id) {
                struct intel_context *ce;

                ce = intel_context_create(ctx, engine);
                if (IS_ERR(ce)) {
                        __free_engines(e, id);
                        return ERR_CAST(ce);
                }

                e->engines[id] = ce;
        }
        e->num_engines = id;

        return e;
}

static void i915_gem_context_free(struct i915_gem_context *ctx)
{
        lockdep_assert_held(&ctx->i915->drm.struct_mutex);
        GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

        release_hw_id(ctx);
        if (ctx->vm)
                i915_vm_put(ctx->vm);

        free_engines(rcu_access_pointer(ctx->engines));
        mutex_destroy(&ctx->engines_mutex);

        if (ctx->timeline)
                i915_timeline_put(ctx->timeline);

        kfree(ctx->name);
        put_pid(ctx->pid);

        list_del(&ctx->link);
        mutex_destroy(&ctx->mutex);

        kfree_rcu(ctx, rcu);
}

static void contexts_free(struct drm_i915_private *i915)
{
        struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
        struct i915_gem_context *ctx, *cn;

        lockdep_assert_held(&i915->drm.struct_mutex);

        llist_for_each_entry_safe(ctx, cn, freed, free_link)
                i915_gem_context_free(ctx);
}

static void contexts_free_first(struct drm_i915_private *i915)
{
        struct i915_gem_context *ctx;
        struct llist_node *freed;

        lockdep_assert_held(&i915->drm.struct_mutex);

        freed = llist_del_first(&i915->contexts.free_list);
        if (!freed)
                return;

        ctx = container_of(freed, typeof(*ctx), free_link);
        i915_gem_context_free(ctx);
}

static void contexts_free_worker(struct work_struct *work)
{
        struct drm_i915_private *i915 =
                container_of(work, typeof(*i915), contexts.free_work);

        mutex_lock(&i915->drm.struct_mutex);
        contexts_free(i915);
        mutex_unlock(&i915->drm.struct_mutex);
}

void i915_gem_context_release(struct kref *ref)
{
        struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
        struct drm_i915_private *i915 = ctx->i915;

        trace_i915_context_free(ctx);
        if (llist_add(&ctx->free_link, &i915->contexts.free_list))
                queue_work(i915->wq, &i915->contexts.free_work);
}

static void context_close(struct i915_gem_context *ctx)
{
        mutex_lock(&ctx->mutex);

        i915_gem_context_set_closed(ctx);
        ctx->file_priv = ERR_PTR(-EBADF);

        /*
         * This context will never again be assigned to HW, so we can
         * reuse its ID for the next context.
         */
        release_hw_id(ctx);

        /*
         * The LUT uses the VMA as a backpointer to unref the object,
         * so we need to clear the LUT before we close all the VMA (inside
         * the ppgtt).
         */
        lut_close(ctx);

        mutex_unlock(&ctx->mutex);
        i915_gem_context_put(ctx);
}

static u32 default_desc_template(const struct drm_i915_private *i915,
                                 const struct i915_address_space *vm)
{
        u32 address_mode;
        u32 desc;

        desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;

        address_mode = INTEL_LEGACY_32B_CONTEXT;
        if (vm && i915_vm_is_4lvl(vm))
                address_mode = INTEL_LEGACY_64B_CONTEXT;
        desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;

        if (IS_GEN(i915, 8))
                desc |= GEN8_CTX_L3LLC_COHERENT;

        /* TODO: WaDisableLiteRestore when we start using semaphore
         * signalling between Command Streamers
         * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
         */

        return desc;
}
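
/*
 * Worked example (illustrative): with a 4-level ppGTT the template built
 * above is
 *
 *      GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE |
 *      (INTEL_LEGACY_64B_CONTEXT << GEN8_CTX_ADDRESSING_MODE_SHIFT)
 *
 * with GEN8_CTX_L3LLC_COHERENT OR'ed in on gen8 only. The execlists code
 * later combines this template with the context's hw_id and the address
 * of its logical ring context to form the full descriptor submitted to
 * the hardware.
 */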

static struct i915_gem_context *
__create_context(struct drm_i915_private *i915)
{
        struct i915_gem_context *ctx;
        struct i915_gem_engines *e;
        int err;
        int i;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return ERR_PTR(-ENOMEM);

        kref_init(&ctx->ref);
        list_add_tail(&ctx->link, &i915->contexts.list);
        ctx->i915 = i915;
        ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
        mutex_init(&ctx->mutex);

        mutex_init(&ctx->engines_mutex);
        e = default_engines(ctx);
        if (IS_ERR(e)) {
                err = PTR_ERR(e);
                goto err_free;
        }
        RCU_INIT_POINTER(ctx->engines, e);

        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
        INIT_LIST_HEAD(&ctx->hw_id_link);

        /* NB: Mark all slices as needing a remap so that when the context
         * first loads it will restore whatever remap state already exists.
         * If there is no remap info, it will be a NOP.
         */
        ctx->remap_slice = ALL_L3_SLICES(i915);

        i915_gem_context_set_bannable(ctx);
        i915_gem_context_set_recoverable(ctx);

        ctx->ring_size = 4 * PAGE_SIZE;
        ctx->desc_template =
                default_desc_template(i915, &i915->mm.aliasing_ppgtt->vm);

        for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
                ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;

        return ctx;

err_free:
        kfree(ctx);
        return ERR_PTR(err);
}

static struct i915_address_space *
__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
{
        struct i915_address_space *old = ctx->vm;

        ctx->vm = i915_vm_get(vm);
        ctx->desc_template = default_desc_template(ctx->i915, vm);

        return old;
}

static void __assign_ppgtt(struct i915_gem_context *ctx,
                           struct i915_address_space *vm)
{
        if (vm == ctx->vm)
                return;

        vm = __set_ppgtt(ctx, vm);
        if (vm)
                i915_vm_put(vm);
}

static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
{
        struct i915_gem_context *ctx;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
            !HAS_EXECLISTS(dev_priv))
                return ERR_PTR(-EINVAL);

        /* Reap the most stale context */
        contexts_free_first(dev_priv);

        ctx = __create_context(dev_priv);
        if (IS_ERR(ctx))
                return ctx;

        if (HAS_FULL_PPGTT(dev_priv)) {
                struct i915_ppgtt *ppgtt;

                ppgtt = i915_ppgtt_create(dev_priv);
                if (IS_ERR(ppgtt)) {
                        DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
                                         PTR_ERR(ppgtt));
                        context_close(ctx);
                        return ERR_CAST(ppgtt);
                }

                __assign_ppgtt(ctx, &ppgtt->vm);
                i915_vm_put(&ppgtt->vm);
        }

        if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
                struct i915_timeline *timeline;

                timeline = i915_timeline_create(dev_priv, NULL);
                if (IS_ERR(timeline)) {
                        context_close(ctx);
                        return ERR_CAST(timeline);
                }

                ctx->timeline = timeline;
        }

        trace_i915_context_create(ctx);

        return ctx;
}

/**
 * i915_gem_context_create_gvt - create a GVT GEM context
 * @dev: drm device *
 *
 * This function is used to create a GVT specific GEM context.
 *
 * Returns:
 * pointer to i915_gem_context on success, error pointer if failed
 *
 */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev)
{
        struct i915_gem_context *ctx;
        int ret;

        if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
                return ERR_PTR(-ENODEV);

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ERR_PTR(ret);

        ctx = i915_gem_create_context(to_i915(dev), 0);
        if (IS_ERR(ctx))
                goto out;

        ret = i915_gem_context_pin_hw_id(ctx);
        if (ret) {
                context_close(ctx);
                ctx = ERR_PTR(ret);
                goto out;
        }

        ctx->file_priv = ERR_PTR(-EBADF);
        i915_gem_context_set_closed(ctx); /* not user accessible */
        i915_gem_context_clear_bannable(ctx);
        i915_gem_context_set_force_single_submission(ctx);
        if (!USES_GUC_SUBMISSION(to_i915(dev)))
                ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */

        GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
out:
        mutex_unlock(&dev->struct_mutex);
        return ctx;
}

static void
destroy_kernel_context(struct i915_gem_context **ctxp)
{
        struct i915_gem_context *ctx;

        /* Keep the context ref so that we can free it immediately ourselves */
        ctx = i915_gem_context_get(fetch_and_zero(ctxp));
        GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

        context_close(ctx);
        i915_gem_context_free(ctx);
}

struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
{
        struct i915_gem_context *ctx;
        int err;

        ctx = i915_gem_create_context(i915, 0);
        if (IS_ERR(ctx))
                return ctx;

        err = i915_gem_context_pin_hw_id(ctx);
        if (err) {
                destroy_kernel_context(&ctx);
                return ERR_PTR(err);
        }

        i915_gem_context_clear_bannable(ctx);
        ctx->sched.priority = I915_USER_PRIORITY(prio);
        ctx->ring_size = PAGE_SIZE;

        GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

        return ctx;
}

static void init_contexts(struct drm_i915_private *i915)
{
        mutex_init(&i915->contexts.mutex);
        INIT_LIST_HEAD(&i915->contexts.list);

        /* Using the simple ida interface, the max is limited by sizeof(int) */
        BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
        BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
        ida_init(&i915->contexts.hw_ida);
        INIT_LIST_HEAD(&i915->contexts.hw_id_list);

        INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
        init_llist_head(&i915->contexts.free_list);
}

static bool needs_preempt_context(struct drm_i915_private *i915)
{
        return HAS_EXECLISTS(i915);
}

int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
        struct i915_gem_context *ctx;

        /* Reassure ourselves we are only called once */
        GEM_BUG_ON(dev_priv->kernel_context);
        GEM_BUG_ON(dev_priv->preempt_context);

        intel_engine_init_ctx_wa(dev_priv->engine[RCS0]);
        init_contexts(dev_priv);

        /* lowest priority; idle task */
        ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
        if (IS_ERR(ctx)) {
                DRM_ERROR("Failed to create default global context\n");
                return PTR_ERR(ctx);
        }

        /*
         * For easy recognisability, we want the kernel context to be 0 and
         * then all user contexts will have non-zero hw_id. Kernel contexts
         * are permanently pinned, so that we never suffer a stall and can
         * use them from any allocation context (e.g. when evicting other
         * contexts and from inside the shrinker).
         */
        GEM_BUG_ON(ctx->hw_id);
        GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
        dev_priv->kernel_context = ctx;

        /* highest priority; preempting task */
        if (needs_preempt_context(dev_priv)) {
                ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
                if (!IS_ERR(ctx))
                        dev_priv->preempt_context = ctx;
                else
                        DRM_ERROR("Failed to create preempt context; disabling preemption\n");
        }

        DRM_DEBUG_DRIVER("%s context support initialized\n",
                         DRIVER_CAPS(dev_priv)->has_logical_contexts ?
                         "logical" : "fake");
        return 0;
}

void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
        lockdep_assert_held(&i915->drm.struct_mutex);

        if (i915->preempt_context)
                destroy_kernel_context(&i915->preempt_context);
        destroy_kernel_context(&i915->kernel_context);

        /* Must free all deferred contexts (via flush_workqueue) first */
        GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
        ida_destroy(&i915->contexts.hw_ida);
}

static int context_idr_cleanup(int id, void *p, void *data)
{
        context_close(p);
        return 0;
}

static int vm_idr_cleanup(int id, void *p, void *data)
{
        i915_vm_put(p);
        return 0;
}

static int gem_context_register(struct i915_gem_context *ctx,
                                struct drm_i915_file_private *fpriv)
{
        int ret;

        ctx->file_priv = fpriv;
        if (ctx->vm)
                ctx->vm->file = fpriv;

        ctx->pid = get_task_pid(current, PIDTYPE_PID);
        ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
                              current->comm, pid_nr(ctx->pid));
        if (!ctx->name) {
                ret = -ENOMEM;
                goto err_pid;
        }

        /* And finally expose ourselves to userspace via the idr */
        mutex_lock(&fpriv->context_idr_lock);
        ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
        mutex_unlock(&fpriv->context_idr_lock);
        if (ret >= 0)
                goto out;

        kfree(fetch_and_zero(&ctx->name));
err_pid:
        put_pid(fetch_and_zero(&ctx->pid));
out:
        return ret;
}

int i915_gem_context_open(struct drm_i915_private *i915,
                          struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct i915_gem_context *ctx;
        int err;

        mutex_init(&file_priv->context_idr_lock);
        mutex_init(&file_priv->vm_idr_lock);

        idr_init(&file_priv->context_idr);
        idr_init_base(&file_priv->vm_idr, 1);

        mutex_lock(&i915->drm.struct_mutex);
        ctx = i915_gem_create_context(i915, 0);
        mutex_unlock(&i915->drm.struct_mutex);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
                goto err;
        }

        err = gem_context_register(ctx, file_priv);
        if (err < 0)
                goto err_ctx;

        GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
        GEM_BUG_ON(err > 0);

        return 0;

err_ctx:
        context_close(ctx);
err:
        idr_destroy(&file_priv->vm_idr);
        idr_destroy(&file_priv->context_idr);
        mutex_destroy(&file_priv->vm_idr_lock);
        mutex_destroy(&file_priv->context_idr_lock);
        return err;
}

void i915_gem_context_close(struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;

        idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
        idr_destroy(&file_priv->context_idr);
        mutex_destroy(&file_priv->context_idr_lock);

        idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
        idr_destroy(&file_priv->vm_idr);
        mutex_destroy(&file_priv->vm_idr_lock);
}

int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_vm_control *args = data;
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct i915_ppgtt *ppgtt;
        int err;

        if (!HAS_FULL_PPGTT(i915))
                return -ENODEV;

        if (args->flags)
                return -EINVAL;

        ppgtt = i915_ppgtt_create(i915);
        if (IS_ERR(ppgtt))
                return PTR_ERR(ppgtt);

        ppgtt->vm.file = file_priv;

        if (args->extensions) {
                err = i915_user_extensions(u64_to_user_ptr(args->extensions),
                                           NULL, 0,
                                           ppgtt);
                if (err)
                        goto err_put;
        }

        err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
        if (err)
                goto err_put;

        err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
        if (err < 0)
                goto err_unlock;

        GEM_BUG_ON(err == 0); /* id 0 is reserved, see idr_init_base() */

        mutex_unlock(&file_priv->vm_idr_lock);

        args->vm_id = err;
        return 0;

err_unlock:
        mutex_unlock(&file_priv->vm_idr_lock);
err_put:
        i915_vm_put(&ppgtt->vm);
        return err;
}
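
/*
 * Illustrative userspace sketch for the VM create ioctl above and its
 * destroy counterpart below (error handling omitted; fd is assumed to be
 * an open i915 DRM file descriptor):
 *
 *      struct drm_i915_gem_vm_control ctl = {};
 *
 *      ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &ctl);
 *      ... ctl.vm_id may now be assigned to a context via the
 *          I915_CONTEXT_PARAM_VM context parameter ...
 *      ioctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &ctl);
 */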

int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct drm_i915_gem_vm_control *args = data;
        struct i915_address_space *vm;
        int err;
        u32 id;

        if (args->flags)
                return -EINVAL;

        if (args->extensions)
                return -EINVAL;

        id = args->vm_id;
        if (!id)
                return -ENOENT;

        err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
        if (err)
                return err;

        vm = idr_remove(&file_priv->vm_idr, id);

        mutex_unlock(&file_priv->vm_idr_lock);
        if (!vm)
                return -ENOENT;

        i915_vm_put(vm);
        return 0;
}

struct context_barrier_task {
        struct i915_active base;
        void (*task)(void *data);
        void *data;
};

static void cb_retire(struct i915_active *base)
{
        struct context_barrier_task *cb = container_of(base, typeof(*cb), base);

        if (cb->task)
                cb->task(cb->data);

        i915_active_fini(&cb->base);
        kfree(cb);
}
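
/*
 * context_barrier_task() below submits a request on each engine of the
 * context selected by @engines (and not skipped by @skip), optionally
 * letting @emit add commands to each request, and calls @task once all of
 * those requests have been retired, i.e. once all prior work on the
 * selected engines has drained. set_ppgtt() uses it to defer releasing
 * the old address space until no request can still be walking its page
 * tables.
 */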

I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
static int context_barrier_task(struct i915_gem_context *ctx,
                                intel_engine_mask_t engines,
                                bool (*skip)(struct intel_context *ce, void *data),
                                int (*emit)(struct i915_request *rq, void *data),
                                void (*task)(void *data),
                                void *data)
{
        struct drm_i915_private *i915 = ctx->i915;
        struct context_barrier_task *cb;
        struct i915_gem_engines_iter it;
        struct intel_context *ce;
        int err = 0;

        lockdep_assert_held(&i915->drm.struct_mutex);
        GEM_BUG_ON(!task);

        cb = kmalloc(sizeof(*cb), GFP_KERNEL);
        if (!cb)
                return -ENOMEM;

        i915_active_init(i915, &cb->base, cb_retire);
        i915_active_acquire(&cb->base);

        for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
                struct i915_request *rq;

                if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
                                       ce->engine->mask)) {
                        err = -ENXIO;
                        break;
                }

                if (!(ce->engine->mask & engines))
                        continue;

                if (skip && skip(ce, data))
                        continue;

                rq = intel_context_create_request(ce);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        break;
                }

                err = 0;
                if (emit)
                        err = emit(rq, data);
                if (err == 0)
                        err = i915_active_ref(&cb->base, rq->fence.context, rq);

                i915_request_add(rq);
                if (err)
                        break;
        }
        i915_gem_context_unlock_engines(ctx);

        cb->task = err ? NULL : task; /* caller needs to unwind instead */
        cb->data = data;

        i915_active_release(&cb->base);

        return err;
}

static int get_ppgtt(struct drm_i915_file_private *file_priv,
                     struct i915_gem_context *ctx,
                     struct drm_i915_gem_context_param *args)
{
        struct i915_address_space *vm;
        int ret;

        if (!ctx->vm)
                return -ENODEV;

        /* XXX rcu acquire? */
        ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
        if (ret)
                return ret;

        vm = i915_vm_get(ctx->vm);
        mutex_unlock(&ctx->i915->drm.struct_mutex);

        ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
        if (ret)
                goto err_put;

        ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
        GEM_BUG_ON(!ret);
        if (ret < 0)
                goto err_unlock;

        i915_vm_get(vm); /* one extra reference now held by the idr */

        args->size = 0;
        args->value = ret;

        ret = 0;
err_unlock:
        mutex_unlock(&file_priv->vm_idr_lock);
err_put:
        i915_vm_put(vm);
        return ret;
}

static void set_ppgtt_barrier(void *data)
{
        struct i915_address_space *old = data;

        if (INTEL_GEN(old->i915) < 8)
                gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));

        i915_vm_put(old);
}

static int emit_ppgtt_update(struct i915_request *rq, void *data)
{
        struct i915_address_space *vm = rq->gem_context->vm;
        struct intel_engine_cs *engine = rq->engine;
        u32 base = engine->mmio_base;
        u32 *cs;
        int i;

        if (i915_vm_is_4lvl(vm)) {
                struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
                const dma_addr_t pd_daddr = px_dma(ppgtt->pd);

                cs = intel_ring_begin(rq, 6);
                if (IS_ERR(cs))
                        return PTR_ERR(cs);

                *cs++ = MI_LOAD_REGISTER_IMM(2);

                *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
                *cs++ = upper_32_bits(pd_daddr);
                *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
                *cs++ = lower_32_bits(pd_daddr);

                *cs++ = MI_NOOP;
                intel_ring_advance(rq, cs);
        } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
                struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

                cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
                if (IS_ERR(cs))
                        return PTR_ERR(cs);

                *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
                for (i = GEN8_3LVL_PDPES; i--; ) {
                        const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

                        *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
                        *cs++ = upper_32_bits(pd_daddr);
                        *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
                        *cs++ = lower_32_bits(pd_daddr);
                }
                *cs++ = MI_NOOP;
                intel_ring_advance(rq, cs);
        } else {
                /* ppGTT is not part of the legacy context image */
                gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));
        }

        return 0;
}
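
/*
 * For reference, the 4-level branch above emits the following command
 * stream (addresses illustrative):
 *
 *      MI_LOAD_REGISTER_IMM(2)
 *          GEN8_RING_PDP_UDW(base, 0) <- upper_32_bits(pd_daddr)
 *          GEN8_RING_PDP_LDW(base, 0) <- lower_32_bits(pd_daddr)
 *      MI_NOOP
 *
 * i.e. only PDP0 needs to point at the new top-level page directory,
 * whereas the 3-level branch reloads all GEN8_3LVL_PDPES page-directory
 * pointer pairs.
 */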

static bool skip_ppgtt_update(struct intel_context *ce, void *data)
{
        if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
                return !ce->state;
        else
                return !atomic_read(&ce->pin_count);
}

static int set_ppgtt(struct drm_i915_file_private *file_priv,
                     struct i915_gem_context *ctx,
                     struct drm_i915_gem_context_param *args)
{
        struct i915_address_space *vm, *old;
        int err;

        if (args->size)
                return -EINVAL;

        if (!ctx->vm)
                return -ENODEV;

        if (upper_32_bits(args->value))
                return -ENOENT;

        err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
        if (err)
                return err;

        vm = idr_find(&file_priv->vm_idr, args->value);
        if (vm)
                i915_vm_get(vm);
        mutex_unlock(&file_priv->vm_idr_lock);
        if (!vm)
                return -ENOENT;

        err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
        if (err)
                goto out;

        if (vm == ctx->vm)
                goto unlock;

        /* Teardown the existing obj:vma cache, it will have to be rebuilt. */
        mutex_lock(&ctx->mutex);
        lut_close(ctx);
        mutex_unlock(&ctx->mutex);

        old = __set_ppgtt(ctx, vm);

        /*
         * We need to flush any requests using the current ppgtt before
         * we release it as the requests do not hold a reference themselves,
         * only indirectly through the context.
         */
        err = context_barrier_task(ctx, ALL_ENGINES,
                                   skip_ppgtt_update,
                                   emit_ppgtt_update,
                                   set_ppgtt_barrier,
                                   old);
        if (err) {
                ctx->vm = old;
                ctx->desc_template = default_desc_template(ctx->i915, old);
                i915_vm_put(vm);
        }

unlock:
        mutex_unlock(&ctx->i915->drm.struct_mutex);

out:
        i915_vm_put(vm);
        return err;
}

static int gen8_emit_rpcs_config(struct i915_request *rq,
                                 struct intel_context *ce,
                                 struct intel_sseu sseu)
{
        u64 offset;
        u32 *cs;

        cs = intel_ring_begin(rq, 4);
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        offset = i915_ggtt_offset(ce->state) +
                 LRC_STATE_PN * PAGE_SIZE +
                 (CTX_R_PWR_CLK_STATE + 1) * 4;

        *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
        *cs++ = lower_32_bits(offset);
        *cs++ = upper_32_bits(offset);
        *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);

        intel_ring_advance(rq, cs);

        return 0;
}
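
/*
 * The store emitted above patches the R_PWR_CLK_STATE value directly
 * inside the target context image in the GGTT: LRC_STATE_PN pages into
 * the state object, in the dword following the CTX_R_PWR_CLK_STATE
 * register offset. gen8_modify_rpcs() below emits it from the engine's
 * kernel context, ordered after the target context's last request, so
 * the image is rewritten only once its owner is idle.
 */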

static int
gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
{
        struct i915_request *rq;
        int ret;

        lockdep_assert_held(&ce->pin_mutex);

        /*
         * If the context is not pinned there is nothing to patch; pristine
         * and idle contexts will pick up the new sseu state when they are
         * next pinned. Otherwise, submit an ordered request from the kernel
         * context to rewrite the image of the pinned context.
         */
        if (!intel_context_is_pinned(ce))
                return 0;

        rq = i915_request_create(ce->engine->kernel_context);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* Queue this switch after all other activity by this context. */
        ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
        if (ret)
                goto out_add;

        /*
         * Guarantee context image and the timeline remains pinned until the
         * modifying request is retired by setting the ce activity tracker.
         *
         * But we only need to take one pin on the account of it. Or in other
         * words transfer the pinned ce object to tracked active request.
         */
        GEM_BUG_ON(i915_active_is_idle(&ce->active));
        ret = i915_active_ref(&ce->active, rq->fence.context, rq);
        if (ret)
                goto out_add;

        ret = gen8_emit_rpcs_config(rq, ce, sseu);

out_add:
        i915_request_add(rq);
        return ret;
}

static int
__intel_context_reconfigure_sseu(struct intel_context *ce,
                                 struct intel_sseu sseu)
{
        int ret;

        GEM_BUG_ON(INTEL_GEN(ce->gem_context->i915) < 8);

        ret = intel_context_lock_pinned(ce);
        if (ret)
                return ret;

        /* Nothing to do if unmodified. */
        if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
                goto unlock;

        ret = gen8_modify_rpcs(ce, sseu);
        if (!ret)
                ce->sseu = sseu;

unlock:
        intel_context_unlock_pinned(ce);
        return ret;
}

static int
intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
{
        struct drm_i915_private *i915 = ce->gem_context->i915;
        int ret;

        ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
        if (ret)
                return ret;

        ret = __intel_context_reconfigure_sseu(ce, sseu);

        mutex_unlock(&i915->drm.struct_mutex);

        return ret;
}

static int
user_to_context_sseu(struct drm_i915_private *i915,
                     const struct drm_i915_gem_context_param_sseu *user,
                     struct intel_sseu *context)
{
        const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;

        /* No zeros in any field. */
        if (!user->slice_mask || !user->subslice_mask ||
            !user->min_eus_per_subslice || !user->max_eus_per_subslice)
                return -EINVAL;

        /* Max > min. */
        if (user->max_eus_per_subslice < user->min_eus_per_subslice)
                return -EINVAL;

        /*
         * Some future proofing on the types since the uAPI is wider than the
         * current internal implementation.
         */
        if (overflows_type(user->slice_mask, context->slice_mask) ||
            overflows_type(user->subslice_mask, context->subslice_mask) ||
            overflows_type(user->min_eus_per_subslice,
                           context->min_eus_per_subslice) ||
            overflows_type(user->max_eus_per_subslice,
                           context->max_eus_per_subslice))
                return -EINVAL;

        /* Check validity against hardware. */
        if (user->slice_mask & ~device->slice_mask)
                return -EINVAL;

        if (user->subslice_mask & ~device->subslice_mask[0])
                return -EINVAL;

        if (user->max_eus_per_subslice > device->max_eus_per_subslice)
                return -EINVAL;

        context->slice_mask = user->slice_mask;
        context->subslice_mask = user->subslice_mask;
        context->min_eus_per_subslice = user->min_eus_per_subslice;
        context->max_eus_per_subslice = user->max_eus_per_subslice;

        /* Part specific restrictions. */
        if (IS_GEN(i915, 11)) {
                unsigned int hw_s = hweight8(device->slice_mask);
                unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
                unsigned int req_s = hweight8(context->slice_mask);
                unsigned int req_ss = hweight8(context->subslice_mask);

                /*
                 * Only full subslice enablement is possible if more than one
                 * slice is turned on.
                 */
                if (req_s > 1 && req_ss != hw_ss_per_s)
                        return -EINVAL;

                /*
                 * If more than four (SScount > 4) subslices are requested
                 * then the number of subslices shall be even.
                 */
                if (req_ss > 4 && (req_ss & 1))
                        return -EINVAL;

                /*
                 * If only one slice is enabled and the subslice count is
                 * below the device full enablement, it must be at most half
                 * of all the available subslices.
                 */
                if (req_s == 1 && req_ss < hw_ss_per_s &&
                    req_ss > (hw_ss_per_s / 2))
                        return -EINVAL;

                /* ABI restriction - VME use case only. */

                /* All slices or a single slice only. */
                if (req_s != 1 && req_s != hw_s)
                        return -EINVAL;

                /*
                 * Half subslices or full enablement only when one slice is
                 * enabled.
                 */
                if (req_s == 1 &&
                    (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
                        return -EINVAL;

                /* No EU configuration changes. */
                if ((user->min_eus_per_subslice !=
                     device->max_eus_per_subslice) ||
                    (user->max_eus_per_subslice !=
                     device->max_eus_per_subslice))
                        return -EINVAL;
        }

        return 0;
}

static int set_sseu(struct i915_gem_context *ctx,
                    struct drm_i915_gem_context_param *args)
{
        struct drm_i915_private *i915 = ctx->i915;
        struct drm_i915_gem_context_param_sseu user_sseu;
        struct intel_context *ce;
        struct intel_sseu sseu;
        unsigned long lookup;
        int ret;

        if (args->size < sizeof(user_sseu))
                return -EINVAL;

        if (!IS_GEN(i915, 11))
                return -ENODEV;

        if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
                           sizeof(user_sseu)))
                return -EFAULT;

        if (user_sseu.rsvd)
                return -EINVAL;

        if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
                return -EINVAL;

        lookup = 0;
        if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
                lookup |= LOOKUP_USER_INDEX;

        ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);

        /* Only render engine supports RPCS configuration. */
        if (ce->engine->class != RENDER_CLASS) {
                ret = -ENODEV;
                goto out_ce;
        }

        ret = user_to_context_sseu(i915, &user_sseu, &sseu);
        if (ret)
                goto out_ce;

        ret = intel_context_reconfigure_sseu(ce, sseu);
        if (ret)
                goto out_ce;

        args->size = sizeof(user_sseu);

out_ce:
        intel_context_put(ce);
        return ret;
}
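
/*
 * Illustrative userspace sketch for I915_CONTEXT_PARAM_SSEU as handled
 * above (gen11; the masks are examples only and must satisfy
 * user_to_context_sseu() — here one slice and half of eight subslices):
 *
 *      struct drm_i915_gem_context_param_sseu sseu = {
 *              .engine = { I915_ENGINE_CLASS_RENDER, 0 },
 *              .slice_mask = 0x1,
 *              .subslice_mask = 0xf,
 *              .min_eus_per_subslice = 8,
 *              .max_eus_per_subslice = 8,
 *      };
 *      struct drm_i915_gem_context_param arg = {
 *              .ctx_id = ctx_id,
 *              .param = I915_CONTEXT_PARAM_SSEU,
 *              .size = sizeof(sseu),
 *              .value = (uintptr_t)&sseu,
 *      };
 *
 *      ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
 */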

struct set_engines {
        struct i915_gem_context *ctx;
        struct i915_gem_engines *engines;
};

static int
set_engines__load_balance(struct i915_user_extension __user *base, void *data)
{
        struct i915_context_engines_load_balance __user *ext =
                container_of_user(base, typeof(*ext), base);
        const struct set_engines *set = data;
        struct intel_engine_cs *stack[16];
        struct intel_engine_cs **siblings;
        struct intel_context *ce;
        u16 num_siblings, idx;
        unsigned int n;
        int err;

        if (!HAS_EXECLISTS(set->ctx->i915))
                return -ENODEV;

        if (USES_GUC_SUBMISSION(set->ctx->i915))
                return -ENODEV;

        if (get_user(idx, &ext->engine_index))
                return -EFAULT;

        if (idx >= set->engines->num_engines) {
                DRM_DEBUG("Invalid placement value, %d >= %d\n",
                          idx, set->engines->num_engines);
                return -EINVAL;
        }

        idx = array_index_nospec(idx, set->engines->num_engines);
        if (set->engines->engines[idx]) {
                DRM_DEBUG("Invalid placement[%d], already occupied\n", idx);
                return -EEXIST;
        }

        if (get_user(num_siblings, &ext->num_siblings))
                return -EFAULT;

        err = check_user_mbz(&ext->flags);
        if (err)
                return err;

        err = check_user_mbz(&ext->mbz64);
        if (err)
                return err;

        siblings = stack;
        if (num_siblings > ARRAY_SIZE(stack)) {
                siblings = kmalloc_array(num_siblings,
                                         sizeof(*siblings),
                                         GFP_KERNEL);
                if (!siblings)
                        return -ENOMEM;
        }

        for (n = 0; n < num_siblings; n++) {
                struct i915_engine_class_instance ci;

                if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
                        err = -EFAULT;
                        goto out_siblings;
                }

                siblings[n] = intel_engine_lookup_user(set->ctx->i915,
                                                       ci.engine_class,
                                                       ci.engine_instance);
                if (!siblings[n]) {
                        DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n",
                                  n, ci.engine_class, ci.engine_instance);
                        err = -EINVAL;
                        goto out_siblings;
                }
        }

        ce = intel_execlists_create_virtual(set->ctx, siblings, n);
        if (IS_ERR(ce)) {
                err = PTR_ERR(ce);
                goto out_siblings;
        }

        if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
                intel_context_put(ce);
                err = -EEXIST;
                goto out_siblings;
        }

out_siblings:
        if (siblings != stack)
                kfree(siblings);

        return err;
}
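
/*
 * Illustrative sketch of the load-balance extension handled above, placing
 * a virtual engine at index 0 of the map, backed by two VCS instances (the
 * extension is chained via the extensions field of
 * struct i915_context_param_engines):
 *
 *      I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(balance, 2) = {
 *              .base = { .name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE },
 *              .engine_index = 0,
 *              .num_siblings = 2,
 *              .engines = {
 *                      { I915_ENGINE_CLASS_VIDEO, 0 },
 *                      { I915_ENGINE_CLASS_VIDEO, 1 },
 *              },
 *      };
 */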

static int
set_engines__bond(struct i915_user_extension __user *base, void *data)
{
        struct i915_context_engines_bond __user *ext =
                container_of_user(base, typeof(*ext), base);
        const struct set_engines *set = data;
        struct i915_engine_class_instance ci;
        struct intel_engine_cs *virtual;
        struct intel_engine_cs *master;
        u16 idx, num_bonds;
        int err, n;

        if (get_user(idx, &ext->virtual_index))
                return -EFAULT;

        if (idx >= set->engines->num_engines) {
                DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n",
                          idx, set->engines->num_engines);
                return -EINVAL;
        }

        idx = array_index_nospec(idx, set->engines->num_engines);
        if (!set->engines->engines[idx]) {
                DRM_DEBUG("Invalid engine at %d\n", idx);
                return -EINVAL;
        }
        virtual = set->engines->engines[idx]->engine;

        err = check_user_mbz(&ext->flags);
        if (err)
                return err;

        for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
                err = check_user_mbz(&ext->mbz64[n]);
                if (err)
                        return err;
        }

        if (copy_from_user(&ci, &ext->master, sizeof(ci)))
                return -EFAULT;

        master = intel_engine_lookup_user(set->ctx->i915,
                                          ci.engine_class, ci.engine_instance);
        if (!master) {
                DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n",
                          ci.engine_class, ci.engine_instance);
                return -EINVAL;
        }

        if (get_user(num_bonds, &ext->num_bonds))
                return -EFAULT;

        for (n = 0; n < num_bonds; n++) {
                struct intel_engine_cs *bond;

                if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
                        return -EFAULT;

                bond = intel_engine_lookup_user(set->ctx->i915,
                                                ci.engine_class,
                                                ci.engine_instance);
                if (!bond) {
                        DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
                                  n, ci.engine_class, ci.engine_instance);
                        return -EINVAL;
                }

                /*
                 * A non-virtual engine has no siblings to choose between; and
                 * a submit fence will always be directed to the one engine.
                 */
                if (intel_engine_is_virtual(virtual)) {
                        err = intel_virtual_engine_attach_bond(virtual,
                                                               master,
                                                               bond);
                        if (err)
                                return err;
                }
        }

        return 0;
}

static const i915_user_extension_fn set_engines__extensions[] = {
        [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
        [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
};

static int
set_engines(struct i915_gem_context *ctx,
            const struct drm_i915_gem_context_param *args)
{
        struct i915_context_param_engines __user *user =
                u64_to_user_ptr(args->value);
        struct set_engines set = { .ctx = ctx };
        unsigned int num_engines, n;
        u64 extensions;
        int err;

        if (!args->size) { /* switch back to the default engine map */
                if (!i915_gem_context_user_engines(ctx))
                        return 0;

                set.engines = default_engines(ctx);
                if (IS_ERR(set.engines))
                        return PTR_ERR(set.engines);

                goto replace;
        }

        BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
        if (args->size < sizeof(*user) ||
            !IS_ALIGNED(args->size, sizeof(*user->engines))) {
                DRM_DEBUG("Invalid size for engine array: %d\n",
                          args->size);
                return -EINVAL;
        }

        /*
         * Note that I915_EXEC_RING_MASK limits execbuf to only using the
         * first 64 engines defined here.
         */
        num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);

        set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
                              GFP_KERNEL);
        if (!set.engines)
                return -ENOMEM;

        init_rcu_head(&set.engines->rcu);
        for (n = 0; n < num_engines; n++) {
                struct i915_engine_class_instance ci;
                struct intel_engine_cs *engine;
                struct intel_context *ce;

                if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
                        __free_engines(set.engines, n);
                        return -EFAULT;
                }

                if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
                    ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
                        set.engines->engines[n] = NULL;
                        continue;
                }

                engine = intel_engine_lookup_user(ctx->i915,
                                                  ci.engine_class,
                                                  ci.engine_instance);
                if (!engine) {
                        DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n",
                                  n, ci.engine_class, ci.engine_instance);
                        __free_engines(set.engines, n);
                        return -ENOENT;
                }

                /* intel_context_create() reports errors via ERR_PTR */
                ce = intel_context_create(ctx, engine);
                if (IS_ERR(ce)) {
                        __free_engines(set.engines, n);
                        return PTR_ERR(ce);
                }

                set.engines->engines[n] = ce;
        }
        set.engines->num_engines = num_engines;

        err = -EFAULT;
        if (!get_user(extensions, &user->extensions))
                err = i915_user_extensions(u64_to_user_ptr(extensions),
                                           set_engines__extensions,
                                           ARRAY_SIZE(set_engines__extensions),
                                           &set);
        if (err) {
                free_engines(set.engines);
                return err;
        }

replace:
        mutex_lock(&ctx->engines_mutex);
        if (args->size)
                i915_gem_context_set_user_engines(ctx);
        else
                i915_gem_context_clear_user_engines(ctx);
        rcu_swap_protected(ctx->engines, set.engines, 1);
        mutex_unlock(&ctx->engines_mutex);

        call_rcu(&set.engines->rcu, free_engines_rcu);

        return 0;
}
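
/*
 * Illustrative userspace sketch for I915_CONTEXT_PARAM_ENGINES as handled
 * above, defining a two-entry map whose indices then act as the execbuf
 * engine selectors for this context:
 *
 *      I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
 *              .engines = {
 *                      { I915_ENGINE_CLASS_RENDER, 0 },
 *                      { I915_ENGINE_CLASS_VIDEO, 0 },
 *              },
 *      };
 *      struct drm_i915_gem_context_param arg = {
 *              .ctx_id = ctx_id,
 *              .param = I915_CONTEXT_PARAM_ENGINES,
 *              .size = sizeof(engines),
 *              .value = (uintptr_t)&engines,
 *      };
 *
 *      ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
 */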

static struct i915_gem_engines *
__copy_engines(struct i915_gem_engines *e)
{
        struct i915_gem_engines *copy;
        unsigned int n;

        copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
        if (!copy)
                return ERR_PTR(-ENOMEM);

        init_rcu_head(&copy->rcu);
        for (n = 0; n < e->num_engines; n++) {
                if (e->engines[n])
                        copy->engines[n] = intel_context_get(e->engines[n]);
                else
                        copy->engines[n] = NULL;
        }
        copy->num_engines = n;

        return copy;
}

static int
get_engines(struct i915_gem_context *ctx,
            struct drm_i915_gem_context_param *args)
{
        struct i915_context_param_engines __user *user;
        struct i915_gem_engines *e;
        size_t n, count, size;
        int err = 0;

        err = mutex_lock_interruptible(&ctx->engines_mutex);
        if (err)
                return err;

        e = NULL;
        if (i915_gem_context_user_engines(ctx))
                e = __copy_engines(i915_gem_context_engines(ctx));
        mutex_unlock(&ctx->engines_mutex);
        if (IS_ERR_OR_NULL(e)) {
                args->size = 0;
                return PTR_ERR_OR_ZERO(e);
        }

        count = e->num_engines;

        /* Be paranoid in case we have an impedance mismatch */
        if (!check_struct_size(user, engines, count, &size)) {
                err = -EINVAL;
                goto err_free;
        }
        if (overflows_type(size, args->size)) {
                err = -EINVAL;
                goto err_free;
        }

        if (!args->size) {
                args->size = size;
                goto err_free;
        }

        if (args->size < size) {
                err = -EINVAL;
                goto err_free;
        }

        user = u64_to_user_ptr(args->value);
        if (!access_ok(user, size)) {
                err = -EFAULT;
                goto err_free;
        }

        if (put_user(0, &user->extensions)) {
                err = -EFAULT;
                goto err_free;
        }

        for (n = 0; n < count; n++) {
                struct i915_engine_class_instance ci = {
                        .engine_class = I915_ENGINE_CLASS_INVALID,
                        .engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
                };

                if (e->engines[n]) {
                        ci.engine_class = e->engines[n]->engine->uabi_class;
                        ci.engine_instance = e->engines[n]->engine->instance;
                }

                if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
                        err = -EFAULT;
                        goto err_free;
                }
        }

        args->size = size;

err_free:
        free_engines(e);
        return err;
}

static int ctx_setparam(struct drm_i915_file_private *fpriv,
                        struct i915_gem_context *ctx,
                        struct drm_i915_gem_context_param *args)
{
        int ret = 0;

        switch (args->param) {
        case I915_CONTEXT_PARAM_NO_ZEROMAP:
                if (args->size)
                        ret = -EINVAL;
                else if (args->value)
                        set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
                else
                        clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
                break;

        case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
                if (args->size)
                        ret = -EINVAL;
                else if (args->value)
                        i915_gem_context_set_no_error_capture(ctx);
                else
                        i915_gem_context_clear_no_error_capture(ctx);
                break;

        case I915_CONTEXT_PARAM_BANNABLE:
                if (args->size)
                        ret = -EINVAL;
                else if (!capable(CAP_SYS_ADMIN) && !args->value)
                        ret = -EPERM;
                else if (args->value)
                        i915_gem_context_set_bannable(ctx);
                else
                        i915_gem_context_clear_bannable(ctx);
                break;

        case I915_CONTEXT_PARAM_RECOVERABLE:
                if (args->size)
                        ret = -EINVAL;
                else if (args->value)
                        i915_gem_context_set_recoverable(ctx);
                else
                        i915_gem_context_clear_recoverable(ctx);
                break;

        case I915_CONTEXT_PARAM_PRIORITY:
                {
                        s64 priority = args->value;

                        if (args->size)
                                ret = -EINVAL;
                        else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
                                ret = -ENODEV;
                        else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
                                 priority < I915_CONTEXT_MIN_USER_PRIORITY)
                                ret = -EINVAL;
                        else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
                                 !capable(CAP_SYS_NICE))
                                ret = -EPERM;
                        else
                                ctx->sched.priority =
                                        I915_USER_PRIORITY(priority);
                }
                break;

        case I915_CONTEXT_PARAM_SSEU:
                ret = set_sseu(ctx, args);
                break;

        case I915_CONTEXT_PARAM_VM:
                ret = set_ppgtt(fpriv, ctx, args);
                break;

        case I915_CONTEXT_PARAM_ENGINES:
                ret = set_engines(ctx, args);
                break;

        case I915_CONTEXT_PARAM_BAN_PERIOD:
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

struct create_ext {
        struct i915_gem_context *ctx;
        struct drm_i915_file_private *fpriv;
};

static int create_setparam(struct i915_user_extension __user *ext, void *data)
{
        struct drm_i915_gem_context_create_ext_setparam local;
        const struct create_ext *arg = data;

        if (copy_from_user(&local, ext, sizeof(local)))
                return -EFAULT;

        if (local.param.ctx_id)
                return -EINVAL;

        return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
}

static int clone_engines(struct i915_gem_context *dst,
                         struct i915_gem_context *src)
{
        struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
        struct i915_gem_engines *clone;
        bool user_engines;
        unsigned long n;

        clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
        if (!clone)
                goto err_unlock;

        init_rcu_head(&clone->rcu);
        for (n = 0; n < e->num_engines; n++) {
                struct intel_engine_cs *engine;

                if (!e->engines[n]) {
                        clone->engines[n] = NULL;
                        continue;
                }
                engine = e->engines[n]->engine;

                /*
                 * Virtual engines are singletons; they can only exist
                 * inside a single context, because they embed their
                 * HW context... As each virtual context implies a single
                 * timeline (each engine can only dequeue a single request
                 * at any time), it would be surprising for two contexts
                 * to use the same engine. So let's create a copy of
                 * the virtual engine instead.
                 */
                if (intel_engine_is_virtual(engine))
                        clone->engines[n] =
                                intel_execlists_clone_virtual(dst, engine);
                else
                        clone->engines[n] = intel_context_create(dst, engine);
                if (IS_ERR_OR_NULL(clone->engines[n])) {
                        __free_engines(clone, n);
                        goto err_unlock;
                }
        }
        clone->num_engines = n;

        user_engines = i915_gem_context_user_engines(src);
        i915_gem_context_unlock_engines(src);

        free_engines(dst->engines);
        RCU_INIT_POINTER(dst->engines, clone);
        if (user_engines)
                i915_gem_context_set_user_engines(dst);
        else
                i915_gem_context_clear_user_engines(dst);
        return 0;

err_unlock:
        i915_gem_context_unlock_engines(src);
        return -ENOMEM;
}

static int clone_flags(struct i915_gem_context *dst,
                       struct i915_gem_context *src)
{
        dst->user_flags = src->user_flags;
        return 0;
}

static int clone_schedattr(struct i915_gem_context *dst,
                           struct i915_gem_context *src)
{
        dst->sched = src->sched;
        return 0;
}

static int clone_sseu(struct i915_gem_context *dst,
                      struct i915_gem_context *src)
{
        struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
        struct i915_gem_engines *clone;
        unsigned long n;
        int err;

        clone = dst->engines; /* no locking required; sole access */
        if (e->num_engines != clone->num_engines) {
                err = -EINVAL;
                goto unlock;
        }

        for (n = 0; n < e->num_engines; n++) {
                struct intel_context *ce = e->engines[n];

                if (clone->engines[n]->engine->class != ce->engine->class) {
                        /* Must have compatible engine maps! */
                        err = -EINVAL;
                        goto unlock;
                }

                /* serialises with set_sseu */
                err = intel_context_lock_pinned(ce);
                if (err)
                        goto unlock;

                clone->engines[n]->sseu = ce->sseu;
                intel_context_unlock_pinned(ce);
        }

        err = 0;
unlock:
        i915_gem_context_unlock_engines(src);
        return err;
}

static int clone_timeline(struct i915_gem_context *dst,
                          struct i915_gem_context *src)
{
        if (src->timeline) {
                GEM_BUG_ON(src->timeline == dst->timeline);

                if (dst->timeline)
                        i915_timeline_put(dst->timeline);
                dst->timeline = i915_timeline_get(src->timeline);
        }

        return 0;
}

static int clone_vm(struct i915_gem_context *dst,
                    struct i915_gem_context *src)
{
        struct i915_address_space *vm;

        rcu_read_lock();
        do {
                vm = READ_ONCE(src->vm);
                if (!vm)
                        break;

                if (!kref_get_unless_zero(&vm->ref))
                        continue;

                /*
                 * This ppgtt may have been reallocated between
                 * the read and the kref, and reassigned to a third
                 * context. In order to avoid inadvertent sharing
                 * of this ppgtt with that third context (and not
                 * src), we have to confirm that we have the same
                 * ppgtt after passing through the strong memory
                 * barrier implied by a successful
                 * kref_get_unless_zero().
                 *
                 * Once we have acquired the current ppgtt of src,
                 * we no longer care if it is released from src, as
                 * it cannot be reallocated elsewhere.
                 */
                if (vm == READ_ONCE(src->vm))
                        break;

                i915_vm_put(vm);
        } while (1);
        rcu_read_unlock();

        if (vm) {
                __assign_ppgtt(dst, vm);
                i915_vm_put(vm);
        }

        return 0;
}

static int create_clone(struct i915_user_extension __user *ext, void *data)
{
        static int (* const fn[])(struct i915_gem_context *dst,
                                  struct i915_gem_context *src) = {
#define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
                MAP(ENGINES, clone_engines),
                MAP(FLAGS, clone_flags),
                MAP(SCHEDATTR, clone_schedattr),
                MAP(SSEU, clone_sseu),
                MAP(TIMELINE, clone_timeline),
                MAP(VM, clone_vm),
#undef MAP
        };
        struct drm_i915_gem_context_create_ext_clone local;
        const struct create_ext *arg = data;
        struct i915_gem_context *dst = arg->ctx;
        struct i915_gem_context *src;
        int err, bit;

        if (copy_from_user(&local, ext, sizeof(local)))
                return -EFAULT;

        BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
                     I915_CONTEXT_CLONE_UNKNOWN);

        if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
                return -EINVAL;

        if (local.rsvd)
                return -EINVAL;

        rcu_read_lock();
        src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
        rcu_read_unlock();
        if (!src)
                return -ENOENT;

        GEM_BUG_ON(src == dst);

        for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
                if (!(local.flags & BIT(bit)))
                        continue;

                err = fn[bit](dst, src);
                if (err)
                        return err;
        }

        return 0;
}
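
/*
 * Illustrative sketch of the clone extension handled above, copying
 * everything except the VM from an existing context at create time:
 *
 *      struct drm_i915_gem_context_create_ext_clone clone = {
 *              .base = { .name = I915_CONTEXT_CREATE_EXT_CLONE },
 *              .clone_id = src_ctx_id,
 *              .flags = I915_CONTEXT_CLONE_ENGINES |
 *                       I915_CONTEXT_CLONE_FLAGS |
 *                       I915_CONTEXT_CLONE_SCHEDATTR |
 *                       I915_CONTEXT_CLONE_SSEU |
 *                       I915_CONTEXT_CLONE_TIMELINE,
 *      };
 */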

static const i915_user_extension_fn create_extensions[] = {
        [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
        [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
};

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
        return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_context_create_ext *args = data;
        struct create_ext ext_data;
        int ret;

        if (!DRIVER_CAPS(i915)->has_logical_contexts)
                return -ENODEV;

        if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
                return -EINVAL;

        ret = i915_terminally_wedged(i915);
        if (ret)
                return ret;

        ext_data.fpriv = file->driver_priv;
        if (client_is_banned(ext_data.fpriv)) {
                DRM_DEBUG("client %s[%d] banned from creating ctx\n",
                          current->comm,
                          pid_nr(get_task_pid(current, PIDTYPE_PID)));
                return -EIO;
        }

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        ext_data.ctx = i915_gem_create_context(i915, args->flags);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR(ext_data.ctx))
                return PTR_ERR(ext_data.ctx);

        if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
                ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
                                           create_extensions,
                                           ARRAY_SIZE(create_extensions),
                                           &ext_data);
                if (ret)
                        goto err_ctx;
        }

        ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
        if (ret < 0)
                goto err_ctx;

        args->ctx_id = ret;
        DRM_DEBUG("HW context %d created\n", args->ctx_id);

        return 0;

err_ctx:
        context_close(ext_data.ctx);
        return ret;
}
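
/*
 * Illustrative userspace sketch for the create ioctl above, chaining a
 * setparam extension (further extensions, e.g. the clone extension, are
 * linked through base.next_extension):
 *
 *      struct drm_i915_gem_context_create_ext_setparam p = {
 *              .base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *              .param = {
 *                      .param = I915_CONTEXT_PARAM_RECOVERABLE,
 *                      .value = 0,
 *              },
 *      };
 *      struct drm_i915_gem_context_create_ext create = {
 *              .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *              .extensions = (uintptr_t)&p,
 *      };
 *
 *      ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *      ... on success create.ctx_id holds the new context handle ...
 */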

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file)
{
        struct drm_i915_gem_context_destroy *args = data;
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct i915_gem_context *ctx;

        if (args->pad != 0)
                return -EINVAL;

        if (!args->ctx_id)
                return -ENOENT;

        if (mutex_lock_interruptible(&file_priv->context_idr_lock))
                return -EINTR;

        ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
        mutex_unlock(&file_priv->context_idr_lock);
        if (!ctx)
                return -ENOENT;

        context_close(ctx);
        return 0;
}

static int get_sseu(struct i915_gem_context *ctx,
                    struct drm_i915_gem_context_param *args)
{
        struct drm_i915_gem_context_param_sseu user_sseu;
        struct intel_context *ce;
        unsigned long lookup;
        int err;

        if (args->size == 0)
                goto out;
        else if (args->size < sizeof(user_sseu))
                return -EINVAL;

        if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
                           sizeof(user_sseu)))
                return -EFAULT;

        if (user_sseu.rsvd)
                return -EINVAL;

        if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
                return -EINVAL;

        lookup = 0;
        if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
                lookup |= LOOKUP_USER_INDEX;

        ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);

        err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
        if (err) {
                intel_context_put(ce);
                return err;
        }

        user_sseu.slice_mask = ce->sseu.slice_mask;
        user_sseu.subslice_mask = ce->sseu.subslice_mask;
        user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
        user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;

        intel_context_unlock_pinned(ce);
        intel_context_put(ce);

        if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
                         sizeof(user_sseu)))
                return -EFAULT;

out:
        args->size = sizeof(user_sseu);

        return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct drm_i915_gem_context_param *args = data;
        struct i915_gem_context *ctx;
        int ret = 0;

        ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
        if (!ctx)
                return -ENOENT;

        switch (args->param) {
        case I915_CONTEXT_PARAM_NO_ZEROMAP:
                args->size = 0;
                args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
                break;

        case I915_CONTEXT_PARAM_GTT_SIZE:
                args->size = 0;
                if (ctx->vm)
                        args->value = ctx->vm->total;
                else if (to_i915(dev)->mm.aliasing_ppgtt)
                        args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
                else
                        args->value = to_i915(dev)->ggtt.vm.total;
                break;

        case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
                args->size = 0;
                args->value = i915_gem_context_no_error_capture(ctx);
                break;

        case I915_CONTEXT_PARAM_BANNABLE:
                args->size = 0;
                args->value = i915_gem_context_is_bannable(ctx);
                break;

        case I915_CONTEXT_PARAM_RECOVERABLE:
                args->size = 0;
                args->value = i915_gem_context_is_recoverable(ctx);
                break;

        case I915_CONTEXT_PARAM_PRIORITY:
                args->size = 0;
                args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
                break;

        case I915_CONTEXT_PARAM_SSEU:
                ret = get_sseu(ctx, args);
                break;

        case I915_CONTEXT_PARAM_VM:
                ret = get_ppgtt(file_priv, ctx, args);
                break;

        case I915_CONTEXT_PARAM_ENGINES:
                ret = get_engines(ctx, args);
                break;

        case I915_CONTEXT_PARAM_BAN_PERIOD:
        default:
                ret = -EINVAL;
                break;
        }

        i915_gem_context_put(ctx);
        return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct drm_i915_gem_context_param *args = data;
        struct i915_gem_context *ctx;
        int ret;

        ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
        if (!ctx)
                return -ENOENT;

        ret = ctx_setparam(file_priv, ctx, args);

        i915_gem_context_put(ctx);
        return ret;
}

int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
                                       void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_reset_stats *args = data;
        struct i915_gem_context *ctx;
        int ret;

        if (args->flags || args->pad)
                return -EINVAL;

        ret = -ENOENT;
        rcu_read_lock();
        ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
        if (!ctx)
                goto out;

        /*
         * We opt for unserialised reads here. This may result in tearing
         * in the extremely unlikely event of a GPU hang on this context
         * as we are in the process of closing the file descriptor, but
         * the counters are purely advisory so that is deemed harmless.
         */
        if (capable(CAP_SYS_ADMIN))
                args->reset_count = i915_reset_count(&dev_priv->gpu_error);
        else
                args->reset_count = 0;

        args->batch_active = atomic_read(&ctx->guilty_count);
        args->batch_pending = atomic_read(&ctx->active_count);

        ret = 0;
out:
        rcu_read_unlock();
        return ret;
}

int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
{
        struct drm_i915_private *i915 = ctx->i915;
        int err = 0;

        mutex_lock(&i915->contexts.mutex);

        GEM_BUG_ON(i915_gem_context_is_closed(ctx));

        if (list_empty(&ctx->hw_id_link)) {
                GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));

                err = assign_hw_id(i915, &ctx->hw_id);
                if (err)
                        goto out_unlock;

                list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
        }

        GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
        atomic_inc(&ctx->hw_id_pin_count);

out_unlock:
        mutex_unlock(&i915->contexts.mutex);
        return err;
}

/* GEM context-engines iterator: for_each_gem_engine() */
struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
{
        const struct i915_gem_engines *e = it->engines;
        struct intel_context *ctx;

        do {
                if (it->idx >= e->num_engines)
                        return NULL;

                ctx = e->engines[it->idx++];
        } while (!ctx);

        return ctx;
}
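
/*
 * Typical in-kernel usage of the iterator above (sketch; NULL slots in
 * the engine map are skipped automatically, and do_something() is a
 * hypothetical per-engine callback):
 *
 *      struct i915_gem_engines_iter it;
 *      struct intel_context *ce;
 *
 *      for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
 *              do_something(ce);
 *      i915_gem_context_unlock_engines(ctx);
 */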

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif

static void i915_global_gem_context_shrink(void)
{
        kmem_cache_shrink(global.slab_luts);
}

static void i915_global_gem_context_exit(void)
{
        kmem_cache_destroy(global.slab_luts);
}

static struct i915_global_gem_context global = { {
        .shrink = i915_global_gem_context_shrink,
        .exit = i915_global_gem_context_exit,
} };

int __init i915_global_gem_context_init(void)
{
        global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
        if (!global.slab_luts)
                return -ENOMEM;

        i915_global_register(&global.base);
        return 0;
}