#include <linux/log2.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_lrc_reg.h"
#include "intel_workarounds.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

static void lut_close(struct i915_gem_context *ctx)
{
	struct i915_lut_handle *lut, *ln;
	struct radix_tree_iter iter;
	void __rcu **slot;

	list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
		list_del(&lut->obj_link);
		kmem_cache_free(ctx->i915->luts, lut);
	}

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);

		radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
		__i915_gem_object_release_unless_active(vma->obj);
	}
	rcu_read_unlock();
}
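
/*
 * Each context is assigned a "hw_id" with which the hardware (and GuC)
 * identifies the context behind a request, e.g. in the execlists context
 * descriptor. Only a limited number of ids is available per platform, so
 * ids are handed out on first pin and kept until they are either released
 * or, once the context is no longer pinned, stolen for a newcomer when
 * the ida runs dry.
 */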

static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
{
	unsigned int max;

	lockdep_assert_held(&i915->contexts.mutex);

	if (INTEL_GEN(i915) >= 11)
		max = GEN11_MAX_CONTEXT_HW_ID;
	else if (USES_GUC_SUBMISSION(i915))
		/*
		 * When using GuC in proxy submission, GuC consumes the
		 * highest bit in the context id to indicate proxy
		 * submission.
		 */
		max = MAX_GUC_CONTEXT_HW_ID;
	else
		max = MAX_CONTEXT_HW_ID;

	return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
}

static int steal_hw_id(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx, *cn;
	LIST_HEAD(pinned);
	int id = -ENOSPC;

	lockdep_assert_held(&i915->contexts.mutex);

	list_for_each_entry_safe(ctx, cn,
				 &i915->contexts.hw_id_list, hw_id_link) {
		if (atomic_read(&ctx->hw_id_pin_count)) {
			list_move_tail(&ctx->hw_id_link, &pinned);
			continue;
		}

		GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
		list_del_init(&ctx->hw_id_link);
		id = ctx->hw_id;
		break;
	}

	/*
	 * Remember how far we got up on the last repossession scan, so the
	 * list is kept in a "least recently scanned" order.
	 */
	list_splice_tail(&pinned, &i915->contexts.hw_id_list);
	return id;
}

static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
{
	int ret;

	lockdep_assert_held(&i915->contexts.mutex);

	/*
	 * We prefer to steal/stall ourselves and our users over that of the
	 * entire system. That may be a little unfair to our users, and
	 * even hurt high priority clients. The choice is whether to oomkill
	 * something else, or steal a context id.
	 */
	ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (unlikely(ret < 0)) {
		ret = steal_hw_id(i915);
		if (ret < 0) /* once again for the correct errno code */
			ret = new_hw_id(i915, GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	*out = ret;
	return 0;
}

static void release_hw_id(struct i915_gem_context *ctx)
{
	struct drm_i915_private *i915 = ctx->i915;

	if (list_empty(&ctx->hw_id_link))
		return;

	mutex_lock(&i915->contexts.mutex);
	if (!list_empty(&ctx->hw_id_link)) {
		ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
		list_del_init(&ctx->hw_id_link);
	}
	mutex_unlock(&i915->contexts.mutex);
}

static void i915_gem_context_free(struct i915_gem_context *ctx)
{
	unsigned int n;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	release_hw_id(ctx);
	i915_ppgtt_put(ctx->ppgtt);

	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
		struct intel_context *ce = &ctx->__engine[n];

		if (ce->ops)
			ce->ops->destroy(ce);
	}

	kfree(ctx->name);
	put_pid(ctx->pid);

	list_del(&ctx->link);

	kfree_rcu(ctx, rcu);
}
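
/*
 * Freeing a context requires struct_mutex, but the final reference may be
 * dropped from a context where taking that mutex is not possible. The
 * release callback therefore only queues the context onto a lockless
 * free_list; the actual free is performed by a worker, or opportunistically
 * by contexts_free_first() just before a new context is created.
 */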

static void contexts_free(struct drm_i915_private *i915)
{
	struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
	struct i915_gem_context *ctx, *cn;

	lockdep_assert_held(&i915->drm.struct_mutex);

	llist_for_each_entry_safe(ctx, cn, freed, free_link)
		i915_gem_context_free(ctx);
}

static void contexts_free_first(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx;
	struct llist_node *freed;

	lockdep_assert_held(&i915->drm.struct_mutex);

	freed = llist_del_first(&i915->contexts.free_list);
	if (!freed)
		return;

	ctx = container_of(freed, typeof(*ctx), free_link);
	i915_gem_context_free(ctx);
}

static void contexts_free_worker(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, typeof(*i915), contexts.free_work);

	mutex_lock(&i915->drm.struct_mutex);
	contexts_free(i915);
	mutex_unlock(&i915->drm.struct_mutex);
}

void i915_gem_context_release(struct kref *ref)
{
	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
	struct drm_i915_private *i915 = ctx->i915;

	trace_i915_context_free(ctx);
	if (llist_add(&ctx->free_link, &i915->contexts.free_list))
		queue_work(i915->wq, &i915->contexts.free_work);
}

static void context_close(struct i915_gem_context *ctx)
{
	i915_gem_context_set_closed(ctx);

	/*
	 * This context will never again be assigned to HW, so we can
	 * reuse its ID for the next context.
	 */
	release_hw_id(ctx);

	/*
	 * The LUT uses the VMA as a backpointer to unref the object,
	 * so we need to clear the LUT before we close all the VMA (inside
	 * the ppgtt).
	 */
	lut_close(ctx);
	if (ctx->ppgtt)
		i915_ppgtt_close(&ctx->ppgtt->vm);

	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_put(ctx);
}
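
/*
 * Build the default, engine-independent bits of the execlists context
 * descriptor: validity, privilege and the legacy 32b vs 48b ppGTT
 * addressing mode. The remaining per-engine fields are filled in elsewhere
 * when the context is pinned for an engine.
 */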

static u32 default_desc_template(const struct drm_i915_private *i915,
				 const struct i915_hw_ppgtt *ppgtt)
{
	u32 address_mode;
	u32 desc;

	desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;

	address_mode = INTEL_LEGACY_32B_CONTEXT;
	if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
		address_mode = INTEL_LEGACY_64B_CONTEXT;
	desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;

	if (IS_GEN(i915, 8))
		desc |= GEN8_CTX_L3LLC_COHERENT;

	return desc;
}

static void intel_context_retire(struct i915_active_request *active,
				 struct i915_request *rq)
{
	struct intel_context *ce =
		container_of(active, typeof(*ce), active_tracker);

	intel_context_unpin(ce);
}

void
intel_context_init(struct intel_context *ce,
		   struct i915_gem_context *ctx,
		   struct intel_engine_cs *engine)
{
	ce->gem_context = ctx;

	INIT_LIST_HEAD(&ce->signal_link);
	INIT_LIST_HEAD(&ce->signals);

	/* Use the whole device by default */
	ce->sseu = intel_device_default_sseu(ctx->i915);

	i915_active_request_init(&ce->active_tracker,
				 NULL, intel_context_retire);
}

static struct i915_gem_context *
__create_hw_context(struct drm_i915_private *dev_priv,
		    struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;
	unsigned int n;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->contexts.list);
	ctx->i915 = dev_priv;
	ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);

	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
		intel_context_init(&ctx->__engine[n], ctx, dev_priv->engine[n]);

	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
	INIT_LIST_HEAD(&ctx->handles_list);
	INIT_LIST_HEAD(&ctx->hw_id_link);

	/* Default context will never have a file_priv */
	ret = DEFAULT_CONTEXT_HANDLE;
	if (file_priv) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_lut;
	}
	ctx->user_handle = ret;

	ctx->file_priv = file_priv;
	if (file_priv) {
		ctx->pid = get_task_pid(current, PIDTYPE_PID);
		ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
				      current->comm,
				      pid_nr(ctx->pid),
				      ctx->user_handle);
		if (!ctx->name) {
			ret = -ENOMEM;
			goto err_pid;
		}
	}

	/*
	 * NB: Mark all slices as needing a remap so that when the context
	 * first loads it will restore whatever remap state already exists.
	 * If there is no remap info, it will be a NOP.
	 */
	ctx->remap_slice = ALL_L3_SLICES(dev_priv);

	i915_gem_context_set_bannable(ctx);
	ctx->ring_size = 4 * PAGE_SIZE;
	ctx->desc_template =
		default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);

	return ctx;

err_pid:
	put_pid(ctx->pid);
	idr_remove(&file_priv->context_idr, ctx->user_handle);
err_lut:
	context_close(ctx);
	return ERR_PTR(ret);
}

static void __destroy_hw_context(struct i915_gem_context *ctx,
				 struct drm_i915_file_private *file_priv)
{
	idr_remove(&file_priv->context_idr, ctx->user_handle);
	context_close(ctx);
}

static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *dev_priv,
			struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* Reap the most stale context */
	contexts_free_first(dev_priv);

	ctx = __create_hw_context(dev_priv, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (HAS_FULL_PPGTT(dev_priv)) {
		struct i915_hw_ppgtt *ppgtt;

		ppgtt = i915_ppgtt_create(dev_priv, file_priv);
		if (IS_ERR(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			__destroy_hw_context(ctx, file_priv);
			return ERR_CAST(ppgtt);
		}

		ctx->ppgtt = ppgtt;
		ctx->desc_template = default_desc_template(dev_priv, ppgtt);
	}

	trace_i915_context_create(ctx);

	return ctx;
}

/**
 * i915_gem_context_create_gvt - create a GVT GEM context
 * @dev: drm device *
 *
 * This function is used to create a GVT specific GEM context.
 *
 * Returns:
 * pointer to i915_gem_context on success, error pointer if failed
 *
 */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev)
{
	struct i915_gem_context *ctx;
	int ret;

	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return ERR_PTR(-ENODEV);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	ctx = i915_gem_create_context(to_i915(dev), NULL);
	if (IS_ERR(ctx))
		goto out;

	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_set_closed(ctx); /* not user accessible */
	i915_gem_context_clear_bannable(ctx);
	i915_gem_context_set_force_single_submission(ctx);
	if (!USES_GUC_SUBMISSION(to_i915(dev)))
		ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
out:
	mutex_unlock(&dev->struct_mutex);
	return ctx;
}

static void
destroy_kernel_context(struct i915_gem_context **ctxp)
{
	struct i915_gem_context *ctx;

	/* Keep the context ref so that we can free it immediately ourselves */
	ctx = i915_gem_context_get(fetch_and_zero(ctxp));
	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

	context_close(ctx);
	i915_gem_context_free(ctx);
}

struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
{
	struct i915_gem_context *ctx;
	int err;

	ctx = i915_gem_create_context(i915, NULL);
	if (IS_ERR(ctx))
		return ctx;

	err = i915_gem_context_pin_hw_id(ctx);
	if (err) {
		destroy_kernel_context(&ctx);
		return ERR_PTR(err);
	}

	i915_gem_context_clear_bannable(ctx);
	ctx->sched.priority = I915_USER_PRIORITY(prio);
	ctx->ring_size = PAGE_SIZE;

	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

	return ctx;
}

static void init_contexts(struct drm_i915_private *i915)
{
	mutex_init(&i915->contexts.mutex);
	INIT_LIST_HEAD(&i915->contexts.list);

	/* Using the simple ida interface, the max is limited by sizeof(int) */
	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
	BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
	ida_init(&i915->contexts.hw_ida);
	INIT_LIST_HEAD(&i915->contexts.hw_id_list);

	INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
	init_llist_head(&i915->contexts.free_list);
}

static bool needs_preempt_context(struct drm_i915_private *i915)
{
	return HAS_LOGICAL_RING_PREEMPTION(i915);
}

int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
	struct i915_gem_context *ctx;

	/* Reassure ourselves we are only called once */
	GEM_BUG_ON(dev_priv->kernel_context);
	GEM_BUG_ON(dev_priv->preempt_context);

	intel_engine_init_ctx_wa(dev_priv->engine[RCS]);
	init_contexts(dev_priv);

	/* lowest priority; idle task */
	ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context\n");
		return PTR_ERR(ctx);
	}

	/*
	 * The kernel context is pinned with its hw_id for the lifetime of
	 * the driver; being the first allocation, it should always have
	 * been assigned hw_id 0.
	 */
	GEM_BUG_ON(ctx->hw_id);
	GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
	dev_priv->kernel_context = ctx;

	/* highest priority; preempting task */
	if (needs_preempt_context(dev_priv)) {
		ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
		if (!IS_ERR(ctx))
			dev_priv->preempt_context = ctx;
		else
			DRM_ERROR("Failed to create preempt context; disabling preemption\n");
	}

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			 DRIVER_CAPS(dev_priv)->has_logical_contexts ?
			 "logical" : "fake");
	return 0;
}

void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv, id)
		intel_engine_lost_context(engine);
}

void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->drm.struct_mutex);

	if (i915->preempt_context)
		destroy_kernel_context(&i915->preempt_context);
	destroy_kernel_context(&i915->kernel_context);

	/* Must free all deferred contexts (via flush_workqueue) first */
	GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
	ida_destroy(&i915->contexts.hw_ida);
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct i915_gem_context *ctx = p;

	context_close(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&i915->drm.struct_mutex);
	ctx = i915_gem_create_context(i915, file_priv);
	mutex_unlock(&i915->drm.struct_mutex);
	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

	return 0;
}

void i915_gem_context_close(struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

static struct i915_request *
last_request_on_engine(struct i915_timeline *timeline,
		       struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	GEM_BUG_ON(timeline == &engine->timeline);

	rq = i915_active_request_raw(&timeline->last_request,
				     &engine->i915->drm.struct_mutex);
	if (rq && rq->engine == engine) {
		GEM_TRACE("last request for %s on engine %s: %llx:%llu\n",
			  timeline->name, engine->name,
			  rq->fence.context, rq->fence.seqno);
		GEM_BUG_ON(rq->timeline != timeline);
		return rq;
	}

	return NULL;
}
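
/*
 * Check whether the engine is already guaranteed to end up idle in the
 * kernel context: either every active timeline has been ordered behind
 * the kernel context's timeline (the "barrier"), or the engine is idle
 * and its last retired context was the kernel context.
 */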

static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	const struct intel_context * const ce =
		to_intel_context(i915->kernel_context, engine);
	struct i915_timeline *barrier = ce->ring->timeline;
	struct intel_ring *ring;
	bool any_active = false;

	lockdep_assert_held(&i915->drm.struct_mutex);
	list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
		struct i915_request *rq;

		rq = last_request_on_engine(ring->timeline, engine);
		if (!rq)
			continue;

		any_active = true;

		if (rq->hw_context == ce)
			continue;

		/*
		 * Was this request submitted after the previous
		 * switch-to-kernel-context?
		 */
		if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
			GEM_TRACE("%s needs barrier for %llx:%lld\n",
				  ring->timeline->name,
				  rq->fence.context,
				  rq->fence.seqno);
			return false;
		}

		GEM_TRACE("%s has barrier after %llx:%lld\n",
			  ring->timeline->name,
			  rq->fence.context,
			  rq->fence.seqno);
	}

	/*
	 * If any other timeline was still active and behind the last barrier,
	 * then our last switch-to-kernel-context must still be queued and
	 * will run last (leaving the engine in the kernel context when it
	 * eventually idles).
	 */
	if (any_active)
		return true;

	/* The engine is idle; check that it is idling in the kernel context. */
	return engine->last_retired_context == ce;
}
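
/*
 * Before the GPU is allowed to idle (e.g. for suspend), queue a request on
 * every engine that switches back to the kernel context, ordered after all
 * currently active timelines, so that the last context image saved by the
 * hardware is the always-pinned kernel context.
 */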

int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));

	lockdep_assert_held(&i915->drm.struct_mutex);
	GEM_BUG_ON(!i915->kernel_context);

	i915_retire_requests(i915);

	for_each_engine(engine, i915, id) {
		struct intel_ring *ring;
		struct i915_request *rq;

		GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
		if (engine_has_kernel_context_barrier(engine))
			continue;

		GEM_TRACE("emit barrier on %s\n", engine->name);

		rq = i915_request_alloc(engine, i915->kernel_context);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		/* Queue this switch after all other activity */
		list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
			struct i915_request *prev;

			prev = last_request_on_engine(ring->timeline, engine);
			if (!prev)
				continue;

			if (prev->gem_context == i915->kernel_context)
				continue;

			GEM_TRACE("add barrier on %s for %llx:%lld\n",
				  engine->name,
				  prev->fence.context,
				  prev->fence.seqno);
			i915_sw_fence_await_sw_fence_gfp(&rq->submit,
							 &prev->submit,
							 I915_FENCE_GFP);
			i915_timeline_sync_set(rq->timeline, &prev->fence);
		}

		i915_request_add(rq);
	}

	return 0;
}

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (!DRIVER_CAPS(dev_priv)->has_logical_contexts)
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	if (client_is_banned(file_priv)) {
		DRM_DEBUG("client %s[%d] banned from creating ctx\n",
			  current->comm, task_pid_nr(current));

		return -EIO;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev_priv, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	__destroy_hw_context(ctx, file_priv);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_context_put(ctx);
	return ret;
}

static int get_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_engine_cs *engine;
	struct intel_context *ce;
	int ret;

	if (args->size == 0)
		goto out;
	else if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.flags || user_sseu.rsvd)
		return -EINVAL;

	engine = intel_engine_lookup_user(ctx->i915,
					  user_sseu.engine_class,
					  user_sseu.engine_instance);
	if (!engine)
		return -EINVAL;

	/* Serialise the read against a concurrent set_sseu() */
	ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
	if (ret)
		return ret;

	ce = to_intel_context(ctx, engine);

	user_sseu.slice_mask = ce->sseu.slice_mask;
	user_sseu.subslice_mask = ce->sseu.subslice_mask;
	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;

	mutex_unlock(&ctx->i915->drm.struct_mutex);

	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
			 sizeof(user_sseu)))
		return -EFAULT;

out:
	args->size = sizeof(user_sseu);

	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		ret = -EINVAL;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->size = 0;
		args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		break;
	case I915_CONTEXT_PARAM_GTT_SIZE:
		args->size = 0;

		if (ctx->ppgtt)
			args->value = ctx->ppgtt->vm.total;
		else if (to_i915(dev)->mm.aliasing_ppgtt)
			args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
		else
			args->value = to_i915(dev)->ggtt.vm.total;
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->size = 0;
		args->value = i915_gem_context_no_error_capture(ctx);
		break;
	case I915_CONTEXT_PARAM_BANNABLE:
		args->size = 0;
		args->value = i915_gem_context_is_bannable(ctx);
		break;
	case I915_CONTEXT_PARAM_PRIORITY:
		args->size = 0;
		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
		break;
	case I915_CONTEXT_PARAM_SSEU:
		ret = get_sseu(ctx, args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}
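
/*
 * Reconfiguring the RPCS (render power clock state) of an already pinned
 * context cannot be done with a simple CPU write, as the context image may
 * be in use by the hardware. Instead, emit an MI_STORE_DWORD_IMM from the
 * kernel context that rewrites the CTX_R_PWR_CLK_STATE slot in the target
 * context's saved image, ordered after all of that context's activity.
 */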

static int gen8_emit_rpcs_config(struct i915_request *rq,
				 struct intel_context *ce,
				 struct intel_sseu sseu)
{
	u64 offset;
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	offset = i915_ggtt_offset(ce->state) +
		 LRC_STATE_PN * PAGE_SIZE +
		 (CTX_R_PWR_CLK_STATE + 1) * 4;

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);
	*cs++ = gen8_make_rpcs(rq->i915, &sseu);

	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen8_modify_rpcs_gpu(struct intel_context *ce,
		     struct intel_engine_cs *engine,
		     struct intel_sseu sseu)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_request *rq, *prev;
	intel_wakeref_t wakeref;
	int ret;

	GEM_BUG_ON(!ce->pin_count);

	lockdep_assert_held(&i915->drm.struct_mutex);

	/* Submitting requests etc needs the hw awake. */
	wakeref = intel_runtime_pm_get(i915);

	rq = i915_request_alloc(engine, i915->kernel_context);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
		goto out_put;
	}

	/* Queue this switch after all other activity by this context. */
	prev = i915_active_request_raw(&ce->ring->timeline->last_request,
				       &i915->drm.struct_mutex);
	if (prev && !i915_request_completed(prev)) {
		ret = i915_request_await_dma_fence(rq, &prev->fence);
		if (ret < 0)
			goto out_add;
	}

	/* Order all following requests to be after. */
	ret = i915_timeline_set_barrier(ce->ring->timeline, rq);
	if (ret)
		goto out_add;

	ret = gen8_emit_rpcs_config(rq, ce, sseu);
	if (ret)
		goto out_add;

	/*
	 * Guarantee context image and the timeline remains pinned until the
	 * modifying request is retired by setting the ce activity tracker.
	 *
	 * But we only need to take one pin on the account of it. Or in other
	 * words transfer the pinned ce object to tracked active request.
	 */
	if (!i915_active_request_isset(&ce->active_tracker))
		__intel_context_pin(ce);
	__i915_active_request_set(&ce->active_tracker, rq);

out_add:
	i915_request_add(rq);
out_put:
	intel_runtime_pm_put(i915, wakeref);

	return ret;
}

static int
__i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
				    struct intel_engine_cs *engine,
				    struct intel_sseu sseu)
{
	struct intel_context *ce = to_intel_context(ctx, engine);
	int ret = 0;

	GEM_BUG_ON(INTEL_GEN(ctx->i915) < 8);
	GEM_BUG_ON(engine->id != RCS);

	/* Nothing to do if unmodified. */
	if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
		return 0;

	/*
	 * If the context is not idle, we have to submit an ordered request
	 * to modify its context image via the kernel context. Pristine and
	 * idle contexts will be configured on pinning.
	 */
	if (ce->pin_count)
		ret = gen8_modify_rpcs_gpu(ce, engine, sseu);

	if (!ret)
		ce->sseu = sseu;

	return ret;
}

static int
i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
				  struct intel_engine_cs *engine,
				  struct intel_sseu sseu)
{
	int ret;

	ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
	if (ret)
		return ret;

	ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu);

	mutex_unlock(&ctx->i915->drm.struct_mutex);

	return ret;
}
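
/*
 * Translate and validate the user-supplied SSEU configuration against the
 * device topology. On Gen11 the RPCS register cannot express arbitrary
 * slice/subslice combinations, so only a small set of configurations
 * (notably the halved-subslice mode used for media/VME workloads) is
 * accepted.
 */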

static int
user_to_context_sseu(struct drm_i915_private *i915,
		     const struct drm_i915_gem_context_param_sseu *user,
		     struct intel_sseu *context)
{
	const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;

	/* No zeros in any field. */
	if (!user->slice_mask || !user->subslice_mask ||
	    !user->min_eus_per_subslice || !user->max_eus_per_subslice)
		return -EINVAL;

	/* Max > min. */
	if (user->max_eus_per_subslice < user->min_eus_per_subslice)
		return -EINVAL;

	/*
	 * Some future proofing on the types since the uAPI is wider than the
	 * current internal implementation.
	 */
	if (overflows_type(user->slice_mask, context->slice_mask) ||
	    overflows_type(user->subslice_mask, context->subslice_mask) ||
	    overflows_type(user->min_eus_per_subslice,
			   context->min_eus_per_subslice) ||
	    overflows_type(user->max_eus_per_subslice,
			   context->max_eus_per_subslice))
		return -EINVAL;

	/* Check validity against the device's topology. */
	if (user->slice_mask & ~device->slice_mask)
		return -EINVAL;

	if (user->subslice_mask & ~device->subslice_mask[0])
		return -EINVAL;

	if (user->max_eus_per_subslice > device->max_eus_per_subslice)
		return -EINVAL;

	context->slice_mask = user->slice_mask;
	context->subslice_mask = user->subslice_mask;
	context->min_eus_per_subslice = user->min_eus_per_subslice;
	context->max_eus_per_subslice = user->max_eus_per_subslice;

	/* Part specific restrictions. */
	if (IS_GEN(i915, 11)) {
		unsigned int hw_s = hweight8(device->slice_mask);
		unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
		unsigned int req_s = hweight8(context->slice_mask);
		unsigned int req_ss = hweight8(context->subslice_mask);

		/*
		 * Only full subslice enablement is possible if more than one
		 * slice is turned on.
		 */
		if (req_s > 1 && req_ss != hw_ss_per_s)
			return -EINVAL;

		/*
		 * If more than four (SScount bitfield limit) subslices are
		 * requested then the number has to be even.
		 */
		if (req_ss > 4 && (req_ss & 1))
			return -EINVAL;

		/*
		 * If only one slice is enabled and subslice count is below the
		 * device full enablement, it must be at most half of the all
		 * available subslices.
		 */
		if (req_s == 1 && req_ss < hw_ss_per_s &&
		    req_ss > (hw_ss_per_s / 2))
			return -EINVAL;

		/* ABI restriction - only all slices or a single slice. */
		if (req_s != 1 && req_s != hw_s)
			return -EINVAL;

		/*
		 * Half subslices or full enablement only when one slice is
		 * enabled.
		 */
		if (req_s == 1 &&
		    (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
			return -EINVAL;

		/* ABI restriction - no configurable EUs. */
		if ((user->min_eus_per_subslice !=
		     device->max_eus_per_subslice) ||
		    (user->max_eus_per_subslice !=
		     device->max_eus_per_subslice))
			return -EINVAL;
	}

	return 0;
}

static int set_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_engine_cs *engine;
	struct intel_sseu sseu;
	int ret;

	if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (!IS_GEN(i915, 11))
		return -ENODEV;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.flags || user_sseu.rsvd)
		return -EINVAL;

	engine = intel_engine_lookup_user(i915,
					  user_sseu.engine_class,
					  user_sseu.engine_instance);
	if (!engine)
		return -EINVAL;

	/* Only the render engine supports RPCS configuration. */
	if (engine->class != RENDER_CLASS)
		return -ENODEV;

	ret = user_to_context_sseu(i915, &user_sseu, &sseu);
	if (ret)
		return ret;

	ret = i915_gem_context_reconfigure_sseu(ctx, engine, sseu);
	if (ret)
		return ret;

	args->size = sizeof(user_sseu);

	return 0;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		ret = -EINVAL;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		else
			clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_no_error_capture(ctx);
		else
			i915_gem_context_clear_no_error_capture(ctx);
		break;
	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			i915_gem_context_set_bannable(ctx);
		else
			i915_gem_context_clear_bannable(ctx);
		break;
	case I915_CONTEXT_PARAM_PRIORITY:
		{
			s64 priority = args->value;

			if (args->size)
				ret = -EINVAL;
			else if (!(to_i915(dev)->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
				ret = -ENODEV;
			else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
				 priority < I915_CONTEXT_MIN_USER_PRIORITY)
				ret = -EINVAL;
			else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
				 !capable(CAP_SYS_NICE))
				ret = -EPERM;
			else
				ctx->sched.priority =
					I915_USER_PRIORITY(priority);
		}
		break;
	case I915_CONTEXT_PARAM_SSEU:
		ret = set_sseu(ctx, args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	ret = -ENOENT;
	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
	if (!ctx)
		goto out;

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them off the cpu first. However, avoiding
	 * that tearing would require serialising the read against the
	 * reset, which is too heavy a cost for a query.
	 */
	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
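
/*
 * Assign (if necessary) and pin a hw_id for the context so that it cannot
 * be repossessed by steal_hw_id() while the context is in active use by
 * the hardware.
 */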

int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
{
	struct drm_i915_private *i915 = ctx->i915;
	int err = 0;

	mutex_lock(&i915->contexts.mutex);

	GEM_BUG_ON(i915_gem_context_is_closed(ctx));

	if (list_empty(&ctx->hw_id_link)) {
		GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));

		err = assign_hw_id(i915, &ctx->hw_id);
		if (err)
			goto out_unlock;

		list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
	}

	GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
	atomic_inc(&ctx->hw_id_pin_count);

out_unlock:
	mutex_unlock(&i915->contexts.mutex);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif