#include <linux/log2.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_workarounds.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

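/*
 * lut_close() tears down the per-context lookup tables: every cached
 * handle->vma entry is freed and the backing object reference dropped
 * (unless the object is still active on the GPU), so a closed context no
 * longer pins any objects through its lookup cache.
 */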
static void lut_close(struct i915_gem_context *ctx)
{
	struct i915_lut_handle *lut, *ln;
	struct radix_tree_iter iter;
	void __rcu **slot;

	list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
		list_del(&lut->obj_link);
		kmem_cache_free(ctx->i915->luts, lut);
	}

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);

		/*
		 * radix_tree_iter_delete() keeps the iterator valid, so we
		 * can prune each slot as we walk without restarting the scan.
		 */
		radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
		__i915_gem_object_release_unless_active(vma->obj);
	}
	rcu_read_unlock();
}

static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
{
	unsigned int max;

	lockdep_assert_held(&i915->contexts.mutex);

	if (INTEL_GEN(i915) >= 11)
		max = GEN11_MAX_CONTEXT_HW_ID;
	else if (USES_GUC_SUBMISSION(i915))
		/*
		 * When using GuC in proxy submission, GuC consumes the
		 * highest bit in the context id to indicate proxy submission.
		 */
		max = MAX_GUC_CONTEXT_HW_ID;
	else
		max = MAX_CONTEXT_HW_ID;

	return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
}

static int steal_hw_id(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx, *cn;
	LIST_HEAD(pinned);
	int id = -ENOSPC;

	lockdep_assert_held(&i915->contexts.mutex);

	list_for_each_entry_safe(ctx, cn,
				 &i915->contexts.hw_id_list, hw_id_link) {
		if (atomic_read(&ctx->hw_id_pin_count)) {
			list_move_tail(&ctx->hw_id_link, &pinned);
			continue;
		}

		GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
		list_del_init(&ctx->hw_id_link);
		id = ctx->hw_id;
		break;
	}

	/*
	 * Remember how far we got on this repossession scan, so that the
	 * list is kept in a "least recently scanned" order.
	 */
	list_splice_tail(&pinned, &i915->contexts.hw_id_list);
	return id;
}

static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
{
	int ret;

	lockdep_assert_held(&i915->contexts.mutex);

	/*
	 * We prefer to steal/stall ourselves and our users over that of the
	 * entire system. That may be a little unfair to our users, and even
	 * hurt high priority clients. The choice is whether to oomkill
	 * something else, or steal a context id.
	 */
	ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (unlikely(ret < 0)) {
		ret = steal_hw_id(i915);
		if (ret < 0) /* once again for the correct errno code */
			ret = new_hw_id(i915, GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	*out = ret;
	return 0;
}

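/*
 * release_hw_id() returns a context's hw_id to the ida. The unlocked
 * list_empty() check is only an optimisation to avoid taking the mutex when
 * no id was ever assigned; the check is repeated under contexts.mutex before
 * anything is actually released.
 */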
static void release_hw_id(struct i915_gem_context *ctx)
{
	struct drm_i915_private *i915 = ctx->i915;

	if (list_empty(&ctx->hw_id_link))
		return;

	mutex_lock(&i915->contexts.mutex);
	if (!list_empty(&ctx->hw_id_link)) {
		ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
		list_del_init(&ctx->hw_id_link);
	}
	mutex_unlock(&i915->contexts.mutex);
}

static void i915_gem_context_free(struct i915_gem_context *ctx)
{
	unsigned int n;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	release_hw_id(ctx);
	i915_ppgtt_put(ctx->ppgtt);

	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
		struct intel_context *ce = &ctx->__engine[n];

		if (ce->ops)
			ce->ops->destroy(ce);
	}

	kfree(ctx->name);
	put_pid(ctx->pid);

	list_del(&ctx->link);

	kfree_rcu(ctx, rcu);
}

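/*
 * contexts_free() drains the entire deferred-free list. Entries are pushed
 * onto the lockless llist by i915_gem_context_release() and can only be
 * reaped here, under struct_mutex.
 */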
static void contexts_free(struct drm_i915_private *i915)
{
	struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
	struct i915_gem_context *ctx, *cn;

	lockdep_assert_held(&i915->drm.struct_mutex);

	llist_for_each_entry_safe(ctx, cn, freed, free_link)
		i915_gem_context_free(ctx);
}

static void contexts_free_first(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx;
	struct llist_node *freed;

	lockdep_assert_held(&i915->drm.struct_mutex);

	freed = llist_del_first(&i915->contexts.free_list);
	if (!freed)
		return;

	ctx = container_of(freed, typeof(*ctx), free_link);
	i915_gem_context_free(ctx);
}

static void contexts_free_worker(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, typeof(*i915), contexts.free_work);

	mutex_lock(&i915->drm.struct_mutex);
	contexts_free(i915);
	mutex_unlock(&i915->drm.struct_mutex);
}

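/*
 * Final kref release for a GEM context. Since the last reference may be
 * dropped in a context where we cannot take struct_mutex, freeing is
 * deferred: the context is pushed onto a lockless free list and the worker
 * is queued only when the list transitions from empty (llist_add() returns
 * true in that case).
 */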
void i915_gem_context_release(struct kref *ref)
{
	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
	struct drm_i915_private *i915 = ctx->i915;

	trace_i915_context_free(ctx);
	if (llist_add(&ctx->free_link, &i915->contexts.free_list))
		queue_work(i915->wq, &i915->contexts.free_work);
}

static void context_close(struct i915_gem_context *ctx)
{
	i915_gem_context_set_closed(ctx);

	/*
	 * This context will never again be assigned to HW, so we can
	 * reuse its ID for the next context.
	 */
	release_hw_id(ctx);

	/*
	 * The LUT uses the VMA as a backpointer to unref the object,
	 * so we need to clear the LUT before we close all the VMA (inside
	 * the ppgtt).
	 */
	lut_close(ctx);
	if (ctx->ppgtt)
		i915_ppgtt_close(&ctx->ppgtt->vm);

	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_put(ctx);
}

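/*
 * Build the default logical ring context descriptor template for GEN8+.
 * Only the invariant bits (validity, privilege, addressing mode and, on
 * GEN8, L3LLC coherency) are encoded here; engine- and instance-specific
 * fields are expected to be merged in when the context image is pinned.
 */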
static u32 default_desc_template(const struct drm_i915_private *i915,
				 const struct i915_hw_ppgtt *ppgtt)
{
	u32 address_mode;
	u32 desc;

	desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;

	address_mode = INTEL_LEGACY_32B_CONTEXT;
	if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
		address_mode = INTEL_LEGACY_64B_CONTEXT;
	desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;

	if (IS_GEN8(i915))
		desc |= GEN8_CTX_L3LLC_COHERENT;

	return desc;
}

static struct i915_gem_context *
__create_hw_context(struct drm_i915_private *dev_priv,
		    struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;
	unsigned int n;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->contexts.list);
	ctx->i915 = dev_priv;
	ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);

	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
		struct intel_context *ce = &ctx->__engine[n];

		ce->gem_context = ctx;
	}

	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
	INIT_LIST_HEAD(&ctx->handles_list);
	INIT_LIST_HEAD(&ctx->hw_id_link);

	/* Default context will never have a file_priv */
	ret = DEFAULT_CONTEXT_HANDLE;
	if (file_priv) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_lut;
	}
	ctx->user_handle = ret;

	ctx->file_priv = file_priv;
	if (file_priv) {
		ctx->pid = get_task_pid(current, PIDTYPE_PID);
		ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
				      current->comm,
				      pid_nr(ctx->pid),
				      ctx->user_handle);
		if (!ctx->name) {
			ret = -ENOMEM;
			goto err_pid;
		}
	}

	/*
	 * NB: Mark all slices as needing a remap so that when the context
	 * first loads it will restore whatever remap state already exists.
	 * If there is no remap info, it will be a NOP.
	 */
	ctx->remap_slice = ALL_L3_SLICES(dev_priv);

	i915_gem_context_set_bannable(ctx);
	ctx->ring_size = 4 * PAGE_SIZE;
	ctx->desc_template =
		default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);

	return ctx;

err_pid:
	put_pid(ctx->pid);
	idr_remove(&file_priv->context_idr, ctx->user_handle);
err_lut:
	context_close(ctx);
	return ERR_PTR(ret);
}

static void __destroy_hw_context(struct i915_gem_context *ctx,
				 struct drm_i915_file_private *file_priv)
{
	idr_remove(&file_priv->context_idr, ctx->user_handle);
	context_close(ctx);
}

static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *dev_priv,
			struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* Reap the most stale context */
	contexts_free_first(dev_priv);

	ctx = __create_hw_context(dev_priv, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (HAS_FULL_PPGTT(dev_priv)) {
		struct i915_hw_ppgtt *ppgtt;

		ppgtt = i915_ppgtt_create(dev_priv, file_priv);
		if (IS_ERR(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			__destroy_hw_context(ctx, file_priv);
			return ERR_CAST(ppgtt);
		}

		ctx->ppgtt = ppgtt;
		ctx->desc_template = default_desc_template(dev_priv, ppgtt);
	}

	trace_i915_context_create(ctx);

	return ctx;
}

/**
 * i915_gem_context_create_gvt - create a GVT GEM context
 * @dev: drm device
 *
 * This function is used to create a GVT-specific GEM context.
 *
 * Returns:
 * pointer to i915_gem_context on success, error pointer if failed
 */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev)
{
	struct i915_gem_context *ctx;
	int ret;

	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return ERR_PTR(-ENODEV);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	ctx = i915_gem_create_context(to_i915(dev), NULL);
	if (IS_ERR(ctx))
		goto out;

	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_set_closed(ctx);
	i915_gem_context_clear_bannable(ctx);
	i915_gem_context_set_force_single_submission(ctx);
	if (!USES_GUC_SUBMISSION(to_i915(dev)))
		ctx->ring_size = 512 * PAGE_SIZE;

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
out:
	mutex_unlock(&dev->struct_mutex);
	return ctx;
}

static void
destroy_kernel_context(struct i915_gem_context **ctxp)
{
	struct i915_gem_context *ctx;

	/* Keep the context ref so that we can free it immediately ourselves */
	ctx = i915_gem_context_get(fetch_and_zero(ctxp));
	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

	context_close(ctx);
	i915_gem_context_free(ctx);
}

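/*
 * Create an internal (kernel) context. Kernel contexts have no file_priv,
 * are never bannable, use a minimal ring and keep their hw_id permanently
 * pinned so they can be used without risk of stalling for an id.
 */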
struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
{
	struct i915_gem_context *ctx;
	int err;

	ctx = i915_gem_create_context(i915, NULL);
	if (IS_ERR(ctx))
		return ctx;

	err = i915_gem_context_pin_hw_id(ctx);
	if (err) {
		destroy_kernel_context(&ctx);
		return ERR_PTR(err);
	}

	i915_gem_context_clear_bannable(ctx);
	ctx->sched.priority = I915_USER_PRIORITY(prio);
	ctx->ring_size = PAGE_SIZE;

	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

	return ctx;
}

static void init_contexts(struct drm_i915_private *i915)
{
	mutex_init(&i915->contexts.mutex);
	INIT_LIST_HEAD(&i915->contexts.list);

	/* Using the simple ida interface, the max is limited by sizeof(int) */
	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
	BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
	ida_init(&i915->contexts.hw_ida);
	INIT_LIST_HEAD(&i915->contexts.hw_id_list);

	INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
	init_llist_head(&i915->contexts.free_list);
}

static bool needs_preempt_context(struct drm_i915_private *i915)
{
	return HAS_LOGICAL_RING_PREEMPTION(i915);
}

int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
	struct i915_gem_context *ctx;

	/* Reassure ourselves we are only called once */
	GEM_BUG_ON(dev_priv->kernel_context);
	GEM_BUG_ON(dev_priv->preempt_context);

	intel_engine_init_ctx_wa(dev_priv->engine[RCS]);
	init_contexts(dev_priv);

	/* lowest priority; idle task */
	ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context\n");
		return PTR_ERR(ctx);
	}

	/*
	 * For easy recognisability, we want the kernel context to have
	 * hw_id 0 so that all user contexts are non-zero. Kernel contexts
	 * are permanently pinned, so that we never suffer a stall and can
	 * use them from any allocation context (e.g. when evicting other
	 * contexts and from inside the shrinker).
	 */
	GEM_BUG_ON(ctx->hw_id);
	GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
	dev_priv->kernel_context = ctx;

	/* highest priority; preempting task */
	if (needs_preempt_context(dev_priv)) {
		ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
		if (!IS_ERR(ctx))
			dev_priv->preempt_context = ctx;
		else
			DRM_ERROR("Failed to create preempt context; disabling preemption\n");
	}

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			 DRIVER_CAPS(dev_priv)->has_logical_contexts ?
			 "logical" : "fake");
	return 0;
}

void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv, id)
		intel_engine_lost_context(engine);
}

void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->drm.struct_mutex);

	if (i915->preempt_context)
		destroy_kernel_context(&i915->preempt_context);
	destroy_kernel_context(&i915->kernel_context);

	/* Must free all deferred contexts (via flush_workqueue) first */
	GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
	ida_destroy(&i915->contexts.hw_ida);
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct i915_gem_context *ctx = p;

	context_close(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&i915->drm.struct_mutex);
	ctx = i915_gem_create_context(i915, file_priv);
	mutex_unlock(&i915->drm.struct_mutex);
	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

	return 0;
}

void i915_gem_context_close(struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

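/*
 * Return the last request on @timeline if (and only if) it was submitted
 * to @engine; otherwise return NULL. Callers use this to decide whether a
 * timeline still has outstanding work on a particular engine.
 */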
static struct i915_request *
last_request_on_engine(struct i915_timeline *timeline,
		       struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	GEM_BUG_ON(timeline == &engine->timeline);

	rq = i915_gem_active_raw(&timeline->last_request,
				 &engine->i915->drm.struct_mutex);
	if (rq && rq->engine == engine) {
		GEM_TRACE("last request for %s on engine %s: %llx:%d\n",
			  timeline->name, engine->name,
			  rq->fence.context, rq->fence.seqno);
		GEM_BUG_ON(rq->timeline != timeline);
		return rq;
	}

	return NULL;
}

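/*
 * Check whether every active timeline on @engine is already ordered behind
 * the kernel context, i.e. whether a previously emitted
 * switch-to-kernel-context request still acts as a barrier for all current
 * work. If so, there is no need to emit another one.
 */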
static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	const struct intel_context * const ce =
		to_intel_context(i915->kernel_context, engine);
	struct i915_timeline *barrier = ce->ring->timeline;
	struct intel_ring *ring;
	bool any_active = false;

	lockdep_assert_held(&i915->drm.struct_mutex);
	list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
		struct i915_request *rq;

		rq = last_request_on_engine(ring->timeline, engine);
		if (!rq)
			continue;

		any_active = true;

		if (rq->hw_context == ce)
			continue;

		/*
		 * Was this request submitted after the previous
		 * switch-to-kernel-context?
		 */
		if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
			GEM_TRACE("%s needs barrier for %llx:%d\n",
				  ring->timeline->name,
				  rq->fence.context,
				  rq->fence.seqno);
			return false;
		}

		GEM_TRACE("%s has barrier after %llx:%d\n",
			  ring->timeline->name,
			  rq->fence.context,
			  rq->fence.seqno);
	}

	/*
	 * If any other timeline was still active and behind the last
	 * barrier, then our last switch-to-kernel-context must still be
	 * queued and will run after those requests, keeping the barrier
	 * effective.
	 */
	if (any_active)
		return true;

	/* The engine is idle; check that it is idling in the kernel context. */
	return engine->last_retired_context == ce;
}

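/*
 * i915_gem_switch_to_kernel_context - queue a switch to the kernel context
 * @i915: i915 device
 *
 * Emit a request on every engine that does not already have an effective
 * kernel-context barrier, fenced after all other outstanding work on that
 * engine, so that once the GPU idles it does so in the kernel context.
 * Typically called before idling the GPU.
 *
 * Returns 0 on success, or a negative error code if a request could not be
 * allocated.
 */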
int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));

	lockdep_assert_held(&i915->drm.struct_mutex);
	GEM_BUG_ON(!i915->kernel_context);

	i915_retire_requests(i915);

	for_each_engine(engine, i915, id) {
		struct intel_ring *ring;
		struct i915_request *rq;

		GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
		if (engine_has_kernel_context_barrier(engine))
			continue;

		GEM_TRACE("emit barrier on %s\n", engine->name);

		rq = i915_request_alloc(engine, i915->kernel_context);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		/* Queue this switch after all other activity */
		list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
			struct i915_request *prev;

			prev = last_request_on_engine(ring->timeline, engine);
			if (!prev)
				continue;

			if (prev->gem_context == i915->kernel_context)
				continue;

			GEM_TRACE("add barrier on %s for %llx:%d\n",
				  engine->name,
				  prev->fence.context,
				  prev->fence.seqno);
			i915_sw_fence_await_sw_fence_gfp(&rq->submit,
							 &prev->submit,
							 I915_FENCE_GFP);
			i915_timeline_sync_set(rq->timeline, &prev->fence);
		}

		i915_request_add(rq);
	}

	return 0;
}

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (!DRIVER_CAPS(dev_priv)->has_logical_contexts)
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	if (client_is_banned(file_priv)) {
		DRM_DEBUG("client %s[%d] banned from creating ctx\n",
			  current->comm,
			  pid_nr(get_task_pid(current, PIDTYPE_PID)));

		return -EIO;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev_priv, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	__destroy_hw_context(ctx, file_priv);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		ret = -EINVAL;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		break;
	case I915_CONTEXT_PARAM_GTT_SIZE:
		if (ctx->ppgtt)
			args->value = ctx->ppgtt->vm.total;
		else if (to_i915(dev)->mm.aliasing_ppgtt)
			args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
		else
			args->value = to_i915(dev)->ggtt.vm.total;
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->value = i915_gem_context_no_error_capture(ctx);
		break;
	case I915_CONTEXT_PARAM_BANNABLE:
		args->value = i915_gem_context_is_bannable(ctx);
		break;
	case I915_CONTEXT_PARAM_PRIORITY:
		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		ret = -EINVAL;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		else
			clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_no_error_capture(ctx);
		else
			i915_gem_context_clear_no_error_capture(ctx);
		break;
	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			i915_gem_context_set_bannable(ctx);
		else
			i915_gem_context_clear_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		{
			s64 priority = args->value;

			if (args->size)
				ret = -EINVAL;
			else if (!(to_i915(dev)->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
				ret = -ENODEV;
			else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
				 priority < I915_CONTEXT_MIN_USER_PRIORITY)
				ret = -EINVAL;
			else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
				 !capable(CAP_SYS_NICE))
				ret = -EPERM;
			else
				ctx->sched.priority =
					I915_USER_PRIORITY(priority);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	ret = -ENOENT;
	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
	if (!ctx)
		goto out;

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of
	 * protection, we should wait until the hang is resolved.
	 */
	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

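/*
 * Slow path for pinning a context's hw_id (the fast path is assumed to live
 * in the header and only handle already-pinned contexts). Assign an id on
 * first pin, track the context on the hw_id list and raise the pin count so
 * steal_hw_id() cannot repossess the id while it is in use.
 */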
int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
{
	struct drm_i915_private *i915 = ctx->i915;
	int err = 0;

	mutex_lock(&i915->contexts.mutex);

	GEM_BUG_ON(i915_gem_context_is_closed(ctx));

	if (list_empty(&ctx->hw_id_link)) {
		GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));

		err = assign_hw_id(i915, &ctx->hw_id);
		if (err)
			goto out_unlock;

		list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
	}

	GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
	atomic_inc(&ctx->hw_id_pin_count);

out_unlock:
	mutex_unlock(&i915->contexts.mutex);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif