/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_reset.h"
#include "i915_selftest.h"

#include "gem/selftests/igt_gem_utils.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"

#include "huge_gem_object.h"
#include "igt_gem_utils.h"

#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))

static int live_nop_switch(void *arg)
{
	const unsigned int nctx = 1024;
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context **ctx;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	struct igt_live_test t;
	struct drm_file *file;
	unsigned long n;
	int err = -ENODEV;

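	/*
	 * Create as many contexts as we can feasibly get away with
	 * and check we can switch between them rapidly.
	 *
	 * Serialised by struct_mutex.
	 */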
	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out_unlock;
	}

	for (n = 0; n < nctx; n++) {
		ctx[n] = live_context(i915, file);
		if (IS_ERR(ctx[n])) {
			err = PTR_ERR(ctx[n]);
			goto out_unlock;
		}
	}

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;
		unsigned long end_time, prime;
		ktime_t times[2] = {};

		times[0] = ktime_get_raw();
		for (n = 0; n < nctx; n++) {
			rq = igt_request_alloc(ctx[n], engine);
			if (IS_ERR(rq)) {
				err = PTR_ERR(rq);
				goto out_unlock;
			}
			i915_request_add(rq);
		}
		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			pr_err("Failed to populate %d contexts\n", nctx);
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto out_unlock;
		}

		times[1] = ktime_get_raw();

		pr_info("Populated %d contexts on %s in %lluns\n",
			nctx, engine->name, ktime_to_ns(times[1] - times[0]));

		err = igt_live_test_begin(&t, i915, __func__, engine->name);
		if (err)
			goto out_unlock;

		end_time = jiffies + i915_selftest.timeout_jiffies;
		for_each_prime_number_from(prime, 2, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				rq = igt_request_alloc(ctx[n % nctx], engine);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					goto out_unlock;
				}

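				/*
				 * This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is sufficient
				 * for the system to work (i.e. proper HEAD
				 * tracking of the rings, interrupt handling,
				 * etc). It also gives us the lowest bounds
				 * for latency.
				 */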
				i915_request_add(rq);
			}
			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
				pr_err("Switching between %ld contexts timed out\n",
				       prime);
				i915_gem_set_wedged(i915);
				break;
			}

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 2)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = igt_live_test_end(&t);
		if (err)
			goto out_unlock;

		pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
	}

out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	mock_file_free(i915, file);
	return err;
}

static struct i915_vma *
gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
{
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(vma->vm->i915);
	unsigned long n, size;
	u32 *cmd;
	int err;

	size = (4 * count + 1) * sizeof(u32);
	size = round_up(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(vma->vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
	offset += vma->node.start;

	for (n = 0; n < count; n++) {
		if (gen >= 8) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
			*cmd++ = lower_32_bits(offset);
			*cmd++ = upper_32_bits(offset);
			*cmd++ = value;
		} else if (gen >= 4) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
				(gen < 6 ? MI_USE_GGTT : 0);
			*cmd++ = 0;
			*cmd++ = offset;
			*cmd++ = value;
		} else {
			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*cmd++ = offset;
			*cmd++ = value;
		}
		offset += PAGE_SIZE;
	}
	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static unsigned long real_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
}

static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
}

static int gpu_fill(struct drm_i915_gem_object *obj,
		    struct i915_gem_context *ctx,
		    struct intel_engine_cs *engine,
		    unsigned int dw)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
	struct i915_request *rq;
	struct i915_vma *vma;
	struct i915_vma *batch;
	unsigned int flags;
	int err;

	GEM_BUG_ON(obj->base.size > vm->total);
	GEM_BUG_ON(!intel_engine_can_store_dword(engine));

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);
	if (err)
		return err;

	err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
	if (err)
		return err;

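	/*
	 * Within the GTT the huge objects maps every page onto
	 * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
	 * We set the nth dword within the page using the nth
	 * mapping via the GTT - this should exercise the GTT mapping
	 * whilst checking that each context provides a unique view
	 * into the object.
	 */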
	batch = gpu_fill_dw(vma,
			    (dw * real_page_count(obj)) << PAGE_SHIFT |
			    (dw * sizeof(u32)),
			    real_page_count(obj),
			    dw);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_vma;
	}

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	flags = 0;
	if (INTEL_GEN(vm->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;

	err = engine->emit_bb_start(rq,
				    batch->node.start, batch->node.size,
				    flags);
	if (err)
		goto err_request;

	i915_vma_lock(batch);
	err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto skip_request;

	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	i915_request_add(rq);

	i915_vma_unpin(batch);
	i915_vma_close(batch);
	i915_vma_put(batch);

	i915_vma_unpin(vma);

	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_batch:
	i915_vma_unpin(batch);
	i915_vma_put(batch);
err_vma:
	i915_vma_unpin(vma);
	return err;
}

static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
{
	const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
	unsigned int n, m, need_flush;
	int err;

	err = i915_gem_object_prepare_write(obj, &need_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		for (m = 0; m < DW_PER_PAGE; m++)
			map[m] = value;
		if (!has_llc)
			drm_clflush_virt_range(map, PAGE_SIZE);
		kunmap_atomic(map);
	}

	i915_gem_object_finish_access(obj);
	obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
	obj->write_domain = 0;
	return 0;
}

static noinline int cpu_check(struct drm_i915_gem_object *obj,
			      unsigned int idx, unsigned int max)
{
	unsigned int n, m, needs_flush;
	int err;

	err = i915_gem_object_prepare_read(obj, &needs_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(map, PAGE_SIZE);

		for (m = 0; m < max; m++) {
			if (map[m] != m) {
				pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
				       __builtin_return_address(0), idx,
				       n, real_page_count(obj), m, max,
				       map[m], m);
				err = -EINVAL;
				goto out_unmap;
			}
		}

		for (; m < DW_PER_PAGE; m++) {
			if (map[m] != STACK_MAGIC) {
				pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
				       __builtin_return_address(0), idx, n, m,
				       map[m], STACK_MAGIC);
				err = -EINVAL;
				goto out_unmap;
			}
		}

out_unmap:
		kunmap_atomic(map);
		if (err)
			break;
	}

	i915_gem_object_finish_access(obj);
	return err;
}

static int file_add_object(struct drm_file *file,
			   struct drm_i915_gem_object *obj)
{
	int err;

	GEM_BUG_ON(obj->base.handle_count);

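	/* tie the object to the drm_file for easy reaping */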
	err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL);
	if (err < 0)
		return err;

	i915_gem_object_get(obj);
	obj->base.handle_count++;
	return 0;
}

static struct drm_i915_gem_object *
create_test_object(struct i915_gem_context *ctx,
		   struct drm_file *file,
		   struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm = ctx->vm ?: &ctx->i915->ggtt.vm;
	u64 size;
	int err;

	size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);

	obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
	if (IS_ERR(obj))
		return obj;

	err = file_add_object(file, obj);
	i915_gem_object_put(obj);
	if (err)
		return ERR_PTR(err);

	err = cpu_fill(obj, STACK_MAGIC);
	if (err) {
		pr_err("Failed to fill object with cpu, err=%d\n",
		       err);
		return ERR_PTR(err);
	}

	list_add_tail(&obj->st_link, objects);
	return obj;
}

static unsigned long max_dwords(struct drm_i915_gem_object *obj)
{
	unsigned long npages = fake_page_count(obj);

	GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
	return npages / DW_PER_PAGE;
}

static int igt_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = -ENODEV;

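	/*
	 * Create a few different contexts (with different mm) and write
	 * through each ctx/mm using the GPU making sure those writes end
	 * up in the expected pages of our obj.
	 */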
	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	for_each_engine(engine, i915, id) {
		struct drm_i915_gem_object *obj = NULL;
		unsigned long ncontexts, ndwords, dw;
		struct igt_live_test t;
		struct drm_file *file;
		IGT_TIMEOUT(end_time);
		LIST_HEAD(objects);

		if (!intel_engine_can_store_dword(engine))
			continue;

		if (!engine->context_size)
			continue;

		file = mock_file(i915);
		if (IS_ERR(file))
			return PTR_ERR(file);

		mutex_lock(&i915->drm.struct_mutex);

		err = igt_live_test_begin(&t, i915, __func__, engine->name);
		if (err)
			goto out_unlock;

		ncontexts = 0;
		ndwords = 0;
		dw = 0;
		while (!time_after(jiffies, end_time)) {
			struct i915_gem_context *ctx;
			intel_wakeref_t wakeref;

			ctx = live_context(i915, file);
			if (IS_ERR(ctx)) {
				err = PTR_ERR(ctx);
				goto out_unlock;
			}

			if (!obj) {
				obj = create_test_object(ctx, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					goto out_unlock;
				}
			}

			with_intel_runtime_pm(&i915->runtime_pm, wakeref)
				err = gpu_fill(obj, ctx, engine, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name, ctx->hw_id,
				       yesno(!!ctx->vm), err);
				goto out_unlock;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}

			ndwords++;
			ncontexts++;
		}

		pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
			ncontexts, engine->name, ndwords);

		ncontexts = dw = 0;
		list_for_each_entry(obj, &objects, st_link) {
			unsigned int rem =
				min_t(unsigned int, ndwords - dw, max_dwords(obj));

			err = cpu_check(obj, ncontexts++, rem);
			if (err)
				break;

			dw += rem;
		}

out_unlock:
		if (igt_live_test_end(&t))
			err = -EIO;
		mutex_unlock(&i915->drm.struct_mutex);

		mock_file_free(i915, file);
		if (err)
			return err;
	}

	return 0;
}

static int igt_shared_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *parent;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct igt_live_test t;
	struct drm_file *file;
	int err = 0;

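	/*
	 * Create a few different contexts with the same mm and write
	 * through each ctx using the GPU making sure those writes end
	 * up in the expected pages of our obj.
	 */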
	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	parent = live_context(i915, file);
	if (IS_ERR(parent)) {
		err = PTR_ERR(parent);
		goto out_unlock;
	}

	if (!parent->vm) {
		err = 0;
		goto out_unlock;
	}

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	for_each_engine(engine, i915, id) {
		unsigned long ncontexts, ndwords, dw;
		struct drm_i915_gem_object *obj = NULL;
		IGT_TIMEOUT(end_time);
		LIST_HEAD(objects);

		if (!intel_engine_can_store_dword(engine))
			continue;

		dw = 0;
		ndwords = 0;
		ncontexts = 0;
		while (!time_after(jiffies, end_time)) {
			struct i915_gem_context *ctx;
			intel_wakeref_t wakeref;

			ctx = kernel_context(i915);
			if (IS_ERR(ctx)) {
				err = PTR_ERR(ctx);
				goto out_test;
			}

			__assign_ppgtt(ctx, parent->vm);

			if (!obj) {
				obj = create_test_object(parent, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					kernel_context_close(ctx);
					goto out_test;
				}
			}

			err = 0;
			with_intel_runtime_pm(&i915->runtime_pm, wakeref)
				err = gpu_fill(obj, ctx, engine, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name, ctx->hw_id,
				       yesno(!!ctx->vm), err);
				kernel_context_close(ctx);
				goto out_test;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}

			ndwords++;
			ncontexts++;

			kernel_context_close(ctx);
		}
		pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
			ncontexts, engine->name, ndwords);

		ncontexts = dw = 0;
		list_for_each_entry(obj, &objects, st_link) {
			unsigned int rem =
				min_t(unsigned int, ndwords - dw, max_dwords(obj));

			err = cpu_check(obj, ncontexts++, rem);
			if (err)
				goto out_test;

			dw += rem;
		}
	}
out_test:
	if (igt_live_test_end(&t))
		err = -EIO;
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
	return err;
}

static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj;
	u32 *cmd;
	int err;

	if (INTEL_GEN(vma->vm->i915) < 8)
		return ERR_PTR(-EINVAL);

	obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
	*cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
	*cmd++ = lower_32_bits(vma->node.start);
	*cmd++ = upper_32_bits(vma->node.start);
	*cmd = MI_BATCH_BUFFER_END;

	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int
emit_rpcs_query(struct drm_i915_gem_object *obj,
		struct intel_context *ce,
		struct i915_request **rq_out)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	struct i915_vma *vma;
	int err;

	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));

	vma = i915_vma_instance(obj, ce->gem_context->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);
	if (err)
		return err;

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	batch = rpcs_query_batch(vma);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_vma;
	}

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	err = rq->engine->emit_bb_start(rq,
					batch->node.start, batch->node.size,
					0);
	if (err)
		goto err_request;

	i915_vma_lock(batch);
	err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto skip_request;

	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	i915_vma_unpin(batch);
	i915_vma_close(batch);
	i915_vma_put(batch);

	i915_vma_unpin(vma);

	*rq_out = i915_request_get(rq);

	i915_request_add(rq);

	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_batch:
	i915_vma_unpin(batch);
	i915_vma_put(batch);
err_vma:
	i915_vma_unpin(vma);

	return err;
}

#define TEST_IDLE BIT(0)
#define TEST_BUSY BIT(1)
#define TEST_RESET BIT(2)

static int
__sseu_prepare(struct drm_i915_private *i915,
	       const char *name,
	       unsigned int flags,
	       struct intel_context *ce,
	       struct igt_spinner **spin)
{
	struct i915_request *rq;
	int ret;

	*spin = NULL;
	if (!(flags & (TEST_BUSY | TEST_RESET)))
		return 0;

	*spin = kzalloc(sizeof(**spin), GFP_KERNEL);
	if (!*spin)
		return -ENOMEM;

	ret = igt_spinner_init(*spin, i915);
	if (ret)
		goto err_free;

	rq = igt_spinner_create_request(*spin,
					ce->gem_context,
					ce->engine,
					MI_NOOP);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
		goto err_fini;
	}

	i915_request_add(rq);

	if (!igt_wait_for_spinner(*spin, rq)) {
		pr_err("%s: Spinner failed to start!\n", name);
		ret = -ETIMEDOUT;
		goto err_end;
	}

	return 0;

err_end:
	igt_spinner_end(*spin);
err_fini:
	igt_spinner_fini(*spin);
err_free:
	kfree(fetch_and_zero(spin));
	return ret;
}

static int
__read_slice_count(struct drm_i915_private *i915,
		   struct intel_context *ce,
		   struct drm_i915_gem_object *obj,
		   struct igt_spinner *spin,
		   u32 *rpcs)
{
	struct i915_request *rq = NULL;
	u32 s_mask, s_shift;
	unsigned int cnt;
	u32 *buf, val;
	long ret;

	ret = emit_rpcs_query(obj, ce, &rq);
	if (ret)
		return ret;

	if (spin)
		igt_spinner_end(spin);

	ret = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
	i915_request_put(rq);
	if (ret < 0)
		return ret;

	buf = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		return ret;
	}

	if (INTEL_GEN(i915) >= 11) {
		s_mask = GEN11_RPCS_S_CNT_MASK;
		s_shift = GEN11_RPCS_S_CNT_SHIFT;
	} else {
		s_mask = GEN8_RPCS_S_CNT_MASK;
		s_shift = GEN8_RPCS_S_CNT_SHIFT;
	}

	val = *buf;
	cnt = (val & s_mask) >> s_shift;
	*rpcs = val;

	i915_gem_object_unpin_map(obj);

	return cnt;
}

static int
__check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected,
	     const char *prefix, const char *suffix)
{
	if (slices == expected)
		return 0;

	if (slices < 0) {
		pr_err("%s: %s read slice count failed with %d%s\n",
		       name, prefix, slices, suffix);
		return slices;
	}

	pr_err("%s: %s slice count %d is not %u%s\n",
	       name, prefix, slices, expected, suffix);

	pr_info("RPCS=0x%x; %u%sx%u%s\n",
		rpcs, slices,
		(rpcs & GEN8_RPCS_S_CNT_ENABLE) ? "*" : "",
		(rpcs & GEN8_RPCS_SS_CNT_MASK) >> GEN8_RPCS_SS_CNT_SHIFT,
		(rpcs & GEN8_RPCS_SS_CNT_ENABLE) ? "*" : "");

	return -EINVAL;
}

static int
__sseu_finish(struct drm_i915_private *i915,
	      const char *name,
	      unsigned int flags,
	      struct intel_context *ce,
	      struct drm_i915_gem_object *obj,
	      unsigned int expected,
	      struct igt_spinner *spin)
{
	unsigned int slices = hweight32(ce->engine->sseu.slice_mask);
	u32 rpcs = 0;
	int ret = 0;

	if (flags & TEST_RESET) {
		ret = i915_reset_engine(ce->engine, "sseu");
		if (ret)
			goto out;
	}

	ret = __read_slice_count(i915, ce, obj,
				 flags & TEST_RESET ? NULL : spin, &rpcs);
	ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
	if (ret)
		goto out;

	ret = __read_slice_count(i915, ce->engine->kernel_context, obj,
				 NULL, &rpcs);
	ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");

out:
	if (spin)
		igt_spinner_end(spin);

	if ((flags & TEST_IDLE) && ret == 0) {
		ret = i915_gem_wait_for_idle(i915, 0, MAX_SCHEDULE_TIMEOUT);
		if (ret)
			return ret;

		ret = __read_slice_count(i915, ce, obj, NULL, &rpcs);
		ret = __check_rpcs(name, rpcs, ret, expected,
				   "Context", " after idle!");
	}

	return ret;
}

static int
__sseu_test(struct drm_i915_private *i915,
	    const char *name,
	    unsigned int flags,
	    struct intel_context *ce,
	    struct drm_i915_gem_object *obj,
	    struct intel_sseu sseu)
{
	struct igt_spinner *spin = NULL;
	int ret;

	ret = __sseu_prepare(i915, name, flags, ce, &spin);
	if (ret)
		return ret;

	ret = __intel_context_reconfigure_sseu(ce, sseu);
	if (ret)
		goto out_spin;

	ret = __sseu_finish(i915, name, flags, ce, obj,
			    hweight32(sseu.slice_mask), spin);

out_spin:
	if (spin) {
		igt_spinner_end(spin);
		igt_spinner_fini(spin);
		kfree(spin);
	}
	return ret;
}

static int
__igt_ctx_sseu(struct drm_i915_private *i915,
	       const char *name,
	       unsigned int flags)
{
	struct intel_engine_cs *engine = i915->engine[RCS0];
	struct intel_sseu default_sseu = engine->sseu;
	struct drm_i915_gem_object *obj;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct intel_sseu pg_sseu;
	intel_wakeref_t wakeref;
	struct drm_file *file;
	int ret;

	if (INTEL_GEN(i915) < 9)
		return 0;

	if (!RUNTIME_INFO(i915)->sseu.has_slice_pg)
		return 0;

	if (hweight32(default_sseu.slice_mask) < 2)
		return 0;

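	/*
	 * Gen11 VME friendly power-gated configuration with half enabled
	 * sub-slices.
	 */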
	pg_sseu = default_sseu;
	pg_sseu.slice_mask = 1;
	pg_sseu.subslice_mask =
		~(~0 << (hweight32(default_sseu.subslice_mask) / 2));

	pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
		name, flags, hweight32(default_sseu.slice_mask),
		hweight32(pg_sseu.slice_mask));

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (flags & TEST_RESET)
		igt_global_reset_lock(i915);

	mutex_lock(&i915->drm.struct_mutex);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto out_unlock;
	}
	i915_gem_context_clear_bannable(ctx);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	ce = i915_gem_context_get_engine(ctx, RCS0);
	if (IS_ERR(ce)) {
		ret = PTR_ERR(ce);
		goto out_rpm;
	}

	ret = intel_context_pin(ce);
	if (ret)
		goto out_context;

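	/* First set the default mask. */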
	ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);
	if (ret)
		goto out_fail;

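	/* Then set a power-gated configuration. */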
	ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
	if (ret)
		goto out_fail;

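	/* Back to defaults. */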
	ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);
	if (ret)
		goto out_fail;

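	/* One last power-gated configuration for the road. */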
	ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
	if (ret)
		goto out_fail;

out_fail:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		ret = -EIO;

	intel_context_unpin(ce);
out_context:
	intel_context_put(ce);
out_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_put(obj);

out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);

	if (flags & TEST_RESET)
		igt_global_reset_unlock(i915);

	mock_file_free(i915, file);

	if (ret)
		pr_err("%s: Failed with %d!\n", name, ret);

	return ret;
}

static int igt_ctx_sseu(void *arg)
{
	struct {
		const char *name;
		unsigned int flags;
	} *phase, phases[] = {
		{ .name = "basic", .flags = 0 },
		{ .name = "idle", .flags = TEST_IDLE },
		{ .name = "busy", .flags = TEST_BUSY },
		{ .name = "busy-reset", .flags = TEST_BUSY | TEST_RESET },
		{ .name = "busy-idle", .flags = TEST_BUSY | TEST_IDLE },
		{ .name = "reset-idle", .flags = TEST_RESET | TEST_IDLE },
	};
	unsigned int i;
	int ret = 0;

	for (i = 0, phase = phases; ret == 0 && i < ARRAY_SIZE(phases);
	     i++, phase++)
		ret = __igt_ctx_sseu(arg, phase->name, phase->flags);

	return ret;
}

static int igt_ctx_readonly(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	unsigned long idx, ndwords, dw;
	struct igt_live_test t;
	struct drm_file *file;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	LIST_HEAD(objects);
	int err = -ENODEV;

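	/*
	 * Create a few read-only objects (with the occasional writable object)
	 * and try to write into these objects checking that the GPU discards
	 * any write to a read-only object.
	 */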
	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_unlock;
	}

	vm = ctx->vm ?: &i915->mm.aliasing_ppgtt->vm;
	if (!vm || !vm->has_read_only) {
		err = 0;
		goto out_unlock;
	}

	ndwords = 0;
	dw = 0;
	while (!time_after(jiffies, end_time)) {
		struct intel_engine_cs *engine;
		unsigned int id;

		for_each_engine(engine, i915, id) {
			intel_wakeref_t wakeref;

			if (!intel_engine_can_store_dword(engine))
				continue;

			if (!obj) {
				obj = create_test_object(ctx, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					goto out_unlock;
				}

				if (prandom_u32_state(&prng) & 1)
					i915_gem_object_set_readonly(obj);
			}

			err = 0;
			with_intel_runtime_pm(&i915->runtime_pm, wakeref)
				err = gpu_fill(obj, ctx, engine, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name, ctx->hw_id,
				       yesno(!!ctx->vm), err);
				goto out_unlock;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}
			ndwords++;
		}
	}
	pr_info("Submitted %lu dwords (across %u engines)\n",
		ndwords, RUNTIME_INFO(i915)->num_engines);

	dw = 0;
	idx = 0;
	list_for_each_entry(obj, &objects, st_link) {
		unsigned int rem =
			min_t(unsigned int, ndwords - dw, max_dwords(obj));
		unsigned int num_writes;

		num_writes = rem;
		if (i915_gem_object_is_readonly(obj))
			num_writes = 0;

		err = cpu_check(obj, idx++, num_writes);
		if (err)
			break;

		dw += rem;
	}

out_unlock:
	if (igt_live_test_end(&t))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
	return err;
}

static int check_scratch(struct i915_gem_context *ctx, u64 offset)
{
	struct drm_mm_node *node =
		__drm_mm_interval_first(&ctx->vm->mm,
					offset, offset + sizeof(u32) - 1);
	if (!node || node->start > offset)
		return 0;

	GEM_BUG_ON(offset >= node->start + node->size);

	pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
	       upper_32_bits(offset), lower_32_bits(offset));
	return -EINVAL;
}

static int write_to_scratch(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine,
			    u64 offset, u32 value)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*cmd++ = MI_STORE_DWORD_IMM_GEN4;
	if (INTEL_GEN(i915) >= 8) {
		*cmd++ = lower_32_bits(offset);
		*cmd++ = upper_32_bits(offset);
	} else {
		*cmd++ = 0;
		*cmd++ = offset;
	}
	*cmd++ = value;
	*cmd = MI_BATCH_BUFFER_END;
	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, ctx->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
	if (err)
		goto err;

	err = check_scratch(ctx, offset);
	if (err)
		goto err_unpin;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
	if (err)
		goto err_request;

	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, 0);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	i915_vma_unpin(vma);
	i915_vma_close(vma);
	i915_vma_put(vma);

	i915_request_add(rq);

	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(vma);
err:
	i915_gem_object_put(obj);
	return err;
}

static int read_from_scratch(struct i915_gem_context *ctx,
			     struct intel_engine_cs *engine,
			     u64 offset, u32 *value)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	const u32 RCS_GPR0 = 0x2600;
	const u32 result = 0x100;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	memset(cmd, POISON_INUSE, PAGE_SIZE);
	if (INTEL_GEN(i915) >= 8) {
		*cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
		*cmd++ = RCS_GPR0;
		*cmd++ = lower_32_bits(offset);
		*cmd++ = upper_32_bits(offset);
		*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
		*cmd++ = RCS_GPR0;
		*cmd++ = result;
		*cmd++ = 0;
	} else {
		*cmd++ = MI_LOAD_REGISTER_MEM;
		*cmd++ = RCS_GPR0;
		*cmd++ = offset;
		*cmd++ = MI_STORE_REGISTER_MEM;
		*cmd++ = RCS_GPR0;
		*cmd++ = result;
	}
	*cmd = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, ctx->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
	if (err)
		goto err;

	err = check_scratch(ctx, offset);
	if (err)
		goto err_unpin;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
	if (err)
		goto err_request;

	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_request_add(rq);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_cpu_domain(obj, false);
	i915_gem_object_unlock(obj);
	if (err)
		goto err;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*value = cmd[result / sizeof(*cmd)];
	i915_gem_object_unpin_map(obj);
	i915_gem_object_put(obj);

	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(vma);
err:
	i915_gem_object_put(obj);
	return err;
}

static int igt_vm_isolation(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_a, *ctx_b;
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	struct igt_live_test t;
	struct drm_file *file;
	I915_RND_STATE(prng);
	unsigned long count;
	unsigned int id;
	u64 vm_total;
	int err;

	if (INTEL_GEN(i915) < 7)
		return 0;

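	/*
	 * The simple goal here is that a write into one context is not
	 * observed in a second (separate page tables and scratch).
	 */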
	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	ctx_a = live_context(i915, file);
	if (IS_ERR(ctx_a)) {
		err = PTR_ERR(ctx_a);
		goto out_unlock;
	}

	ctx_b = live_context(i915, file);
	if (IS_ERR(ctx_b)) {
		err = PTR_ERR(ctx_b);
		goto out_unlock;
	}

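	/* We can only test vm isolation if the vms are distinct */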
	if (ctx_a->vm == ctx_b->vm)
		goto out_unlock;

	vm_total = ctx_a->vm->total;
	GEM_BUG_ON(ctx_b->vm->total != vm_total);
	vm_total -= I915_GTT_PAGE_SIZE;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	count = 0;
	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);
		unsigned long this = 0;

		if (!intel_engine_can_store_dword(engine))
			continue;

		while (!__igt_timeout(end_time, NULL)) {
			u32 value = 0xc5c5c5c5;
			u64 offset;

			div64_u64_rem(i915_prandom_u64_state(&prng),
				      vm_total, &offset);
			offset &= -sizeof(u32);
			offset += I915_GTT_PAGE_SIZE;

			err = write_to_scratch(ctx_a, engine,
					       offset, 0xdeadbeef);
			if (err == 0)
				err = read_from_scratch(ctx_b, engine,
							offset, &value);
			if (err)
				goto out_rpm;

			if (value) {
				pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
				       engine->name, value,
				       upper_32_bits(offset),
				       lower_32_bits(offset),
				       this);
				err = -EINVAL;
				goto out_rpm;
			}

			this++;
		}
		count += this;
	}
	pr_info("Checked %lu scratch offsets across %d engines\n",
		count, RUNTIME_INFO(i915)->num_engines);

out_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
out_unlock:
	if (igt_live_test_end(&t))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
	return err;
}

static __maybe_unused const char *
__engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;

	if (engines == ALL_ENGINES)
		return "all";

	for_each_engine_masked(engine, i915, engines, tmp)
		return engine->name;

	return "none";
}

static bool skip_unused_engines(struct intel_context *ce, void *data)
{
	return !ce->state;
}

static void mock_barrier_task(void *data)
{
	unsigned int *counter = data;

	++*counter;
}

static int mock_context_barrier(void *arg)
{
#undef pr_fmt
#define pr_fmt(x) "context_barrier_task():" # x
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct i915_request *rq;
	unsigned int counter;
	int err;

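	/*
	 * The context barrier provides us with a callback after it emits
	 * a request; useful for retiring old state after loading new.
	 */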
	mutex_lock(&i915->drm.struct_mutex);

	ctx = mock_context(i915, "mock");
	if (!ctx) {
		err = -ENOMEM;
		goto unlock;
	}

	counter = 0;
	err = context_barrier_task(ctx, 0,
				   NULL, NULL, mock_barrier_task, &counter);
	if (err) {
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		goto out;
	}
	if (counter == 0) {
		pr_err("Did not retire immediately with 0 engines\n");
		err = -EINVAL;
		goto out;
	}

	counter = 0;
	err = context_barrier_task(ctx, ALL_ENGINES,
				   skip_unused_engines,
				   NULL,
				   mock_barrier_task,
				   &counter);
	if (err) {
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		goto out;
	}
	if (counter == 0) {
		pr_err("Did not retire immediately for all unused engines\n");
		err = -EINVAL;
		goto out;
	}

	rq = igt_request_alloc(ctx, i915->engine[RCS0]);
	if (IS_ERR(rq)) {
		pr_err("Request allocation failed!\n");
		err = PTR_ERR(rq);
		goto out;
	}
	i915_request_add(rq);

	counter = 0;
	context_barrier_inject_fault = BIT(RCS0);
	err = context_barrier_task(ctx, ALL_ENGINES,
				   NULL, NULL, mock_barrier_task, &counter);
	context_barrier_inject_fault = 0;
	if (err == -ENXIO)
		err = 0;
	else
		pr_err("Did not hit fault injection!\n");
	if (counter != 0) {
		pr_err("Invoked callback on error!\n");
		err = -EIO;
	}
	if (err)
		goto out;

	counter = 0;
	err = context_barrier_task(ctx, ALL_ENGINES,
				   skip_unused_engines,
				   NULL,
				   mock_barrier_task,
				   &counter);
	if (err) {
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		goto out;
	}
	mock_device_flush(i915);
	if (counter == 0) {
		pr_err("Did not retire on each active engine\n");
		err = -EINVAL;
		goto out;
	}

out:
	mock_context_close(ctx);
unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
#undef pr_fmt
#define pr_fmt(x) x
}

int i915_gem_context_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(mock_context_barrier),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);

	drm_dev_put(&i915->drm);
	return err;
}

int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_nop_switch),
		SUBTEST(igt_ctx_exec),
		SUBTEST(igt_ctx_readonly),
		SUBTEST(igt_ctx_sseu),
		SUBTEST(igt_shared_ctx_exec),
		SUBTEST(igt_vm_isolation),
	};

	if (i915_terminally_wedged(dev_priv))
		return 0;

	return i915_subtests(tests, dev_priv);
}