/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

7#include <linux/prime_numbers.h>
8
9#include "gem/i915_gem_pm.h"
10#include "gt/intel_gt.h"
11#include "gt/intel_reset.h"
12#include "i915_selftest.h"
13
14#include "gem/selftests/igt_gem_utils.h"
15#include "selftests/i915_random.h"
16#include "selftests/igt_flush_test.h"
17#include "selftests/igt_live_test.h"
18#include "selftests/igt_reset.h"
19#include "selftests/igt_spinner.h"
20#include "selftests/mock_drm.h"
21#include "selftests/mock_gem_device.h"
22
23#include "huge_gem_object.h"
24#include "igt_gem_utils.h"
25
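/* Number of u32 slots in one page; test objects are sized in multiples of this. */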
26#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
27
28static int live_nop_switch(void *arg)
29{
30 const unsigned int nctx = 1024;
31 struct drm_i915_private *i915 = arg;
32 struct intel_engine_cs *engine;
33 struct i915_gem_context **ctx;
34 enum intel_engine_id id;
35 struct igt_live_test t;
36 struct drm_file *file;
37 unsigned long n;
38 int err = -ENODEV;
39
	/*
	 * Create as many contexts as we can feasibly get away with
	 * and check we can switch between them rapidly.
	 *
	 * Serialised by struct_mutex.
	 */

48 if (!DRIVER_CAPS(i915)->has_logical_contexts)
49 return 0;
50
51 file = mock_file(i915);
52 if (IS_ERR(file))
53 return PTR_ERR(file);
54
55 mutex_lock(&i915->drm.struct_mutex);
56
57 ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
58 if (!ctx) {
59 err = -ENOMEM;
60 goto out_unlock;
61 }
62
63 for (n = 0; n < nctx; n++) {
64 ctx[n] = live_context(i915, file);
65 if (IS_ERR(ctx[n])) {
66 err = PTR_ERR(ctx[n]);
67 goto out_unlock;
68 }
69 }
70
71 for_each_engine(engine, i915, id) {
72 struct i915_request *rq;
73 unsigned long end_time, prime;
74 ktime_t times[2] = {};
75
76 times[0] = ktime_get_raw();
77 for (n = 0; n < nctx; n++) {
78 rq = igt_request_alloc(ctx[n], engine);
79 if (IS_ERR(rq)) {
80 err = PTR_ERR(rq);
81 goto out_unlock;
82 }
83 i915_request_add(rq);
84 }
85 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			pr_err("Failed to populate %d contexts\n", nctx);
87 intel_gt_set_wedged(&i915->gt);
88 err = -EIO;
89 goto out_unlock;
90 }
91
92 times[1] = ktime_get_raw();
93
94 pr_info("Populated %d contexts on %s in %lluns\n",
95 nctx, engine->name, ktime_to_ns(times[1] - times[0]));
96
97 err = igt_live_test_begin(&t, i915, __func__, engine->name);
98 if (err)
99 goto out_unlock;
100
101 end_time = jiffies + i915_selftest.timeout_jiffies;
102 for_each_prime_number_from(prime, 2, 8192) {
103 times[1] = ktime_get_raw();
104
105 for (n = 0; n < prime; n++) {
106 rq = igt_request_alloc(ctx[n % nctx], engine);
107 if (IS_ERR(rq)) {
108 err = PTR_ERR(rq);
109 goto out_unlock;
110 }
111
				/*
				 * This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is
				 * sufficient for the system to work
				 * (i.e. proper HEAD tracking of the rings,
				 * interrupt handling, etc). It also gives
				 * us the lowest bounds for latency.
				 */

126 i915_request_add(rq);
127 }
128 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
				pr_err("Switching between %lu contexts timed out\n",
				       prime);
131 intel_gt_set_wedged(&i915->gt);
132 break;
133 }
134
135 times[1] = ktime_sub(ktime_get_raw(), times[1]);
136 if (prime == 2)
137 times[0] = times[1];
138
139 if (__igt_timeout(end_time, NULL))
140 break;
141 }
142
143 err = igt_live_test_end(&t);
144 if (err)
145 goto out_unlock;
146
147 pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
148 engine->name,
149 ktime_to_ns(times[0]),
150 prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
151 }
152
153out_unlock:
154 mutex_unlock(&i915->drm.struct_mutex);
155 mock_file_free(i915, file);
156 return err;
157}
158
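/*
 * huge_gem_object() backs a large "fake" GTT footprint with a much smaller
 * set of physical pages that repeat: real_page_count() is the number of
 * distinct physical pages, fake_page_count() the number of pages seen
 * through the GTT mapping.
 */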
159static unsigned long real_page_count(struct drm_i915_gem_object *obj)
160{
161 return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
162}
163
164static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
165{
166 return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
167}
168
169static int gpu_fill(struct drm_i915_gem_object *obj,
170 struct i915_gem_context *ctx,
171 struct intel_engine_cs *engine,
172 unsigned int dw)
173{
174 struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
175 struct i915_vma *vma;
176 int err;
177
178 GEM_BUG_ON(obj->base.size > vm->total);
179 GEM_BUG_ON(!intel_engine_can_store_dword(engine));
180
181 vma = i915_vma_instance(obj, vm, NULL);
182 if (IS_ERR(vma))
183 return PTR_ERR(vma);
184
185 i915_gem_object_lock(obj);
186 err = i915_gem_object_set_to_gtt_domain(obj, true);
187 i915_gem_object_unlock(obj);
188 if (err)
189 return err;
190
191 err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
192 if (err)
193 return err;
194
	/*
	 * Within the GTT the huge object maps every page onto
	 * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
	 * We set the nth dword within the page using the nth
	 * mapping via the GTT - this should exercise the GTT mapping
	 * whilst checking that each context provides a unique set
	 * of GGTT entries.
	 */
203 err = igt_gpu_fill_dw(vma,
204 ctx,
205 engine,
206 (dw * real_page_count(obj)) << PAGE_SHIFT |
207 (dw * sizeof(u32)),
208 real_page_count(obj),
209 dw);
210 i915_vma_unpin(vma);
211
212 return err;
213}
214
215static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
216{
217 const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
218 unsigned int n, m, need_flush;
219 int err;
220
221 err = i915_gem_object_prepare_write(obj, &need_flush);
222 if (err)
223 return err;
224
225 for (n = 0; n < real_page_count(obj); n++) {
226 u32 *map;
227
228 map = kmap_atomic(i915_gem_object_get_page(obj, n));
229 for (m = 0; m < DW_PER_PAGE; m++)
230 map[m] = value;
231 if (!has_llc)
232 drm_clflush_virt_range(map, PAGE_SIZE);
233 kunmap_atomic(map);
234 }
235
236 i915_gem_object_finish_access(obj);
237 obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
238 obj->write_domain = 0;
239 return 0;
240}
241
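/*
 * Verify with the CPU that the first @max dwords of every physical page
 * hold their own index (as written by the GPU) and that the remaining
 * dwords still carry the STACK_MAGIC poison from cpu_fill().
 */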
242static noinline int cpu_check(struct drm_i915_gem_object *obj,
243 unsigned int idx, unsigned int max)
244{
245 unsigned int n, m, needs_flush;
246 int err;
247
248 err = i915_gem_object_prepare_read(obj, &needs_flush);
249 if (err)
250 return err;
251
252 for (n = 0; n < real_page_count(obj); n++) {
253 u32 *map;
254
255 map = kmap_atomic(i915_gem_object_get_page(obj, n));
256 if (needs_flush & CLFLUSH_BEFORE)
257 drm_clflush_virt_range(map, PAGE_SIZE);
258
259 for (m = 0; m < max; m++) {
260 if (map[m] != m) {
261 pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
262 __builtin_return_address(0), idx,
263 n, real_page_count(obj), m, max,
264 map[m], m);
265 err = -EINVAL;
266 goto out_unmap;
267 }
268 }
269
270 for (; m < DW_PER_PAGE; m++) {
271 if (map[m] != STACK_MAGIC) {
272 pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
273 __builtin_return_address(0), idx, n, m,
274 map[m], STACK_MAGIC);
275 err = -EINVAL;
276 goto out_unmap;
277 }
278 }
279
280out_unmap:
281 kunmap_atomic(map);
282 if (err)
283 break;
284 }
285
286 i915_gem_object_finish_access(obj);
287 return err;
288}
289
290static int file_add_object(struct drm_file *file,
291 struct drm_i915_gem_object *obj)
292{
293 int err;
294
295 GEM_BUG_ON(obj->base.handle_count);
296
	/* Tie the object to the drm_file so it is reaped on file close */
298 err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL);
299 if (err < 0)
300 return err;
301
302 i915_gem_object_get(obj);
303 obj->base.handle_count++;
304 return 0;
305}
306
307static struct drm_i915_gem_object *
308create_test_object(struct i915_gem_context *ctx,
309 struct drm_file *file,
310 struct list_head *objects)
311{
312 struct drm_i915_gem_object *obj;
313 struct i915_address_space *vm = ctx->vm ?: &ctx->i915->ggtt.vm;
314 u64 size;
315 int err;
316
	/* Retire completed requests so stale objects can be reaped before allocating more */
318 i915_retire_requests(ctx->i915);
319
320 size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
321 size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
322
323 obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
324 if (IS_ERR(obj))
325 return obj;
326
327 err = file_add_object(file, obj);
328 i915_gem_object_put(obj);
329 if (err)
330 return ERR_PTR(err);
331
332 err = cpu_fill(obj, STACK_MAGIC);
333 if (err) {
334 pr_err("Failed to fill object with cpu, err=%d\n",
335 err);
336 return ERR_PTR(err);
337 }
338
339 list_add_tail(&obj->st_link, objects);
340 return obj;
341}
342
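/*
 * Number of gpu_fill() passes an object can absorb: each pass consumes
 * DW_PER_PAGE fake pages, writing one dword into every real page.
 */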
343static unsigned long max_dwords(struct drm_i915_gem_object *obj)
344{
345 unsigned long npages = fake_page_count(obj);
346
347 GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
348 return npages / DW_PER_PAGE;
349}
350
351static int igt_ctx_exec(void *arg)
352{
353 struct drm_i915_private *i915 = arg;
354 struct intel_engine_cs *engine;
355 enum intel_engine_id id;
356 int err = -ENODEV;
357
	/*
	 * Create a few different contexts (with different mm) and write
	 * through each ctx/mm using the GPU making sure those writes end
	 * up in the expected pages of our obj.
	 */

364 if (!DRIVER_CAPS(i915)->has_logical_contexts)
365 return 0;
366
367 for_each_engine(engine, i915, id) {
368 struct drm_i915_gem_object *obj = NULL;
369 unsigned long ncontexts, ndwords, dw;
370 struct igt_live_test t;
371 struct drm_file *file;
372 IGT_TIMEOUT(end_time);
373 LIST_HEAD(objects);
374
375 if (!intel_engine_can_store_dword(engine))
376 continue;
377
378 if (!engine->context_size)
379 continue;
380
381 file = mock_file(i915);
382 if (IS_ERR(file))
383 return PTR_ERR(file);
384
385 mutex_lock(&i915->drm.struct_mutex);
386
387 err = igt_live_test_begin(&t, i915, __func__, engine->name);
388 if (err)
389 goto out_unlock;
390
391 ncontexts = 0;
392 ndwords = 0;
393 dw = 0;
394 while (!time_after(jiffies, end_time)) {
395 struct i915_gem_context *ctx;
396
397 ctx = live_context(i915, file);
398 if (IS_ERR(ctx)) {
399 err = PTR_ERR(ctx);
400 goto out_unlock;
401 }
402
403 if (!obj) {
404 obj = create_test_object(ctx, file, &objects);
405 if (IS_ERR(obj)) {
406 err = PTR_ERR(obj);
407 goto out_unlock;
408 }
409 }
410
411 err = gpu_fill(obj, ctx, engine, dw);
412 if (err) {
413 pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
414 ndwords, dw, max_dwords(obj),
415 engine->name, ctx->hw_id,
416 yesno(!!ctx->vm), err);
417 goto out_unlock;
418 }
419
420 if (++dw == max_dwords(obj)) {
421 obj = NULL;
422 dw = 0;
423 }
424
425 ndwords++;
426 ncontexts++;
427 }
428
429 pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
430 ncontexts, engine->name, ndwords);
431
432 ncontexts = dw = 0;
433 list_for_each_entry(obj, &objects, st_link) {
434 unsigned int rem =
435 min_t(unsigned int, ndwords - dw, max_dwords(obj));
436
437 err = cpu_check(obj, ncontexts++, rem);
438 if (err)
439 break;
440
441 dw += rem;
442 }
443
444out_unlock:
445 if (igt_live_test_end(&t))
446 err = -EIO;
447 mutex_unlock(&i915->drm.struct_mutex);
448
449 mock_file_free(i915, file);
450 if (err)
451 return err;
452
453 i915_gem_drain_freed_objects(i915);
454 }
455
456 return 0;
457}
458
459static int igt_shared_ctx_exec(void *arg)
460{
461 struct drm_i915_private *i915 = arg;
462 struct i915_gem_context *parent;
463 struct intel_engine_cs *engine;
464 enum intel_engine_id id;
465 struct igt_live_test t;
466 struct drm_file *file;
467 int err = 0;
468
	/*
	 * Create a few different contexts with the same mm and write
	 * through each ctx using the GPU making sure those writes end
	 * up in the expected pages of our obj.
	 */

474 if (!DRIVER_CAPS(i915)->has_logical_contexts)
475 return 0;
476
477 file = mock_file(i915);
478 if (IS_ERR(file))
479 return PTR_ERR(file);
480
481 mutex_lock(&i915->drm.struct_mutex);
482
483 parent = live_context(i915, file);
484 if (IS_ERR(parent)) {
485 err = PTR_ERR(parent);
486 goto out_unlock;
487 }
488
489 if (!parent->vm) {
490 err = 0;
491 goto out_unlock;
492 }
493
494 err = igt_live_test_begin(&t, i915, __func__, "");
495 if (err)
496 goto out_unlock;
497
498 for_each_engine(engine, i915, id) {
499 unsigned long ncontexts, ndwords, dw;
500 struct drm_i915_gem_object *obj = NULL;
501 IGT_TIMEOUT(end_time);
502 LIST_HEAD(objects);
503
504 if (!intel_engine_can_store_dword(engine))
505 continue;
506
507 dw = 0;
508 ndwords = 0;
509 ncontexts = 0;
510 while (!time_after(jiffies, end_time)) {
511 struct i915_gem_context *ctx;
512
513 ctx = kernel_context(i915);
514 if (IS_ERR(ctx)) {
515 err = PTR_ERR(ctx);
516 goto out_test;
517 }
518
519 __assign_ppgtt(ctx, parent->vm);
520
521 if (!obj) {
522 obj = create_test_object(parent, file, &objects);
523 if (IS_ERR(obj)) {
524 err = PTR_ERR(obj);
525 kernel_context_close(ctx);
526 goto out_test;
527 }
528 }
529
530 err = gpu_fill(obj, ctx, engine, dw);
531 if (err) {
532 pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
533 ndwords, dw, max_dwords(obj),
534 engine->name, ctx->hw_id,
535 yesno(!!ctx->vm), err);
536 kernel_context_close(ctx);
537 goto out_test;
538 }
539
540 if (++dw == max_dwords(obj)) {
541 obj = NULL;
542 dw = 0;
543 }
544
545 ndwords++;
546 ncontexts++;
547
548 kernel_context_close(ctx);
549 }
550 pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
551 ncontexts, engine->name, ndwords);
552
553 ncontexts = dw = 0;
554 list_for_each_entry(obj, &objects, st_link) {
555 unsigned int rem =
556 min_t(unsigned int, ndwords - dw, max_dwords(obj));
557
558 err = cpu_check(obj, ncontexts++, rem);
559 if (err)
560 goto out_test;
561
562 dw += rem;
563 }
564
565 mutex_unlock(&i915->drm.struct_mutex);
566 i915_gem_drain_freed_objects(i915);
567 mutex_lock(&i915->drm.struct_mutex);
568 }
569out_test:
570 if (igt_live_test_end(&t))
571 err = -EIO;
572out_unlock:
573 mutex_unlock(&i915->drm.struct_mutex);
574
575 mock_file_free(i915, file);
576 return err;
577}
578
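/*
 * Build a one-page batch (gen8+) that stores the R_PWR_CLK_STATE register
 * into the start of @vma, so the test can read back the slice/subslice
 * configuration the context actually executed with.
 */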
579static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
580{
581 struct drm_i915_gem_object *obj;
582 u32 *cmd;
583 int err;
584
585 if (INTEL_GEN(vma->vm->i915) < 8)
586 return ERR_PTR(-EINVAL);
587
588 obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE);
589 if (IS_ERR(obj))
590 return ERR_CAST(obj);
591
592 cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
593 if (IS_ERR(cmd)) {
594 err = PTR_ERR(cmd);
595 goto err;
596 }
597
598 *cmd++ = MI_STORE_REGISTER_MEM_GEN8;
599 *cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
600 *cmd++ = lower_32_bits(vma->node.start);
601 *cmd++ = upper_32_bits(vma->node.start);
602 *cmd = MI_BATCH_BUFFER_END;
603
604 __i915_gem_object_flush_map(obj, 0, 64);
605 i915_gem_object_unpin_map(obj);
606
607 vma = i915_vma_instance(obj, vma->vm, NULL);
608 if (IS_ERR(vma)) {
609 err = PTR_ERR(vma);
610 goto err;
611 }
612
613 err = i915_vma_pin(vma, 0, 0, PIN_USER);
614 if (err)
615 goto err;
616
617 return vma;
618
619err:
620 i915_gem_object_put(obj);
621 return ERR_PTR(err);
622}
623
624static int
625emit_rpcs_query(struct drm_i915_gem_object *obj,
626 struct intel_context *ce,
627 struct i915_request **rq_out)
628{
629 struct i915_request *rq;
630 struct i915_vma *batch;
631 struct i915_vma *vma;
632 int err;
633
634 GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
635
636 vma = i915_vma_instance(obj, ce->vm, NULL);
637 if (IS_ERR(vma))
638 return PTR_ERR(vma);
639
640 i915_gem_object_lock(obj);
641 err = i915_gem_object_set_to_gtt_domain(obj, false);
642 i915_gem_object_unlock(obj);
643 if (err)
644 return err;
645
646 err = i915_vma_pin(vma, 0, 0, PIN_USER);
647 if (err)
648 return err;
649
650 batch = rpcs_query_batch(vma);
651 if (IS_ERR(batch)) {
652 err = PTR_ERR(batch);
653 goto err_vma;
654 }
655
656 rq = i915_request_create(ce);
657 if (IS_ERR(rq)) {
658 err = PTR_ERR(rq);
659 goto err_batch;
660 }
661
662 err = rq->engine->emit_bb_start(rq,
663 batch->node.start, batch->node.size,
664 0);
665 if (err)
666 goto err_request;
667
668 i915_vma_lock(batch);
669 err = i915_request_await_object(rq, batch->obj, false);
670 if (err == 0)
671 err = i915_vma_move_to_active(batch, rq, 0);
672 i915_vma_unlock(batch);
673 if (err)
674 goto skip_request;
675
676 i915_vma_lock(vma);
677 err = i915_request_await_object(rq, vma->obj, true);
678 if (err == 0)
679 err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
680 i915_vma_unlock(vma);
681 if (err)
682 goto skip_request;
683
684 i915_vma_unpin(batch);
685 i915_vma_close(batch);
686 i915_vma_put(batch);
687
688 i915_vma_unpin(vma);
689
690 *rq_out = i915_request_get(rq);
691
692 i915_request_add(rq);
693
694 return 0;
695
696skip_request:
697 i915_request_skip(rq, err);
698err_request:
699 i915_request_add(rq);
700err_batch:
701 i915_vma_unpin(batch);
702 i915_vma_put(batch);
703err_vma:
704 i915_vma_unpin(vma);
705
706 return err;
707}
708
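/*
 * Flags for the SSEU reconfiguration subtests: TEST_BUSY applies the new
 * configuration while a spinner keeps the context busy, TEST_RESET adds an
 * engine reset before reading back the result, and TEST_IDLE re-checks the
 * configuration after the GPU has idled.
 */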
709#define TEST_IDLE BIT(0)
710#define TEST_BUSY BIT(1)
711#define TEST_RESET BIT(2)
712
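/*
 * If the subtest wants a busy (or soon-to-be-reset) context, start a
 * spinner on it so the SSEU reconfiguration is applied to an active
 * context rather than an idle one.
 */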
713static int
714__sseu_prepare(const char *name,
715 unsigned int flags,
716 struct intel_context *ce,
717 struct igt_spinner **spin)
718{
719 struct i915_request *rq;
720 int ret;
721
722 *spin = NULL;
723 if (!(flags & (TEST_BUSY | TEST_RESET)))
724 return 0;
725
726 *spin = kzalloc(sizeof(**spin), GFP_KERNEL);
727 if (!*spin)
728 return -ENOMEM;
729
730 ret = igt_spinner_init(*spin, ce->engine->gt);
731 if (ret)
732 goto err_free;
733
734 rq = igt_spinner_create_request(*spin, ce, MI_NOOP);
735 if (IS_ERR(rq)) {
736 ret = PTR_ERR(rq);
737 goto err_fini;
738 }
739
740 i915_request_add(rq);
741
742 if (!igt_wait_for_spinner(*spin, rq)) {
743 pr_err("%s: Spinner failed to start!\n", name);
744 ret = -ETIMEDOUT;
745 goto err_end;
746 }
747
748 return 0;
749
750err_end:
751 igt_spinner_end(*spin);
752err_fini:
753 igt_spinner_fini(*spin);
754err_free:
755 kfree(fetch_and_zero(spin));
756 return ret;
757}
758
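/*
 * Submit an RPCS query on @ce, wait for it (ending any spinner first) and
 * decode the slice count from the reported register value; the raw value
 * is returned via @rpcs.
 */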
759static int
760__read_slice_count(struct intel_context *ce,
761 struct drm_i915_gem_object *obj,
762 struct igt_spinner *spin,
763 u32 *rpcs)
764{
765 struct i915_request *rq = NULL;
766 u32 s_mask, s_shift;
767 unsigned int cnt;
768 u32 *buf, val;
769 long ret;
770
771 ret = emit_rpcs_query(obj, ce, &rq);
772 if (ret)
773 return ret;
774
775 if (spin)
776 igt_spinner_end(spin);
777
778 ret = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
779 i915_request_put(rq);
780 if (ret < 0)
781 return ret;
782
783 buf = i915_gem_object_pin_map(obj, I915_MAP_WB);
784 if (IS_ERR(buf)) {
785 ret = PTR_ERR(buf);
786 return ret;
787 }
788
789 if (INTEL_GEN(ce->engine->i915) >= 11) {
790 s_mask = GEN11_RPCS_S_CNT_MASK;
791 s_shift = GEN11_RPCS_S_CNT_SHIFT;
792 } else {
793 s_mask = GEN8_RPCS_S_CNT_MASK;
794 s_shift = GEN8_RPCS_S_CNT_SHIFT;
795 }
796
797 val = *buf;
798 cnt = (val & s_mask) >> s_shift;
799 *rpcs = val;
800
801 i915_gem_object_unpin_map(obj);
802
803 return cnt;
804}
805
806static int
807__check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected,
808 const char *prefix, const char *suffix)
809{
810 if (slices == expected)
811 return 0;
812
813 if (slices < 0) {
814 pr_err("%s: %s read slice count failed with %d%s\n",
815 name, prefix, slices, suffix);
816 return slices;
817 }
818
819 pr_err("%s: %s slice count %d is not %u%s\n",
820 name, prefix, slices, expected, suffix);
821
822 pr_info("RPCS=0x%x; %u%sx%u%s\n",
823 rpcs, slices,
824 (rpcs & GEN8_RPCS_S_CNT_ENABLE) ? "*" : "",
825 (rpcs & GEN8_RPCS_SS_CNT_MASK) >> GEN8_RPCS_SS_CNT_SHIFT,
826 (rpcs & GEN8_RPCS_SS_CNT_ENABLE) ? "*" : "");
827
828 return -EINVAL;
829}
830
831static int
832__sseu_finish(const char *name,
833 unsigned int flags,
834 struct intel_context *ce,
835 struct drm_i915_gem_object *obj,
836 unsigned int expected,
837 struct igt_spinner *spin)
838{
839 unsigned int slices = hweight32(ce->engine->sseu.slice_mask);
840 u32 rpcs = 0;
841 int ret = 0;
842
843 if (flags & TEST_RESET) {
844 ret = intel_engine_reset(ce->engine, "sseu");
845 if (ret)
846 goto out;
847 }
848
849 ret = __read_slice_count(ce, obj,
850 flags & TEST_RESET ? NULL : spin, &rpcs);
851 ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
852 if (ret)
853 goto out;
854
855 ret = __read_slice_count(ce->engine->kernel_context, obj, NULL, &rpcs);
856 ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");
857
858out:
859 if (spin)
860 igt_spinner_end(spin);
861
862 if ((flags & TEST_IDLE) && ret == 0) {
863 ret = i915_gem_wait_for_idle(ce->engine->i915,
864 0, MAX_SCHEDULE_TIMEOUT);
865 if (ret)
866 return ret;
867
868 ret = __read_slice_count(ce, obj, NULL, &rpcs);
869 ret = __check_rpcs(name, rpcs, ret, expected,
870 "Context", " after idle!");
871 }
872
873 return ret;
874}
875
876static int
877__sseu_test(const char *name,
878 unsigned int flags,
879 struct intel_context *ce,
880 struct drm_i915_gem_object *obj,
881 struct intel_sseu sseu)
882{
883 struct igt_spinner *spin = NULL;
884 int ret;
885
886 ret = __sseu_prepare(name, flags, ce, &spin);
887 if (ret)
888 return ret;
889
890 ret = __intel_context_reconfigure_sseu(ce, sseu);
891 if (ret)
892 goto out_spin;
893
894 ret = __sseu_finish(name, flags, ce, obj,
895 hweight32(sseu.slice_mask), spin);
896
897out_spin:
898 if (spin) {
899 igt_spinner_end(spin);
900 igt_spinner_fini(spin);
901 kfree(spin);
902 }
903 return ret;
904}
905
906static int
907__igt_ctx_sseu(struct drm_i915_private *i915,
908 const char *name,
909 unsigned int flags)
910{
911 struct intel_engine_cs *engine = i915->engine[RCS0];
912 struct drm_i915_gem_object *obj;
913 struct i915_gem_context *ctx;
914 struct intel_context *ce;
915 struct intel_sseu pg_sseu;
916 struct drm_file *file;
917 int ret;
918
919 if (INTEL_GEN(i915) < 9 || !engine)
920 return 0;
921
922 if (!RUNTIME_INFO(i915)->sseu.has_slice_pg)
923 return 0;
924
925 if (hweight32(engine->sseu.slice_mask) < 2)
926 return 0;
927
	/*
	 * Gen11 VME friendly power-gated configuration with
	 * half enabled sub-slices.
	 */
932 pg_sseu = engine->sseu;
933 pg_sseu.slice_mask = 1;
934 pg_sseu.subslice_mask =
935 ~(~0 << (hweight32(engine->sseu.subslice_mask) / 2));
936
937 pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
938 name, flags, hweight32(engine->sseu.slice_mask),
939 hweight32(pg_sseu.slice_mask));
940
941 file = mock_file(i915);
942 if (IS_ERR(file))
943 return PTR_ERR(file);
944
945 if (flags & TEST_RESET)
946 igt_global_reset_lock(&i915->gt);
947
948 mutex_lock(&i915->drm.struct_mutex);
949
950 ctx = live_context(i915, file);
951 if (IS_ERR(ctx)) {
952 ret = PTR_ERR(ctx);
953 goto out_unlock;
954 }
955 i915_gem_context_clear_bannable(ctx);
956
957 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
958 if (IS_ERR(obj)) {
959 ret = PTR_ERR(obj);
960 goto out_unlock;
961 }
962
963 ce = i915_gem_context_get_engine(ctx, RCS0);
964 if (IS_ERR(ce)) {
965 ret = PTR_ERR(ce);
966 goto out_put;
967 }
968
969 ret = intel_context_pin(ce);
970 if (ret)
971 goto out_context;
972
	/* First set the default mask. */
974 ret = __sseu_test(name, flags, ce, obj, engine->sseu);
975 if (ret)
976 goto out_fail;
977
	/* Then set a power-gated configuration. */
979 ret = __sseu_test(name, flags, ce, obj, pg_sseu);
980 if (ret)
981 goto out_fail;
982
	/* Back to defaults. */
984 ret = __sseu_test(name, flags, ce, obj, engine->sseu);
985 if (ret)
986 goto out_fail;
987
	/* One last power-gated configuration for the road. */
989 ret = __sseu_test(name, flags, ce, obj, pg_sseu);
990 if (ret)
991 goto out_fail;
992
993out_fail:
994 if (igt_flush_test(i915, I915_WAIT_LOCKED))
995 ret = -EIO;
996
997 intel_context_unpin(ce);
998out_context:
999 intel_context_put(ce);
1000out_put:
1001 i915_gem_object_put(obj);
1002
1003out_unlock:
1004 mutex_unlock(&i915->drm.struct_mutex);
1005
1006 if (flags & TEST_RESET)
1007 igt_global_reset_unlock(&i915->gt);
1008
1009 mock_file_free(i915, file);
1010
1011 if (ret)
1012 pr_err("%s: Failed with %d!\n", name, ret);
1013
1014 return ret;
1015}
1016
1017static int igt_ctx_sseu(void *arg)
1018{
1019 struct {
1020 const char *name;
1021 unsigned int flags;
1022 } *phase, phases[] = {
1023 { .name = "basic", .flags = 0 },
1024 { .name = "idle", .flags = TEST_IDLE },
1025 { .name = "busy", .flags = TEST_BUSY },
1026 { .name = "busy-reset", .flags = TEST_BUSY | TEST_RESET },
1027 { .name = "busy-idle", .flags = TEST_BUSY | TEST_IDLE },
1028 { .name = "reset-idle", .flags = TEST_RESET | TEST_IDLE },
1029 };
1030 unsigned int i;
1031 int ret = 0;
1032
1033 for (i = 0, phase = phases; ret == 0 && i < ARRAY_SIZE(phases);
1034 i++, phase++)
1035 ret = __igt_ctx_sseu(arg, phase->name, phase->flags);
1036
1037 return ret;
1038}
1039
1040static int igt_ctx_readonly(void *arg)
1041{
1042 struct drm_i915_private *i915 = arg;
1043 struct drm_i915_gem_object *obj = NULL;
1044 struct i915_address_space *vm;
1045 struct i915_gem_context *ctx;
1046 unsigned long idx, ndwords, dw;
1047 struct igt_live_test t;
1048 struct drm_file *file;
1049 I915_RND_STATE(prng);
1050 IGT_TIMEOUT(end_time);
1051 LIST_HEAD(objects);
1052 int err = -ENODEV;
1053
	/*
	 * Create a few read-only objects (with the occasional writable object)
	 * and try to write into these objects, checking that the GPU discards
	 * any write to a read-only page.
	 */

1060 file = mock_file(i915);
1061 if (IS_ERR(file))
1062 return PTR_ERR(file);
1063
1064 mutex_lock(&i915->drm.struct_mutex);
1065
1066 err = igt_live_test_begin(&t, i915, __func__, "");
1067 if (err)
1068 goto out_unlock;
1069
1070 ctx = live_context(i915, file);
1071 if (IS_ERR(ctx)) {
1072 err = PTR_ERR(ctx);
1073 goto out_unlock;
1074 }
1075
1076 vm = ctx->vm ?: &i915->ggtt.alias->vm;
1077 if (!vm || !vm->has_read_only) {
1078 err = 0;
1079 goto out_unlock;
1080 }
1081
1082 ndwords = 0;
1083 dw = 0;
1084 while (!time_after(jiffies, end_time)) {
1085 struct intel_engine_cs *engine;
1086 unsigned int id;
1087
1088 for_each_engine(engine, i915, id) {
1089 if (!intel_engine_can_store_dword(engine))
1090 continue;
1091
1092 if (!obj) {
1093 obj = create_test_object(ctx, file, &objects);
1094 if (IS_ERR(obj)) {
1095 err = PTR_ERR(obj);
1096 goto out_unlock;
1097 }
1098
1099 if (prandom_u32_state(&prng) & 1)
1100 i915_gem_object_set_readonly(obj);
1101 }
1102
1103 err = gpu_fill(obj, ctx, engine, dw);
1104 if (err) {
1105 pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
1106 ndwords, dw, max_dwords(obj),
1107 engine->name, ctx->hw_id,
1108 yesno(!!ctx->vm), err);
1109 goto out_unlock;
1110 }
1111
1112 if (++dw == max_dwords(obj)) {
1113 obj = NULL;
1114 dw = 0;
1115 }
1116 ndwords++;
1117 }
1118 }
1119 pr_info("Submitted %lu dwords (across %u engines)\n",
1120 ndwords, RUNTIME_INFO(i915)->num_engines);
1121
1122 dw = 0;
1123 idx = 0;
1124 list_for_each_entry(obj, &objects, st_link) {
1125 unsigned int rem =
1126 min_t(unsigned int, ndwords - dw, max_dwords(obj));
1127 unsigned int num_writes;
1128
1129 num_writes = rem;
1130 if (i915_gem_object_is_readonly(obj))
1131 num_writes = 0;
1132
1133 err = cpu_check(obj, idx++, num_writes);
1134 if (err)
1135 break;
1136
1137 dw += rem;
1138 }
1139
1140out_unlock:
1141 if (igt_live_test_end(&t))
1142 err = -EIO;
1143 mutex_unlock(&i915->drm.struct_mutex);
1144
1145 mock_file_free(i915, file);
1146 return err;
1147}
1148
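/*
 * Reject scratch offsets that overlap an existing allocation in the
 * context's address space; a stray write there would corrupt unrelated
 * state rather than test isolation.
 */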
1149static int check_scratch(struct i915_gem_context *ctx, u64 offset)
1150{
1151 struct drm_mm_node *node =
1152 __drm_mm_interval_first(&ctx->vm->mm,
1153 offset, offset + sizeof(u32) - 1);
1154 if (!node || node->start > offset)
1155 return 0;
1156
1157 GEM_BUG_ON(offset >= node->start + node->size);
1158
1159 pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
1160 upper_32_bits(offset), lower_32_bits(offset));
1161 return -EINVAL;
1162}
1163
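/*
 * Emit a small batch that uses MI_STORE_DWORD_IMM to write @value at
 * @offset within the context's address space. The batch itself is pinned
 * at offset 0, which is why targets below one GTT page are rejected.
 */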
1164static int write_to_scratch(struct i915_gem_context *ctx,
1165 struct intel_engine_cs *engine,
1166 u64 offset, u32 value)
1167{
1168 struct drm_i915_private *i915 = ctx->i915;
1169 struct drm_i915_gem_object *obj;
1170 struct i915_request *rq;
1171 struct i915_vma *vma;
1172 u32 *cmd;
1173 int err;
1174
1175 GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
1176
1177 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1178 if (IS_ERR(obj))
1179 return PTR_ERR(obj);
1180
1181 cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
1182 if (IS_ERR(cmd)) {
1183 err = PTR_ERR(cmd);
1184 goto err;
1185 }
1186
1187 *cmd++ = MI_STORE_DWORD_IMM_GEN4;
1188 if (INTEL_GEN(i915) >= 8) {
1189 *cmd++ = lower_32_bits(offset);
1190 *cmd++ = upper_32_bits(offset);
1191 } else {
1192 *cmd++ = 0;
1193 *cmd++ = offset;
1194 }
1195 *cmd++ = value;
1196 *cmd = MI_BATCH_BUFFER_END;
1197 __i915_gem_object_flush_map(obj, 0, 64);
1198 i915_gem_object_unpin_map(obj);
1199
1200 vma = i915_vma_instance(obj, ctx->vm, NULL);
1201 if (IS_ERR(vma)) {
1202 err = PTR_ERR(vma);
1203 goto err;
1204 }
1205
1206 err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
1207 if (err)
1208 goto err;
1209
1210 err = check_scratch(ctx, offset);
1211 if (err)
1212 goto err_unpin;
1213
1214 rq = igt_request_alloc(ctx, engine);
1215 if (IS_ERR(rq)) {
1216 err = PTR_ERR(rq);
1217 goto err_unpin;
1218 }
1219
1220 err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
1221 if (err)
1222 goto err_request;
1223
1224 i915_vma_lock(vma);
1225 err = i915_request_await_object(rq, vma->obj, false);
1226 if (err == 0)
1227 err = i915_vma_move_to_active(vma, rq, 0);
1228 i915_vma_unlock(vma);
1229 if (err)
1230 goto skip_request;
1231
1232 i915_vma_unpin(vma);
1233 i915_vma_close(vma);
1234 i915_vma_put(vma);
1235
1236 i915_request_add(rq);
1237
1238 return 0;
1239
1240skip_request:
1241 i915_request_skip(rq, err);
1242err_request:
1243 i915_request_add(rq);
1244err_unpin:
1245 i915_vma_unpin(vma);
1246err:
1247 i915_gem_object_put(obj);
1248 return err;
1249}
1250
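/*
 * Emit a batch that loads the dword at @offset into a GPR with
 * MI_LOAD_REGISTER_MEM and stores that GPR back into the batch object
 * itself, so the CPU can read back the value the GPU observed.
 */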
1251static int read_from_scratch(struct i915_gem_context *ctx,
1252 struct intel_engine_cs *engine,
1253 u64 offset, u32 *value)
1254{
1255 struct drm_i915_private *i915 = ctx->i915;
1256 struct drm_i915_gem_object *obj;
1257 const u32 RCS_GPR0 = 0x2600;
1258 const u32 result = 0x100;
1259 struct i915_request *rq;
1260 struct i915_vma *vma;
1261 u32 *cmd;
1262 int err;
1263
1264 GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
1265
1266 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1267 if (IS_ERR(obj))
1268 return PTR_ERR(obj);
1269
1270 cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
1271 if (IS_ERR(cmd)) {
1272 err = PTR_ERR(cmd);
1273 goto err;
1274 }
1275
1276 memset(cmd, POISON_INUSE, PAGE_SIZE);
1277 if (INTEL_GEN(i915) >= 8) {
1278 *cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
1279 *cmd++ = RCS_GPR0;
1280 *cmd++ = lower_32_bits(offset);
1281 *cmd++ = upper_32_bits(offset);
1282 *cmd++ = MI_STORE_REGISTER_MEM_GEN8;
1283 *cmd++ = RCS_GPR0;
1284 *cmd++ = result;
1285 *cmd++ = 0;
1286 } else {
1287 *cmd++ = MI_LOAD_REGISTER_MEM;
1288 *cmd++ = RCS_GPR0;
1289 *cmd++ = offset;
1290 *cmd++ = MI_STORE_REGISTER_MEM;
1291 *cmd++ = RCS_GPR0;
1292 *cmd++ = result;
1293 }
1294 *cmd = MI_BATCH_BUFFER_END;
1295
1296 i915_gem_object_flush_map(obj);
1297 i915_gem_object_unpin_map(obj);
1298
1299 vma = i915_vma_instance(obj, ctx->vm, NULL);
1300 if (IS_ERR(vma)) {
1301 err = PTR_ERR(vma);
1302 goto err;
1303 }
1304
1305 err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
1306 if (err)
1307 goto err;
1308
1309 err = check_scratch(ctx, offset);
1310 if (err)
1311 goto err_unpin;
1312
1313 rq = igt_request_alloc(ctx, engine);
1314 if (IS_ERR(rq)) {
1315 err = PTR_ERR(rq);
1316 goto err_unpin;
1317 }
1318
1319 err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
1320 if (err)
1321 goto err_request;
1322
1323 i915_vma_lock(vma);
1324 err = i915_request_await_object(rq, vma->obj, true);
1325 if (err == 0)
1326 err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
1327 i915_vma_unlock(vma);
1328 if (err)
1329 goto skip_request;
1330
1331 i915_vma_unpin(vma);
1332 i915_vma_close(vma);
1333
1334 i915_request_add(rq);
1335
1336 i915_gem_object_lock(obj);
1337 err = i915_gem_object_set_to_cpu_domain(obj, false);
1338 i915_gem_object_unlock(obj);
1339 if (err)
1340 goto err;
1341
1342 cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
1343 if (IS_ERR(cmd)) {
1344 err = PTR_ERR(cmd);
1345 goto err;
1346 }
1347
1348 *value = cmd[result / sizeof(*cmd)];
1349 i915_gem_object_unpin_map(obj);
1350 i915_gem_object_put(obj);
1351
1352 return 0;
1353
1354skip_request:
1355 i915_request_skip(rq, err);
1356err_request:
1357 i915_request_add(rq);
1358err_unpin:
1359 i915_vma_unpin(vma);
1360err:
1361 i915_gem_object_put(obj);
1362 return err;
1363}
1364
1365static int igt_vm_isolation(void *arg)
1366{
1367 struct drm_i915_private *i915 = arg;
1368 struct i915_gem_context *ctx_a, *ctx_b;
1369 struct intel_engine_cs *engine;
1370 struct igt_live_test t;
1371 struct drm_file *file;
1372 I915_RND_STATE(prng);
1373 unsigned long count;
1374 unsigned int id;
1375 u64 vm_total;
1376 int err;
1377
1378 if (INTEL_GEN(i915) < 7)
1379 return 0;
1380
	/*
	 * The simple goal here is that a write into one context is not
	 * observed in a second (separate page tables and scratch).
	 */

1386 file = mock_file(i915);
1387 if (IS_ERR(file))
1388 return PTR_ERR(file);
1389
1390 mutex_lock(&i915->drm.struct_mutex);
1391
1392 err = igt_live_test_begin(&t, i915, __func__, "");
1393 if (err)
1394 goto out_unlock;
1395
1396 ctx_a = live_context(i915, file);
1397 if (IS_ERR(ctx_a)) {
1398 err = PTR_ERR(ctx_a);
1399 goto out_unlock;
1400 }
1401
1402 ctx_b = live_context(i915, file);
1403 if (IS_ERR(ctx_b)) {
1404 err = PTR_ERR(ctx_b);
1405 goto out_unlock;
1406 }
1407
	/* We can only test vm isolation if the vm are distinct */
1409 if (ctx_a->vm == ctx_b->vm)
1410 goto out_unlock;
1411
1412 vm_total = ctx_a->vm->total;
1413 GEM_BUG_ON(ctx_b->vm->total != vm_total);
1414 vm_total -= I915_GTT_PAGE_SIZE;
1415
1416 count = 0;
1417 for_each_engine(engine, i915, id) {
1418 IGT_TIMEOUT(end_time);
1419 unsigned long this = 0;
1420
1421 if (!intel_engine_can_store_dword(engine))
1422 continue;
1423
1424 while (!__igt_timeout(end_time, NULL)) {
1425 u32 value = 0xc5c5c5c5;
1426 u64 offset;
1427
1428 div64_u64_rem(i915_prandom_u64_state(&prng),
1429 vm_total, &offset);
			offset = round_down(offset, sizeof(u32));
1431 offset += I915_GTT_PAGE_SIZE;
1432
1433 err = write_to_scratch(ctx_a, engine,
1434 offset, 0xdeadbeef);
1435 if (err == 0)
1436 err = read_from_scratch(ctx_b, engine,
1437 offset, &value);
1438 if (err)
1439 goto out_unlock;
1440
1441 if (value) {
1442 pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
1443 engine->name, value,
1444 upper_32_bits(offset),
1445 lower_32_bits(offset),
1446 this);
1447 err = -EINVAL;
1448 goto out_unlock;
1449 }
1450
1451 this++;
1452 }
1453 count += this;
1454 }
1455 pr_info("Checked %lu scratch offsets across %d engines\n",
1456 count, RUNTIME_INFO(i915)->num_engines);
1457
1458out_unlock:
1459 if (igt_live_test_end(&t))
1460 err = -EIO;
1461 mutex_unlock(&i915->drm.struct_mutex);
1462
1463 mock_file_free(i915, file);
1464 return err;
1465}
1466
1467static __maybe_unused const char *
1468__engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines)
1469{
1470 struct intel_engine_cs *engine;
1471 intel_engine_mask_t tmp;
1472
1473 if (engines == ALL_ENGINES)
1474 return "all";
1475
1476 for_each_engine_masked(engine, i915, engines, tmp)
1477 return engine->name;
1478
1479 return "none";
1480}
1481
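/*
 * Barrier filter: skip engines on which the context has never run (no HW
 * state allocated), as there is nothing to serialise against there.
 */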
1482static bool skip_unused_engines(struct intel_context *ce, void *data)
1483{
1484 return !ce->state;
1485}
1486
1487static void mock_barrier_task(void *data)
1488{
1489 unsigned int *counter = data;
1490
1491 ++*counter;
1492}
1493
1494static int mock_context_barrier(void *arg)
1495{
1496#undef pr_fmt
1497#define pr_fmt(x) "context_barrier_task():" # x
1498 struct drm_i915_private *i915 = arg;
1499 struct i915_gem_context *ctx;
1500 struct i915_request *rq;
1501 unsigned int counter;
1502 int err;
1503
	/*
	 * The context barrier arranges for a callback to run once every
	 * request currently queued on the selected engines has been
	 * retired. Exercise the callback and its error paths on a mock
	 * device.
	 */

1509 mutex_lock(&i915->drm.struct_mutex);
1510
1511 ctx = mock_context(i915, "mock");
1512 if (!ctx) {
1513 err = -ENOMEM;
1514 goto unlock;
1515 }
1516
1517 counter = 0;
1518 err = context_barrier_task(ctx, 0,
1519 NULL, NULL, mock_barrier_task, &counter);
1520 if (err) {
1521 pr_err("Failed at line %d, err=%d\n", __LINE__, err);
1522 goto out;
1523 }
1524 if (counter == 0) {
1525 pr_err("Did not retire immediately with 0 engines\n");
1526 err = -EINVAL;
1527 goto out;
1528 }
1529
1530 counter = 0;
1531 err = context_barrier_task(ctx, ALL_ENGINES,
1532 skip_unused_engines,
1533 NULL,
1534 mock_barrier_task,
1535 &counter);
1536 if (err) {
1537 pr_err("Failed at line %d, err=%d\n", __LINE__, err);
1538 goto out;
1539 }
1540 if (counter == 0) {
1541 pr_err("Did not retire immediately for all unused engines\n");
1542 err = -EINVAL;
1543 goto out;
1544 }
1545
1546 rq = igt_request_alloc(ctx, i915->engine[RCS0]);
	if (IS_ERR(rq)) {
		pr_err("Request allocation failed!\n");
		err = PTR_ERR(rq);
		goto out;
	}
1551 i915_request_add(rq);
1552
1553 counter = 0;
1554 context_barrier_inject_fault = BIT(RCS0);
1555 err = context_barrier_task(ctx, ALL_ENGINES,
1556 NULL, NULL, mock_barrier_task, &counter);
1557 context_barrier_inject_fault = 0;
1558 if (err == -ENXIO)
1559 err = 0;
1560 else
1561 pr_err("Did not hit fault injection!\n");
1562 if (counter != 0) {
1563 pr_err("Invoked callback on error!\n");
1564 err = -EIO;
1565 }
1566 if (err)
1567 goto out;
1568
1569 counter = 0;
1570 err = context_barrier_task(ctx, ALL_ENGINES,
1571 skip_unused_engines,
1572 NULL,
1573 mock_barrier_task,
1574 &counter);
1575 if (err) {
1576 pr_err("Failed at line %d, err=%d\n", __LINE__, err);
1577 goto out;
1578 }
1579 mock_device_flush(i915);
1580 if (counter == 0) {
		pr_err("Did not retire on each active engine\n");
1582 err = -EINVAL;
1583 goto out;
1584 }
1585
1586out:
1587 mock_context_close(ctx);
1588unlock:
1589 mutex_unlock(&i915->drm.struct_mutex);
1590 return err;
1591#undef pr_fmt
1592#define pr_fmt(x) x
1593}
1594
1595int i915_gem_context_mock_selftests(void)
1596{
1597 static const struct i915_subtest tests[] = {
1598 SUBTEST(mock_context_barrier),
1599 };
1600 struct drm_i915_private *i915;
1601 int err;
1602
1603 i915 = mock_gem_device();
1604 if (!i915)
1605 return -ENOMEM;
1606
1607 err = i915_subtests(tests, i915);
1608
1609 drm_dev_put(&i915->drm);
1610 return err;
1611}
1612
1613int i915_gem_context_live_selftests(struct drm_i915_private *i915)
1614{
1615 static const struct i915_subtest tests[] = {
1616 SUBTEST(live_nop_switch),
1617 SUBTEST(igt_ctx_exec),
1618 SUBTEST(igt_ctx_readonly),
1619 SUBTEST(igt_ctx_sseu),
1620 SUBTEST(igt_shared_ctx_exec),
1621 SUBTEST(igt_vm_isolation),
1622 };
1623
1624 if (intel_gt_is_wedged(&i915->gt))
1625 return 0;
1626
1627 return i915_live_subtests(tests, i915);
1628}
1629