/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "i915_selftest.h"

#include "gem/i915_gem_pm.h"

#include "igt_gem_utils.h"
#include "mock_context.h"

#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"
#include "selftests/i915_random.h"

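/*
 * Page sizes to exercise, ordered from largest to smallest so that
 * get_largest_page_size() can simply return the first supported size
 * that fits.
 */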
static const unsigned int page_sizes[] = {
	I915_GTT_PAGE_SIZE_2M,
	I915_GTT_PAGE_SIZE_64K,
	I915_GTT_PAGE_SIZE_4K,
};

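/*
 * Return the largest page size supported by the device which is no
 * larger than the remaining length, or 0 if nothing fits.
 */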
static unsigned int get_largest_page_size(struct drm_i915_private *i915,
					  u64 rem)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
		unsigned int page_size = page_sizes[i];

		if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size)
			return page_size;
	}

	return 0;
}

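/* Undo get_huge_pages(): free each chunk's pages, then the table itself. */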
static void huge_pages_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}

static int get_huge_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
	unsigned int page_mask = obj->mm.page_mask;
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;

	/*
	 * Our goal here is simple, we want to greedily fill the object from
	 * largest to smallest page-size, while ensuring that we use *every*
	 * page-size as per the given page-mask.
	 */
	do {
		unsigned int bit = ilog2(page_mask);
		unsigned int page_size = BIT(bit);
		int order = get_order(page_size);

		do {
			struct page *page;

			GEM_BUG_ON(order >= MAX_ORDER);
			page = alloc_pages(GFP | __GFP_ZERO, order);
			if (!page)
				goto err;

			sg_set_page(sg, page, page_size, 0);
			sg_page_sizes |= page_size;
			st->nents++;

			rem -= page_size;
			if (!rem) {
				sg_mark_end(sg);
				break;
			}

			sg = __sg_next(sg);
		} while ((rem - ((page_size-1) & page_mask)) >= page_size);

		page_mask &= (page_size-1);
	} while (page_mask);

	if (i915_gem_gtt_prepare_pages(obj, st))
		goto err;

	obj->mm.madv = I915_MADV_DONTNEED;

	GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	huge_pages_free_pages(st);

	return -ENOMEM;
}

static void put_huge_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	huge_pages_free_pages(pages);

	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops huge_page_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = get_huge_pages,
	.put_pages = put_huge_pages,
};

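/*
 * Create an object backed by real pages, where the backing store is
 * built greedily from every page size set in @page_mask.
 */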
static struct drm_i915_gem_object *
huge_pages_object(struct drm_i915_private *i915,
		  u64 size,
		  unsigned int page_mask)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));

	if (size >> PAGE_SHIFT > INT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &huge_page_ops);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	obj->mm.page_mask = page_mask;

	return obj;
}

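/*
 * Fake backing store: fill the sg table with chunks that have a length
 * and dma address but no struct pages, which is enough to exercise the
 * GTT insertion paths without the cost of real huge-page allocations.
 * The dma address is simply set to the chunk's page size, so it always
 * carries the expected alignment.
 */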
static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	const u64 max_len = rounddown_pow_of_two(UINT_MAX);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Use optimal page sized chunks to fill in the sg table */
	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	do {
		unsigned int page_size = get_largest_page_size(i915, rem);
		unsigned int len = min(page_size * div_u64(rem, page_size),
				       max_len);

		GEM_BUG_ON(!page_size);

		sg->offset = 0;
		sg->length = len;
		sg_dma_len(sg) = len;
		sg_dma_address(sg) = page_size;

		sg_page_sizes |= len;

		st->nents++;

		rem -= len;
		if (!rem) {
			sg_mark_end(sg);
			break;
		}

		sg = sg_next(sg);
	} while (1);

	i915_sg_trim(st);

	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;
}

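/* As above, but describe the whole object with a single sg entry. */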
static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int page_size;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 1;

	page_size = get_largest_page_size(i915, obj->base.size);
	GEM_BUG_ON(!page_size);

	sg->offset = 0;
	sg->length = obj->base.size;
	sg_dma_len(sg) = obj->base.size;
	sg_dma_address(sg) = page_size;

	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;
#undef GFP
}

static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
				struct sg_table *pages)
{
	fake_free_huge_pages(obj, pages);
	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages,
	.put_pages = fake_put_huge_pages,
};

static const struct drm_i915_gem_object_ops fake_ops_single = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages_single,
	.put_pages = fake_put_huge_pages,
};

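/*
 * Create an object using the fake backing store; @single selects the
 * single-entry variant of get_pages.
 */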
static struct drm_i915_gem_object *
fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (size >> PAGE_SHIFT > UINT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);

	if (single)
		i915_gem_object_init(obj, &fake_ops_single);
	else
		i915_gem_object_init(obj, &fake_ops);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	return obj;
}

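/*
 * Verify the page-size bookkeeping for a bound vma: the sg and gtt page
 * sizes must be supported by the device, the vma must agree with its
 * object, and the object itself must never carry a gtt page size.
 */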
static int igt_check_page_sizes(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	unsigned int supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj = vma->obj;
	int err = 0;

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
		pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
		       vma->page_sizes.sg & ~supported, supported);
		err = -EINVAL;
	}

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
		pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
		       vma->page_sizes.gtt & ~supported, supported);
		err = -EINVAL;
	}

	if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
		pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
		       vma->page_sizes.phys, obj->mm.page_sizes.phys);
		err = -EINVAL;
	}

	if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
		pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
		       vma->page_sizes.sg, obj->mm.page_sizes.sg);
		err = -EINVAL;
	}

	if (obj->mm.page_sizes.gtt) {
		pr_err("obj->page_sizes.gtt(%u) should never be set\n",
		       obj->mm.page_sizes.gtt);
		err = -EINVAL;
	}

	return err;
}

static int igt_mock_exhaust_device_supported_pages(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i, j, single;
	int err;

	/*
	 * Sanity check creating objects with every valid page support
	 * combination for our mock device.
	 */

	for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
		unsigned int combination = 0;

		for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
			if (i & BIT(j))
				combination |= page_sizes[j];
		}

		mkwrite_device_info(i915)->page_sizes = combination;

		for (single = 0; single <= 1; ++single) {
			obj = fake_huge_pages_object(i915, combination, !!single);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_device;
			}

			if (obj->base.size != combination) {
				pr_err("obj->base.size=%zu, expected=%u\n",
				       obj->base.size, combination);
				err = -EINVAL;
				goto out_put;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_close;

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.sg != combination) {
				pr_err("page_sizes.sg=%u, expected=%u\n",
				       vma->page_sizes.sg, combination);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);
			i915_vma_close(vma);

			i915_gem_object_put(obj);

			if (err)
				goto out_device;
		}
	}

	goto out_device;

out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);
out_device:
	mkwrite_device_info(i915)->page_sizes = saved_mask;

	return err;
}

static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	int bit;
	int err;

	/*
	 * Sanity check dma misalignment for huge pages -- the dma addresses we
	 * insert into the paging structures need to always respect the page
	 * size alignment.
	 */

	bit = ilog2(I915_GTT_PAGE_SIZE_64K);

	for_each_set_bit_from(bit, &supported,
			      ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		IGT_TIMEOUT(end_time);
		unsigned int page_size = BIT(bit);
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int offset;
		unsigned int size =
			round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
		struct i915_vma *vma;

		obj = fake_huge_pages_object(i915, size, true);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zu, expected=%u\n",
			       obj->base.size, size);
			err = -EINVAL;
			goto out_put;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		/* Force the page size for this object */
		obj->mm.page_sizes.sg = page_size;

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unpin;
		}

		err = i915_vma_pin(vma, 0, 0, flags);
		if (err) {
			i915_vma_close(vma);
			goto out_unpin;
		}

		err = igt_check_page_sizes(vma);

		if (vma->page_sizes.gtt != page_size) {
			pr_err("page_sizes.gtt=%u, expected %u\n",
			       vma->page_sizes.gtt, page_size);
			err = -EINVAL;
		}

		i915_vma_unpin(vma);

		if (err) {
			i915_vma_close(vma);
			goto out_unpin;
		}

		/*
		 * Try all the other valid offsets until the next
		 * boundary -- should always fall back to using 4K
		 * pages.
		 */
		for (offset = 4096; offset < page_size; offset += 4096) {
			err = i915_vma_unbind(vma);
			if (err) {
				i915_vma_close(vma);
				goto out_unpin;
			}

			err = i915_vma_pin(vma, 0, 0, flags | offset);
			if (err) {
				i915_vma_close(vma);
				goto out_unpin;
			}

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
				pr_err("page_sizes.gtt=%u, expected %llu\n",
				       vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);

			if (err) {
				i915_vma_close(vma);
				goto out_unpin;
			}

			if (igt_timeout(end_time,
					"%s timed out at offset %x with page-size %x\n",
					__func__, offset, page_size))
				break;
		}

		i915_vma_close(vma);

		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

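/* Unwind a list of pinned objects: close their vma, then drop the pages. */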
static void close_object_list(struct list_head *objects,
			      struct i915_ppgtt *ppgtt)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (!IS_ERR(vma))
			i915_vma_close(vma);

		list_del(&obj->st_link);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		i915_gem_object_put(obj);
	}
}

static int igt_mock_ppgtt_huge_fill(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
	unsigned long page_num;
	bool single = false;
	LIST_HEAD(objects);
	IGT_TIMEOUT(end_time);
	int err = -ENODEV;

	for_each_prime_number_from(page_num, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		u64 size = page_num << PAGE_SHIFT;
		struct i915_vma *vma;
		unsigned int expected_gtt = 0;
		int i;

		obj = fake_huge_pages_object(i915, size, single);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zd, expected=%llu\n",
			       obj->base.size, size);
			i915_gem_object_put(obj);
			err = -EINVAL;
			break;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			break;

		err = igt_check_page_sizes(vma);
		if (err) {
			i915_vma_unpin(vma);
			break;
		}

		/*
		 * Figure out the expected gtt page size knowing that we go from
		 * largest to smallest page size sg chunks, and that we align to
		 * the largest page size.
		 */
		for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
			unsigned int page_size = page_sizes[i];

			if (HAS_PAGE_SIZES(i915, page_size) &&
			    size >= page_size) {
				expected_gtt |= page_size;
				size &= page_size-1;
			}
		}

		GEM_BUG_ON(!expected_gtt);
		GEM_BUG_ON(size);

		if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
			expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;

		i915_vma_unpin(vma);

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
			if (!IS_ALIGNED(vma->node.start,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.start(%llx) not aligned to 2M\n",
				       vma->node.start);
				err = -EINVAL;
				break;
			}

			if (!IS_ALIGNED(vma->node.size,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.size(%llx) not aligned to 2M\n",
				       vma->node.size);
				err = -EINVAL;
				break;
			}
		}

		if (vma->page_sizes.gtt != expected_gtt) {
			pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
			       vma->page_sizes.gtt, expected_gtt,
			       obj->base.size, yesno(!!single));
			err = -EINVAL;
			break;
		}

		if (igt_timeout(end_time,
				"%s timed out at size %zd\n",
				__func__, obj->base.size))
			break;

		single = !single;
	}

	close_object_list(&objects, ppgtt);

	if (err == -ENOMEM || err == -ENOSPC)
		err = 0;

	return err;
}

static int igt_mock_ppgtt_64K(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct drm_i915_gem_object *obj;
	const struct object_info {
		unsigned int size;
		unsigned int gtt;
		unsigned int offset;
	} objects[] = {
		/* Cases with forced alignment/padding */
		{
			.size = SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_64K + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_64K - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		/* Try without any forced padding/alignment */
		{
			.size = SZ_64K,
			.offset = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
		{
			.size = SZ_128K,
			.offset = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
	};
	struct i915_vma *vma;
	int i, single;
	int err;

	/*
	 * Sanity check some of the trickiness with 64K pages -- either we can
	 * safely mark the whole page-table (2M block) as 64K, or we have to
	 * always fall back to 4K.
	 */

	if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
		return 0;

	for (i = 0; i < ARRAY_SIZE(objects); ++i) {
		unsigned int size = objects[i].size;
		unsigned int expected_gtt = objects[i].gtt;
		unsigned int offset = objects[i].offset;
		unsigned int flags = PIN_USER;

		for (single = 0; single <= 1; single++) {
			obj = fake_huge_pages_object(i915, size, !!single);
			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = i915_gem_object_pin_pages(obj);
			if (err)
				goto out_object_put;

			/*
			 * Disable 2M pages -- we only want to use 64K/4K pages
			 * for this test.
			 */
			obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_object_unpin;
			}

			if (offset)
				flags |= PIN_OFFSET_FIXED | offset;

			err = i915_vma_pin(vma, 0, 0, flags);
			if (err)
				goto out_vma_close;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_vma_unpin;

			if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
				if (!IS_ALIGNED(vma->node.start,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.start(%llx) not aligned to 2M\n",
					       vma->node.start);
					err = -EINVAL;
					goto out_vma_unpin;
				}

				if (!IS_ALIGNED(vma->node.size,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.size(%llx) not aligned to 2M\n",
					       vma->node.size);
					err = -EINVAL;
					goto out_vma_unpin;
				}
			}

			if (vma->page_sizes.gtt != expected_gtt) {
				pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
				       vma->page_sizes.gtt, expected_gtt, i,
				       yesno(!!single));
				err = -EINVAL;
				goto out_vma_unpin;
			}

			i915_vma_unpin(vma);
			i915_vma_close(vma);

			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
			i915_gem_object_put(obj);
		}
	}

	return 0;

out_vma_unpin:
	i915_vma_unpin(vma);
out_vma_close:
	i915_vma_close(vma);
out_object_unpin:
	i915_gem_object_unpin_pages(obj);
out_object_put:
	i915_gem_object_put(obj);

	return err;
}

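/*
 * Build and pin a batch which uses MI_STORE_DWORD_IMM (per-gen variant)
 * to store @val at @offset within each page spanned by @vma.
 */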
static struct i915_vma *
gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	const int gen = INTEL_GEN(i915);
	unsigned int count = vma->size >> PAGE_SHIFT;
	struct drm_i915_gem_object *obj;
	struct i915_vma *batch;
	unsigned int size;
	u32 *cmd;
	int n;
	int err;

	size = (1 + 4 * count) * sizeof(u32);
	size = round_up(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	offset += vma->node.start;

	for (n = 0; n < count; n++) {
		if (gen >= 8) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
			*cmd++ = lower_32_bits(offset);
			*cmd++ = upper_32_bits(offset);
			*cmd++ = val;
		} else if (gen >= 4) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
				(gen < 6 ? MI_USE_GGTT : 0);
			*cmd++ = 0;
			*cmd++ = offset;
			*cmd++ = val;
		} else {
			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*cmd++ = offset;
			*cmd++ = val;
		}

		offset += PAGE_SIZE;
	}

	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	batch = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err;
	}

	err = i915_vma_pin(batch, 0, 0, PIN_USER);
	if (err)
		goto err;

	return batch;

err:
	i915_gem_object_put(obj);

	return ERR_PTR(err);
}

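/*
 * Submit a request on @engine which runs the store batch for @vma,
 * writing @value into dword index @dword of every page.
 */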
static int gpu_write(struct i915_vma *vma,
		     struct i915_gem_context *ctx,
		     struct intel_engine_cs *engine,
		     u32 dword,
		     u32 value)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	int err;

	GEM_BUG_ON(!intel_engine_can_store_dword(engine));

	batch = gpu_write_dw(vma, dword * sizeof(u32), value);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	i915_vma_lock(batch);
	err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto err_request;

	i915_vma_lock(vma);
	err = i915_gem_object_set_to_gtt_domain(vma->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto err_request;

	err = engine->emit_bb_start(rq,
				    batch->node.start, batch->node.size,
				    0);
err_request:
	if (err)
		i915_request_skip(rq, err);
	i915_request_add(rq);
err_batch:
	i915_vma_unpin(batch);
	i915_vma_close(batch);
	i915_vma_put(batch);

	return err;
}

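/*
 * Read the object back through the CPU, flushing where needed, and
 * check that dword index @dword of every page contains @val.
 */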
static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned int needs_flush;
	unsigned long n;
	int err;

	err = i915_gem_object_prepare_read(obj, &needs_flush);
	if (err)
		return err;

	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
		u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));

		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(ptr, PAGE_SIZE);

		if (ptr[dword] != val) {
			pr_err("n=%lu ptr[%u]=%u, val=%u\n",
			       n, dword, ptr[dword], val);
			kunmap_atomic(ptr);
			err = -EINVAL;
			break;
		}

		kunmap_atomic(ptr);
	}

	i915_gem_object_finish_access(obj);

	return err;
}

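/*
 * Bind @obj at the fixed @offset, write @val with the GPU and then
 * verify the result from the CPU. Failing to pin inside the ggtt is
 * tolerated, since some of its range may be reserved.
 */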
static int __igt_write_huge(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine,
			    struct drm_i915_gem_object *obj,
			    u64 size, u64 offset,
			    u32 dword, u32 val)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_unbind(vma);
	if (err)
		goto out_vma_close;

	err = i915_vma_pin(vma, size, 0, flags | offset);
	if (err) {
		/*
		 * The ggtt may have some pages reserved so
		 * refrain from erroring out.
		 */
		if (err == -ENOSPC && i915_is_ggtt(vm))
			err = 0;

		goto out_vma_close;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_vma_unpin;

	err = gpu_write(vma, ctx, engine, dword, val);
	if (err) {
		pr_err("gpu-write failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

	err = cpu_check(obj, dword, val);
	if (err) {
		pr_err("cpu-check failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

out_vma_unpin:
	i915_vma_unpin(vma);
out_vma_close:
	i915_vma_destroy(vma);

	return err;
}

static int igt_write_huge(struct i915_gem_context *ctx,
			  struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
	static struct intel_engine_cs *engines[I915_NUM_ENGINES];
	struct intel_engine_cs *engine;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int max_page_size;
	unsigned int id;
	u64 max;
	u64 num;
	u64 size;
	int *order;
	int i, n;
	int err = 0;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	size = obj->base.size;
	if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
		size = round_up(size, I915_GTT_PAGE_SIZE_2M);

	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
	max = div_u64((vm->total - size), max_page_size);

	n = 0;
	for_each_engine(engine, i915, id) {
		if (!intel_engine_can_store_dword(engine)) {
			pr_info("store-dword-imm not supported on engine=%u\n",
				id);
			continue;
		}
		engines[n++] = engine;
	}

	if (!n)
		return 0;

	/*
	 * To keep things interesting when alternating between engines in our
	 * randomized order, let's also make feeding to the same engine a few
	 * times in succession a possibility by enlarging the permutation array.
	 */
	order = i915_random_order(n * I915_NUM_ENGINES, &prng);
	if (!order)
		return -ENOMEM;

	/*
	 * Try various offsets in an ascending/descending fashion until we
	 * timeout -- we want to avoid issues hidden by effectively always using
	 * offset = 0.
	 */
	i = 0;
	for_each_prime_number_from(num, 0, max) {
		u64 offset_low = num * max_page_size;
		u64 offset_high = (max - num) * max_page_size;
		u32 dword = offset_in_page(num) / 4;

		engine = engines[order[i] % n];
		i = (i + 1) % (n * I915_NUM_ENGINES);

		/*
		 * In order to utilize 64K pages we need to both pad the vma
		 * size and ensure the vma offset is at the start of the pt
		 * boundary, however to improve coverage we opt for testing both
		 * aligned and unaligned offsets.
		 */
		if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
			offset_low = round_down(offset_low,
						I915_GTT_PAGE_SIZE_2M);

		err = __igt_write_huge(ctx, engine, obj, size, offset_low,
				       dword, num + 1);
		if (err)
			break;

		err = __igt_write_huge(ctx, engine, obj, size, offset_high,
				       dword, num + 1);
		if (err)
			break;

		if (igt_timeout(end_time,
				"%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
				__func__, engine->id, offset_low, offset_high,
				max_page_size))
			break;
	}

	kfree(order);

	return err;
}

static int igt_ppgtt_exhaust_huge(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	static unsigned int pages[ARRAY_SIZE(page_sizes)];
	struct drm_i915_gem_object *obj;
	unsigned int size_mask;
	unsigned int page_mask;
	int n, i;
	int err = -ENODEV;

	if (supported == I915_GTT_PAGE_SIZE_4K)
		return 0;

	/*
	 * Sanity check creating objects with a varying mix of page sizes --
	 * ensuring that our writes land in the right place.
	 */

	n = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1)
		pages[n++] = BIT(i);

	for (size_mask = 2; size_mask < BIT(n); size_mask++) {
		unsigned int size = 0;

		for (i = 0; i < n; i++) {
			if (size_mask & BIT(i))
				size |= pages[i];
		}

		/*
		 * For our page mask we want to enumerate all the page-size
		 * combinations which will fit into our chosen object size.
		 */
		for (page_mask = 2; page_mask <= size_mask; page_mask++) {
			unsigned int page_sizes = 0;

			for (i = 0; i < n; i++) {
				if (page_mask & BIT(i))
					page_sizes |= pages[i];
			}

			/*
			 * Ensure that we can actually fill the given object
			 * with our chosen page mask.
			 */
			if (!IS_ALIGNED(size, BIT(__ffs(page_sizes))))
				continue;

			obj = huge_pages_object(i915, size, page_sizes);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_device;
			}

			err = i915_gem_object_pin_pages(obj);
			if (err) {
				i915_gem_object_put(obj);

				if (err == -ENOMEM) {
					pr_info("unable to get pages, size=%u, pages=%u\n",
						size, page_sizes);
					err = 0;
					break;
				}

1253 pr_err("pin_pages failed, size=%u, pages=%u\n",
1254 size_mask, page_mask);

				goto out_device;
			}

			/* Force the page-size for the gtt insertion */
			obj->mm.page_sizes.sg = page_sizes;

			err = igt_write_huge(ctx, obj);
			if (err) {
				pr_err("exhaust write-huge failed with size=%u\n",
				       size);
				goto out_unpin;
			}

			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
			i915_gem_object_put(obj);
		}
	}

	goto out_device;

out_unpin:
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_put(obj);
out_device:
	mkwrite_device_info(i915)->page_sizes = supported;

	return err;
}

static int igt_ppgtt_internal_huge(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	static const unsigned int sizes[] = {
		SZ_64K,
		SZ_128K,
		SZ_256K,
		SZ_512K,
		SZ_1M,
		SZ_2M,
	};
	int i;
	int err;

	/*
	 * Sanity check that the HW uses huge pages correctly through internal
	 * -- ensure that our writes land in the right place.
	 */

	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
		unsigned int size = sizes[i];

		obj = i915_gem_object_create_internal(i915, size);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
			pr_info("internal unable to allocate huge-page(s) with size=%u\n",
				size);
			goto out_unpin;
		}

		err = igt_write_huge(ctx, obj);
		if (err) {
			pr_err("internal write-huge failed with size=%u\n",
			       size);
			goto out_unpin;
		}

		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
{
	return i915->mm.gemfs && has_transparent_hugepage();
}

static int igt_ppgtt_gemfs_huge(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	static const unsigned int sizes[] = {
		SZ_2M,
		SZ_4M,
		SZ_8M,
		SZ_16M,
		SZ_32M,
	};
	int i;
	int err;

	/*
	 * Sanity check that the HW uses huge pages correctly through gemfs --
	 * ensure that our writes land in the right place.
	 */

	if (!igt_can_allocate_thp(i915)) {
		pr_info("missing THP support, skipping\n");
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
		unsigned int size = sizes[i];

		obj = i915_gem_object_create_shmem(i915, size);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
			pr_info("finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n",
				size);
			goto out_unpin;
		}

		err = igt_write_huge(ctx, obj);
		if (err) {
			pr_err("gemfs write-huge failed with size=%u\n",
			       size);
			goto out_unpin;
		}

		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static int igt_ppgtt_pin_update(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *dev_priv = ctx->i915;
	unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
	struct i915_address_space *vm = ctx->vm;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
	int first, last;
	int err;

	/*
	 * Make sure there's no funny business when doing a PIN_UPDATE -- in the
	 * past we had a subtle issue with being able to incorrectly do multiple
	 * alloc va ranges on the same object when doing a PIN_UPDATE, which
	 * resulted in some pretty ugly bugs, though only when using
	 * huge-gtt-pages.
	 */

	if (!vm || !i915_vm_is_4lvl(vm)) {
		pr_info("48b PPGTT not supported, skipping\n");
		return 0;
	}

	first = ilog2(I915_GTT_PAGE_SIZE_64K);
	last = ilog2(I915_GTT_PAGE_SIZE_2M);

	for_each_set_bit_from(first, &supported, last + 1) {
		unsigned int page_size = BIT(first);

		obj = i915_gem_object_create_internal(dev_priv, page_size);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_put;
		}

		err = i915_vma_pin(vma, SZ_2M, 0, flags);
		if (err)
			goto out_close;

		if (vma->page_sizes.sg < page_size) {
			pr_info("Unable to allocate page-size %x, finishing test early\n",
				page_size);
			goto out_unpin;
		}

		err = igt_check_page_sizes(vma);
		if (err)
			goto out_unpin;

		if (vma->page_sizes.gtt != page_size) {
			dma_addr_t addr = i915_gem_object_get_dma_address(obj, 0);

			/*
			 * The only excuse for failing here is a misaligned
			 * dma address handed back by the dma-mapper; with a
			 * single page-size anything else means we wrote the
			 * wrong page-table entries.
			 */
			if (IS_ALIGNED(addr, page_size)) {
				pr_err("page_sizes.gtt=%u, expected=%u\n",
				       vma->page_sizes.gtt, page_size);
				err = -EINVAL;
			} else {
				pr_info("dma address misaligned, finishing test early\n");
			}

			goto out_unpin;
		}

		err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE);
		if (err)
			goto out_unpin;

		i915_vma_unpin(vma);
		i915_vma_close(vma);

		i915_gem_object_put(obj);
	}

	obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_close;

	/*
	 * Make sure we don't end up with something like where the pde is still
	 * pointing to the 2M page, and the pt we just filled-in is dangling --
	 * we can check this by writing to the first page where it would then
	 * land in the now stale 2M page.
	 */

	err = gpu_write(vma, ctx, dev_priv->engine[RCS0], 0, 0xdeadbeaf);
	if (err)
		goto out_unpin;

	err = cpu_check(obj, 0, 0xdeadbeaf);

out_unpin:
	i915_vma_unpin(vma);
out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static int igt_tmpfs_fallback(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct vfsmount *gemfs = i915->mm.gemfs;
	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *vaddr;
	int err = 0;

	/*
	 * Make sure that we don't burst into a ball of flames upon falling back
	 * to tmpfs, which we rely on if on the off-chance we encounter a
	 * failure when setting up gemfs.
	 */

	i915->mm.gemfs = NULL;

	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_restore;
	}

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}
	*vaddr = 0xdeadbeaf;

	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_close;

	err = igt_check_page_sizes(vma);

	i915_vma_unpin(vma);
out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);
out_restore:
	i915->mm.gemfs = gemfs;

	return err;
}

static int igt_shrink_thp(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int flags = PIN_USER;
	int err;

	/*
	 * Sanity check shrinking huge-paged object -- make sure we get back the
	 * pages.
	 */

	if (!igt_can_allocate_thp(i915)) {
		pr_info("missing THP support, skipping\n");
		return 0;
	}

	obj = i915_gem_object_create_shmem(i915, SZ_2M);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_close;

	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
		pr_info("failed to allocate THP, finishing test early\n");
		goto out_unpin;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_unpin;

	err = gpu_write(vma, ctx, i915->engine[RCS0], 0, 0xdeadbeaf);
	if (err)
		goto out_unpin;

	i915_vma_unpin(vma);

	/*
	 * Now that the pages are *unpinned* shrink-all should invoke
	 * shmem to truncate our pages.
	 */
	i915_gem_shrink_all(i915);
	if (i915_gem_object_has_pages(obj)) {
		pr_err("shrink-all didn't truncate the pages\n");
		err = -EINVAL;
		goto out_close;
	}

	if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
		pr_err("residual page-size bits left\n");
		err = -EINVAL;
		goto out_close;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_close;

	err = cpu_check(obj, 0, 0xdeadbeaf);

out_unpin:
	i915_vma_unpin(vma);
out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);

	return err;
}

int i915_gem_huge_page_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_exhaust_device_supported_pages),
		SUBTEST(igt_mock_ppgtt_misaligned_dma),
		SUBTEST(igt_mock_ppgtt_huge_fill),
		SUBTEST(igt_mock_ppgtt_64K),
	};
	struct drm_i915_private *dev_priv;
	struct i915_ppgtt *ppgtt;
	int err;

	dev_priv = mock_gem_device();
	if (!dev_priv)
		return -ENOMEM;

	/* Pretend to be a device which supports the 48b PPGTT */
	mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
	mkwrite_device_info(dev_priv)->ppgtt_size = 48;

	mutex_lock(&dev_priv->drm.struct_mutex);
	ppgtt = i915_ppgtt_create(dev_priv);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		pr_err("failed to create 48b PPGTT\n");
		err = -EINVAL;
		goto out_close;
	}

	/* If we ever hit this then it's time to mock the 64K scratch */
	if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
		pr_err("PPGTT missing 64K scratch page\n");
		err = -EINVAL;
		goto out_close;
	}

	err = i915_subtests(tests, ppgtt);

out_close:
	i915_vm_put(&ppgtt->vm);

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
	drm_dev_put(&dev_priv->drm);

	return err;
}

int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_shrink_thp),
		SUBTEST(igt_ppgtt_pin_update),
		SUBTEST(igt_tmpfs_fallback),
		SUBTEST(igt_ppgtt_exhaust_huge),
		SUBTEST(igt_ppgtt_gemfs_huge),
		SUBTEST(igt_ppgtt_internal_huge),
	};
	struct drm_file *file;
	struct i915_gem_context *ctx;
	intel_wakeref_t wakeref;
	int err;

	if (!HAS_PPGTT(dev_priv)) {
		pr_info("PPGTT not supported, skipping live-selftests\n");
		return 0;
	}

	if (i915_terminally_wedged(dev_priv))
		return 0;

	file = mock_file(dev_priv);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&dev_priv->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	ctx = live_context(dev_priv, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_unlock;
	}

	if (ctx->vm)
		ctx->vm->scrub_64K = true;

	err = i915_subtests(tests, ctx);

out_unlock:
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	mock_file_free(dev_priv, file);

	return err;
}