/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OF THE SOFTWARE.
 *
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "gem/selftests/mock_context.h"

#include "i915_random.h"
#include "i915_selftest.h"

#include "mock_drm.h"
#include "mock_gem_device.h"

static void cleanup_freed_objects(struct drm_i915_private *i915)
{
	/*
	 * As the tests may hold struct_mutex for long stretches, briefly
	 * drop the lock so that objects pending release can actually be
	 * freed, then reacquire it before continuing.
	 */
	mutex_unlock(&i915->drm.struct_mutex);

	i915_gem_drain_freed_objects(i915);

	mutex_lock(&i915->drm.struct_mutex);
}

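/*
 * The helpers below fabricate "DMA objects" without any real backing
 * storage: fake_get_pages() builds a scatterlist whose entries all point at
 * an arbitrary pfn (PFN_BIAS) and whose dma addresses are simply that
 * page's physical address. This lets the tests create enormous objects
 * cheaply in order to exercise GTT address space management; such objects
 * must only ever be bound and unbound, never read or written through.
 */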
static void fake_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
	struct sg_table *pages;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	typeof(obj->base.size) rem;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return -ENOMEM;

	/* One scatterlist entry per 2GiB chunk of the object */
	rem = round_up(obj->base.size, BIT(31)) >> 31;
	if (sg_alloc_table(pages, rem, GFP)) {
		kfree(pages);
		return -ENOMEM;
	}

	sg_page_sizes = 0;
	rem = obj->base.size;
	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
		unsigned long len = min_t(typeof(rem), rem, BIT(31));

		GEM_BUG_ON(!len);
		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
		sg_dma_address(sg) = page_to_phys(sg_page(sg));
		sg_dma_len(sg) = len;
		sg_page_sizes |= len;

		rem -= len;
	}
	GEM_BUG_ON(rem);

	/* The pages are fake, so mark them purgeable for the shrinker */
	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
#undef GFP
}

static void fake_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	fake_free_pages(obj, pages);
	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_pages,
	.put_pages = fake_put_pages,
};

static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		goto err;

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &fake_ops);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	/* Preallocate the "backing storage" */
	if (i915_gem_object_pin_pages(obj))
		goto err_obj;

	i915_gem_object_unpin_pages(obj);
	return obj;

err_obj:
	i915_gem_object_put(obj);
err:
	return ERR_PTR(-ENOMEM);
}

static int igt_ppgtt_alloc(void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct i915_ppgtt *ppgtt;
	u64 size, last, limit;
	int err = 0;

	/* Allocate a ppgtt and try to fill the entire range */

	if (!HAS_PPGTT(dev_priv))
		return 0;

	ppgtt = __ppgtt_create(dev_priv);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (!ppgtt->vm.allocate_va_range)
		goto err_ppgtt_cleanup;

	/*
	 * While we only allocate the page tables here and so we could
	 * address a much larger GTT than we could actually fit into
	 * RAM, a practical limit is the amount of physical pages in the
	 * system. This should ensure that we do not run into the oomkiller
	 * during the test and take down the machine wilfully.
	 */
	limit = totalram_pages() << PAGE_SHIFT;
	limit = min(ppgtt->vm.total, limit);

	/* Check we can allocate the entire range */
	for (size = 4096; size <= limit; size <<= 2) {
		err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
					size, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}

		cond_resched();

		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
	}

	/* Check we can incrementally allocate the entire range */
	for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
		err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
						  last, size - last);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
					last, size - last, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}

		cond_resched();
	}

err_ppgtt_cleanup:
	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_vm_put(&ppgtt->vm);
	mutex_unlock(&dev_priv->drm.struct_mutex);
	return err;
}

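/*
 * lowlevel_hole() drives the raw vm hooks (allocate_va_range,
 * insert_entries, clear_range) directly, bypassing the VMA layer, by
 * pointing a zeroed mock i915_vma at the object's sg-table. Placements
 * within the hole are visited in a random order, at every power-of-two
 * object size that still fits.
 */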
static int lowlevel_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	I915_RND_STATE(seed_prng);
	unsigned int size;
	struct i915_vma mock_vma;

	memset(&mock_vma, 0, sizeof(struct i915_vma));

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		I915_RND_SUBSTATE(prng, seed_prng);
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		u64 hole_size;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
		if (!count) {
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);
			break;
		}

		do {
			order = i915_random_order(count, &prng);
			if (order)
				break;
		} while (count >>= 1);
		if (!count)
			return -ENOMEM;
		GEM_BUG_ON(!order);

		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

		/*
		 * Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		GEM_BUG_ON(obj->base.size != BIT_ULL(size));

		if (i915_gem_object_pin_pages(obj)) {
			i915_gem_object_put(obj);
			kfree(order);
			break;
		}

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);
			intel_wakeref_t wakeref;

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

			if (igt_timeout(end_time,
					"%s timed out before %d/%d\n",
					__func__, n, count)) {
				hole_end = hole_start; /* quit */
				break;
			}

			if (vm->allocate_va_range &&
			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
				break;

			mock_vma.pages = obj->mm.pages;
			mock_vma.node.size = BIT_ULL(size);
			mock_vma.node.start = addr;

			wakeref = intel_runtime_pm_get(&i915->runtime_pm);
			vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
			intel_runtime_pm_put(&i915->runtime_pm, wakeref);
		}
		count = n;

		i915_random_reorder(order, count, &prng);
		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
			vm->clear_range(vm, addr, BIT_ULL(size));
		}

		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

		kfree(order);

		cleanup_freed_objects(i915);
	}

	return 0;
}

static void close_object_list(struct list_head *objects,
			      struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj, *on;
	int ignored;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, vm, NULL);
		if (!IS_ERR(vma))
			ignored = i915_vma_unbind(vma);
		/* Only ppgtt vma may be closed before the object is freed */
		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
			i915_vma_close(vma);

		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}
}

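/*
 * fill_hole() packs the hole with runs of objects whose sizes step through
 * prime multiples of the page size, binding them both top-down and
 * bottom-up, walking the object list forwards and backwards, and verifying
 * after each pass that every node landed exactly where it was asked to be.
 */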
static int fill_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	struct drm_i915_gem_object *obj;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
	unsigned long npages, prime, flags;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Try binding many VMA working inwards from either edge */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(prime, 2, max_step) {
		for (npages = 1; npages <= max_pages; npages *= prime) {
			const u64 full_size = npages << PAGE_SHIFT;
			const struct {
				const char *name;
				u64 offset;
				int step;
			} phases[] = {
				{ "top-down", hole_end, -1, },
				{ "bottom-up", hole_start, 1, },
				{ }
			}, *p;

			obj = fake_dma_object(i915, full_size);
			if (IS_ERR(obj))
				break;

			list_add(&obj->st_link, &objects);

			/*
			 * Align differing sized objects against the edges,
			 * and check we don't walk off into the void when
			 * binding them into the GTT.
			 */
			for (p = phases; p->name; p++) {
				u64 offset;

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}
			}

			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
					__func__, npages, prime)) {
				err = -EINTR;
				goto err;
			}
		}

		close_object_list(&objects, vm);
		cleanup_freed_objects(i915);
	}

	return 0;

err:
	close_object_list(&objects, vm);
	return err;
}

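/*
 * walk_hole() binds a single VMA at every non-overlapping offset within
 * the hole, unbinding as it goes, for each prime number of pages that
 * still fits.
 */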
static int walk_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
	unsigned long flags;
	u64 size;

	/* Try binding a single VMA in different positions within the hole */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(size, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;
		u64 addr;
		int err = 0;

		obj = fake_dma_object(i915, size << PAGE_SHIFT);
		if (IS_ERR(obj))
			break;

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_put;
		}

		for (addr = hole_start;
		     addr + obj->base.size < hole_end;
		     addr += obj->base.size) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
				       __func__, addr, vma->size,
				       hole_start, hole_end, err);
				goto err_close;
			}
			i915_vma_unpin(vma);

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				err = -EINVAL;
				goto err_close;
			}

			err = i915_vma_unbind(vma);
			if (err) {
				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
				       __func__, addr, vma->size, err);
				goto err_close;
			}

			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

			if (igt_timeout(end_time,
					"%s timed out at %llx\n",
					__func__, addr)) {
				err = -EINTR;
				goto err_close;
			}
		}

err_close:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_put:
		i915_gem_object_put(obj);
		if (err)
			return err;

		cleanup_freed_objects(i915);
	}

	return 0;
}

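/*
 * pot_hole() checks placement around every power-of-two (pot) boundary in
 * the hole: a two-page object is pinned so that it straddles each boundary
 * in turn, catching off-by-one and alignment bugs at every level of the
 * page-table hierarchy.
 */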
static int pot_hole(struct drm_i915_private *i915,
		    struct i915_address_space *vm,
		    u64 hole_start, u64 hole_end,
		    unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned long flags;
	unsigned int pot;
	int err = 0;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	/* Insert a pair of pages across every pot boundary within the hole */
	for (pot = fls64(hole_end - 1) - 1;
	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
	     pot--) {
		u64 step = BIT_ULL(pot);
		u64 addr;

		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr += step) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr,
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);
		}

		if (igt_timeout(end_time,
				"%s timed out after %d/%d\n",
				__func__, pot, fls64(hole_end - 1) - 1)) {
			err = -EINTR;
			goto err;
		}
	}

err:
	if (!i915_vma_is_ggtt(vma))
		i915_vma_close(vma);
err_obj:
	i915_gem_object_put(obj);
	return err;
}

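/*
 * drunk_hole() is the randomised variant of walk_hole(): for each
 * power-of-two object size, every slot in the hole is visited exactly
 * once, but in a random order.
 */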
static int drunk_hole(struct drm_i915_private *i915,
		      struct i915_address_space *vm,
		      u64 hole_start, u64 hole_end,
		      unsigned long end_time)
{
	I915_RND_STATE(prng);
	unsigned int size;
	unsigned long flags;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		struct i915_vma *vma;
		u64 hole_size;
		int err = -ENODEV;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
		if (!count) {
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);
			break;
		}

		do {
			order = i915_random_order(count, &prng);
			if (order)
				break;
		} while (count >>= 1);
		if (!count)
			return -ENOMEM;
		GEM_BUG_ON(!order);

		/*
		 * Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		GEM_BUG_ON(vma->size != BIT_ULL(size));

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr, BIT_ULL(size),
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, BIT_ULL(size));
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);

			if (igt_timeout(end_time,
					"%s timed out after %d/%d\n",
					__func__, n, count)) {
				err = -EINTR;
				goto err;
			}
		}

err:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_obj:
		i915_gem_object_put(obj);
		kfree(order);
		if (err)
			return err;

		cleanup_freed_objects(i915);
	}

	return 0;
}

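/*
 * __shrink_hole() fills the hole front-to-back with objects of doubling
 * size; shrink_hole() wraps it with fault injection on the vm's page-table
 * allocations (at every prime interval) so that binding is exercised under
 * shrinker pressure.
 */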
static int __shrink_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
	unsigned int order = 12;
	LIST_HEAD(objects);
	int err = 0;
	u64 addr;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (addr = hole_start; addr < hole_end; ) {
		struct i915_vma *vma;
		u64 size = BIT_ULL(order++);

		size = min(size, hole_end - addr);
		obj = fake_dma_object(i915, size);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		GEM_BUG_ON(vma->size != size);

		err = i915_vma_pin(vma, 0, 0, addr | flags);
		if (err) {
			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
			       __func__, addr, size, hole_start, hole_end, err);
			break;
		}

		if (!drm_mm_node_allocated(&vma->node) ||
		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
			pr_err("%s incorrect at %llx + %llx\n",
			       __func__, addr, size);
			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			err = -EINVAL;
			break;
		}

		i915_vma_unpin(vma);
		addr += size;

		if (igt_timeout(end_time,
				"%s timed out at offset %llx [%llx - %llx]\n",
				__func__, addr, hole_start, hole_end)) {
			err = -EINTR;
			break;
		}
	}

	close_object_list(&objects, vm);
	cleanup_freed_objects(i915);
	return err;
}

static int shrink_hole(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned long prime;
	int err;

	vm->fault_attr.probability = 999;
	atomic_set(&vm->fault_attr.times, -1);

	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
		vm->fault_attr.interval = prime;
		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
		if (err)
			break;
	}

	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

	return err;
}

static int shrink_boom(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned int sizes[] = { SZ_2M, SZ_1G };
	struct drm_i915_gem_object *purge;
	struct drm_i915_gem_object *explode;
	int err;
	int i;

	/*
	 * Catch the case which shrink_hole seems to miss. The setup here
	 * requires invoking the shrinker as we do the alloc_pt/alloc_pd,
	 * while ensuring that all vma associated with the respective pd/pdp
	 * are unpinned at the time.
	 */

	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int size = sizes[i];
		struct i915_vma *vma;

		purge = fake_dma_object(i915, size);
		if (IS_ERR(purge))
			return PTR_ERR(purge);

		vma = i915_vma_instance(purge, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_purge;
		}

		err = i915_vma_pin(vma, 0, 0, flags);
		if (err)
			goto err_purge;

		/* Should now be ripe for purging */
		i915_vma_unpin(vma);

		explode = fake_dma_object(i915, size);
		if (IS_ERR(explode)) {
			err = PTR_ERR(explode);
			goto err_purge;
		}

		vm->fault_attr.probability = 100;
		vm->fault_attr.interval = 1;
		atomic_set(&vm->fault_attr.times, -1);

		vma = i915_vma_instance(explode, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_explode;
		}

		err = i915_vma_pin(vma, 0, 0, flags | size);
		if (err)
			goto err_explode;

		i915_vma_unpin(vma);

		i915_gem_object_put(purge);
		i915_gem_object_put(explode);

		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
		cleanup_freed_objects(i915);
	}

	return 0;

err_explode:
	i915_gem_object_put(explode);
err_purge:
	i915_gem_object_put(purge);
	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
	return err;
}

static int exercise_ppgtt(struct drm_i915_private *dev_priv,
			  int (*func)(struct drm_i915_private *i915,
				      struct i915_address_space *vm,
				      u64 hole_start, u64 hole_end,
				      unsigned long end_time))
{
	struct drm_file *file;
	struct i915_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	if (!HAS_FULL_PPGTT(dev_priv))
		return 0;

	file = mock_file(dev_priv);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&dev_priv->drm.struct_mutex);
	ppgtt = i915_ppgtt_create(dev_priv);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}
	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
	GEM_BUG_ON(ppgtt->vm.closed);

	err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);

	i915_vm_put(&ppgtt->vm);
out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	mock_file_free(dev_priv, file);
	return err;
}

static int igt_ppgtt_fill(void *arg)
{
	return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
	return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
	return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
	return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
	return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
	return exercise_ppgtt(arg, shrink_hole);
}

static int igt_ppgtt_shrink_boom(void *arg)
{
	return exercise_ppgtt(arg, shrink_boom);
}

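/*
 * sort_holes() is a list_sort() comparator ordering drm_mm holes by
 * ascending start address, so that exercise_ggtt() can walk the holes from
 * low to high addresses.
 */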
static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
{
	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

	if (a->start < b->start)
		return -1;
	else
		return 1;
}

static int exercise_ggtt(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	u64 hole_start, hole_end, last = 0;
	struct drm_mm_node *node;
	IGT_TIMEOUT(end_time);
	int err = 0;

	mutex_lock(&i915->drm.struct_mutex);
restart:
	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
		if (hole_start < last)
			continue;

		if (ggtt->vm.mm.color_adjust)
			ggtt->vm.mm.color_adjust(node, 0,
						 &hole_start, &hole_end);
		if (hole_start >= hole_end)
			continue;

		err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
		if (err)
			break;

		/* As we have manipulated the drm_mm, the list may be corrupt */
		last = hole_end;
		goto restart;
	}
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

static int igt_ggtt_fill(void *arg)
{
	return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
	return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
	return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
	return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
	return exercise_ggtt(arg, lowlevel_hole);
}

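/*
 * igt_ggtt_page() reserves a block in the mappable aperture, points every
 * GTT page in it at the same physical page via vm.insert_page(), then
 * writes a distinct dword through each mapping and reads them all back in
 * random order to verify the PTE programming.
 */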
static int igt_ggtt_page(void *arg)
{
	const unsigned int count = PAGE_SIZE/sizeof(u32);
	I915_RND_STATE(prng);
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	struct drm_mm_node tmp;
	unsigned int *order, n;
	int err;

	mutex_lock(&i915->drm.struct_mutex);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_unlock;
	}

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_free;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
					  count * PAGE_SIZE, 0,
					  I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);
	if (err)
		goto out_unpin;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + n * PAGE_SIZE;

		ggtt->vm.insert_page(&ggtt->vm,
				     i915_gem_object_get_dma_address(obj, 0),
				     offset, I915_CACHE_NONE, 0);
	}

	order = i915_random_order(count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_remove;
	}

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		iowrite32(n, vaddr + n);
		io_mapping_unmap_atomic(vaddr);
	}
	i915_gem_flush_ggtt_writes(i915);

	i915_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;
		u32 val;

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		val = ioread32(vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		if (val != n) {
			pr_err("insert page failed: found %d, expected %d\n",
			       val, n);
			err = -EINVAL;
			break;
		}
	}

	kfree(order);
out_remove:
	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	drm_mm_remove_node(&tmp);
out_unpin:
	i915_gem_object_unpin_pages(obj);
out_free:
	i915_gem_object_put(obj);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

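/*
 * track_vma_bind() performs the bookkeeping a real bind would do (bind
 * count, page pinning, placing the vma on the vm's bound list) for nodes
 * that the tests reserve/insert directly via the drm_mm, bypassing
 * i915_vma_pin().
 */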
static void track_vma_bind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	atomic_inc(&obj->bind_count);
	__i915_gem_object_pin_pages(obj);

	vma->pages = obj->mm.pages;

	mutex_lock(&vma->vm->mutex);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
	mutex_unlock(&vma->vm->mutex);
}

static int exercise_mock(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	const u64 limit = totalram_pages() << PAGE_SHIFT;
	struct i915_gem_context *ctx;
	IGT_TIMEOUT(end_time);
	int err;

	ctx = mock_context(i915, "mock");
	if (!ctx)
		return -ENOMEM;

	err = func(i915, ctx->vm, 0, min(ctx->vm->total, limit), end_time);

	mock_context_close(ctx);
	return err;
}

static int igt_mock_fill(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, fill_hole);
}

static int igt_mock_walk(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, walk_hole);
}

static int igt_mock_pot(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, drunk_hole);
}

static int igt_gtt_reserve(void *arg)
{
	struct i915_ggtt *ggtt = arg;
	struct drm_i915_gem_object *obj, *on;
	LIST_HEAD(objects);
	u64 total;
	int err = -ENODEV;

	/*
	 * i915_gem_gtt_reserve() tries to reserve the precise range
	 * for the node, and evicts if it has to. So our test checks that
	 * it can give us the requested space and prevent overlaps.
	 */

	/* Start by filling the GGTT */
	for (total = 0;
	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += 2 * I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* Now we start forcing evictions */
	for (total = I915_GTT_PAGE_SIZE;
	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += 2 * I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then try at random */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		offset = random_offset(0, ggtt->vm.total,
				       2*I915_GTT_PAGE_SIZE,
				       I915_GTT_MIN_ALIGNMENT);

		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
					   obj->base.size,
					   offset,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
			       vma->node.start, vma->node.size,
			       offset, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}

static int igt_gtt_insert(void *arg)
{
	struct i915_ggtt *ggtt = arg;
	struct drm_i915_gem_object *obj, *on;
	struct drm_mm_node tmp = {};
	const struct invalid_insert {
		u64 size;
		u64 alignment;
		u64 start, end;
	} invalid_insert[] = {
		{
			ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
			0, ggtt->vm.total,
		},
		{
			2*I915_GTT_PAGE_SIZE, 0,
			0, I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)I915_GTT_PAGE_SIZE, 0,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
		},
		{}
	}, *ii;
	LIST_HEAD(objects);
	u64 total;
	int err = -ENODEV;

	/*
	 * i915_gem_gtt_insert() tries to allocate some free space in the GTT
	 * to the node, evicting if required.
	 */

	/* Check a couple of obviously invalid requests */
	for (ii = invalid_insert; ii->size; ii++) {
		err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
					  ii->size, ii->alignment,
					  I915_COLOR_UNEVICTABLE,
					  ii->start, ii->end,
					  0);
		if (err != -ENOSPC) {
			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
			       ii->size, ii->alignment, ii->start, ii->end,
			       err);
			return -EINVAL;
		}
	}

	/* Start by filling the GGTT */
	for (total = 0;
	     total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, ggtt->vm.total,
					  0);
		if (err == -ENOSPC) {
			/* maxed out the GGTT space */
			i915_gem_object_put(obj);
			break;
		}
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);
		__i915_vma_pin(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

	list_for_each_entry(obj, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		if (!drm_mm_node_allocated(&vma->node)) {
			pr_err("VMA was unexpectedly evicted!\n");
			err = -EINVAL;
			goto out;
		}

		__i915_vma_unpin(vma);
	}

	/* If we then reinsert, we should find the same hole */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		offset = vma->node.start;

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, ggtt->vm.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset) {
			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
			       offset, vma->node.start);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then force evictions */
	for (total = 0;
	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += 2 * I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, ggtt->vm.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}

int i915_gem_gtt_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_drunk),
		SUBTEST(igt_mock_walk),
		SUBTEST(igt_mock_pot),
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_gtt_reserve),
		SUBTEST(igt_gtt_insert),
	};
	struct drm_i915_private *i915;
	struct i915_ggtt *ggtt;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
	if (!ggtt) {
		err = -ENOMEM;
		goto out_put;
	}
	mock_init_ggtt(i915, ggtt);

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, ggtt);
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	i915_gem_drain_freed_objects(i915);

	mock_fini_ggtt(ggtt);
	kfree(ggtt);
out_put:
	drm_dev_put(&i915->drm);
	return err;
}

int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ppgtt_alloc),
		SUBTEST(igt_ppgtt_lowlevel),
		SUBTEST(igt_ppgtt_drunk),
		SUBTEST(igt_ppgtt_walk),
		SUBTEST(igt_ppgtt_pot),
		SUBTEST(igt_ppgtt_fill),
		SUBTEST(igt_ppgtt_shrink),
		SUBTEST(igt_ppgtt_shrink_boom),
		SUBTEST(igt_ggtt_lowlevel),
		SUBTEST(igt_ggtt_drunk),
		SUBTEST(igt_ggtt_walk),
		SUBTEST(igt_ggtt_pot),
		SUBTEST(igt_ggtt_fill),
		SUBTEST(igt_ggtt_page),
	};

	GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));

	return i915_subtests(tests, i915);
}