/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gem/i915_gem_region.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_mmap.h"

struct tile {
	unsigned int width;
	unsigned int height;
	unsigned int stride;
	unsigned int size;
	unsigned int tiling;
	unsigned int swizzle;
};

static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

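/*
 * Map an offset in the (apparently linear) fenced GTT view of a tiled
 * object back to the location in its backing store where the write is
 * expected to land, replaying the X/Y tile layout and the fence's bit-6
 * swizzle pattern.
 */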
static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else if (tile->width == 128) {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	} else {
		const unsigned int ytile_span = 32;
		const unsigned int ytile_height = 256;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	switch (tile->swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

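/*
 * Pick one random page, write its index through a partial GGTT mapping of
 * the tiled object, then read the backing store through the CPU (kmap) to
 * confirm the value landed at the offset predicted by tiled_offset().
 */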
static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 const struct tile *tile,
				 struct rnd_state *prng)
{
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	unsigned long page;
	u32 __iomem *io;
	struct page *p;
	unsigned int n;
	u64 offset;
	u32 *cpu;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	page = i915_prandom_u32_max_state(npages, prng);
	view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);

	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma)) {
		pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(vma));
		return PTR_ERR(vma);
	}

	n = page - view.partial.offset;
	GEM_BUG_ON(n >= view.partial.size);

	io = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(io)) {
		pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(io));
		err = PTR_ERR(io);
		goto out;
	}

	iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
	i915_vma_unpin_iomap(vma);

	offset = tiled_offset(tile, page << PAGE_SHIFT);
	if (offset >= obj->base.size)
		goto out;

	intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);

	p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	cpu = kmap(p) + offset_in_page(offset);
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	if (*cpu != (u32)page) {
		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
		       page, n,
		       view.partial.offset,
		       view.partial.size,
		       vma->size >> PAGE_SHIFT,
		       tile->tiling ? tile_row_pages(obj) : 0,
		       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
		       offset >> PAGE_SHIFT,
		       (unsigned int)offset_in_page(offset),
		       offset,
		       (u32)page, *cpu);
		err = -EINVAL;
	}
	*cpu = 0;
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	kunmap(p);

out:
	__i915_vma_put(vma);
	return err;
}

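/*
 * Exhaustive variant of check_partial_mapping(): walk a prime-numbered
 * selection of pages, performing the same write-through-GGTT /
 * read-through-CPU check for each, until an error or the timeout.
 */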
static int check_partial_mappings(struct drm_i915_gem_object *obj,
				  const struct tile *tile,
				  unsigned long end_time)
{
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_vma *vma;
	unsigned long page;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	for_each_prime_number_from(page, 1, npages) {
		struct i915_ggtt_view view =
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
		u32 __iomem *io;
		struct page *p;
		unsigned int n;
		u64 offset;
		u32 *cpu;

		GEM_BUG_ON(view.partial.size > nreal);
		cond_resched();

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(vma));
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(io));
			return PTR_ERR(io);
		}

		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
		i915_vma_unpin_iomap(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);

		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       tile->tiling ? tile_row_pages(obj) : 0,
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			err = -EINVAL;
		}
		*cpu = 0;
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);
		if (err)
			return err;

		__i915_vma_put(vma);

		if (igt_timeout(end_time,
				"%s: timed out after tiling=%d stride=%d\n",
				__func__, tile->tiling, tile->stride))
			return -EINTR;
	}

	return 0;
}

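/*
 * Fill in the tile geometry (width, height, log2 tile size) for the
 * requested tiling mode on this platform and return the maximum pitch,
 * expressed in units of tile widths.
 */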
static unsigned int
setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) <= 2) {
		tile->height = 16;
		tile->width = 128;
		tile->size = 11;
	} else if (tile->tiling == I915_TILING_Y &&
		   HAS_128_BYTE_Y_TILING(i915)) {
		tile->height = 32;
		tile->width = 128;
		tile->size = 12;
	} else {
		tile->height = 8;
		tile->width = 512;
		tile->size = 12;
	}

	if (INTEL_GEN(i915) < 4)
		return 8192 / tile->width;
	else if (INTEL_GEN(i915) < 7)
		return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
	else
		return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
}

static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12;
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	int tiling;
	int err;

	if (!i915_ggtt_has_aperture(&i915->ggtt))
		return 0;

	/*
	 * We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is bigger than can
	 * possibly be mmapped as a whole, and so we must use partial GGTT
	 * vma. We then check that a write through each partial GGTT vma
	 * lands in the backing-store page predicted by tiled_offset().
	 */

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	if (1) {
		IGT_TIMEOUT(end);
		struct tile tile;

		tile.height = 1;
		tile.width = 1;
		tile.size = 0;
		tile.stride = 0;
		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
		tile.tiling = I915_TILING_NONE;

		err = check_partial_mappings(obj, &tile, end);
		if (err && err != -EINTR)
			goto out_unlock;
	}

	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		IGT_TIMEOUT(end);
		unsigned int max_pitch;
		unsigned int pitch;
		struct tile tile;

		if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
			/*
			 * The swizzling pattern is effectively unknown as
			 * it varies with the physical address of each page
			 * (bit-17 swizzling), so we cannot predict where
			 * our write will land.
			 */
			break;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = i915->ggtt.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->ggtt.bit_6_swizzle_y;
			break;
		}

		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		max_pitch = setup_tile_size(&tile, i915);

		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = check_partial_mappings(obj, &tile, end);
			if (err == -EINTR)
				goto next_tiling;
			if (err)
				goto out_unlock;

			if (pitch > 2 && INTEL_GEN(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}

			if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

		if (INTEL_GEN(i915) >= 4) {
			for_each_prime_number(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

next_tiling: ;
	}

out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

static int igt_smoke_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12;
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	I915_RND_STATE(prng);
	unsigned long count;
	IGT_TIMEOUT(end);
	int err;

	if (!i915_ggtt_has_aperture(&i915->ggtt))
		return 0;

	/*
	 * igt_partial_tiling() does an exhaustive check of partial tiling
	 * chunking, but will undoubtedly run out of time. Here, we do a
	 * randomised search and hope that over many runs with different
	 * seeds we achieve equivalent coverage.
	 */

	if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		return 0;

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	count = 0;
	do {
		struct tile tile;

		tile.tiling =
			i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
		switch (tile.tiling) {
		case I915_TILING_NONE:
			tile.height = 1;
			tile.width = 1;
			tile.size = 0;
			tile.stride = 0;
			tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
			break;

		case I915_TILING_X:
			tile.swizzle = i915->ggtt.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->ggtt.bit_6_swizzle_y;
			break;
		}

		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		if (tile.tiling != I915_TILING_NONE) {
			unsigned int max_pitch = setup_tile_size(&tile, i915);

			tile.stride =
				i915_prandom_u32_max_state(max_pitch, &prng);
			tile.stride = (1 + tile.stride) * tile.width;
			if (INTEL_GEN(i915) < 4)
				tile.stride = rounddown_pow_of_two(tile.stride);
		}

		err = check_partial_mapping(obj, &tile, &prng);
		if (err)
			break;

		count++;
	} while (!__igt_timeout(end, NULL));

	pr_info("%s: Completed %lu trials\n", __func__, count);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

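/*
 * Submit an empty request against the object on every uabi engine so that
 * it is tracked as busy (written by the GPU), then drop our reference; the
 * caller expects the object to be reaped once the requests complete.
 */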
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		int err;

		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			return err;

		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq)) {
			i915_vma_unpin(vma);
			return PTR_ERR(rq);
		}

		i915_vma_lock(vma);
		err = i915_request_await_object(rq, vma->obj, true);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq,
						      EXEC_OBJECT_WRITE);
		i915_vma_unlock(vma);

		i915_request_add(rq);
		i915_vma_unpin(vma);
		if (err)
			return err;
	}

	i915_gem_object_put(obj);
	return 0;
}

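/*
 * Try to attach a GTT mmap offset to a freshly created object of the given
 * size and report whether the attempt returned the expected error code.
 */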
static bool assert_mmap_offset(struct drm_i915_private *i915,
			       unsigned long size,
			       int expected)
{
	struct drm_i915_gem_object *obj;
	struct i915_mmap_offset *mmo;

	obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return false;

	mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
	i915_gem_object_put(obj);

	return PTR_ERR_OR_ZERO(mmo) == expected;
}

static void disable_retire_worker(struct drm_i915_private *i915)
{
	i915_gem_driver_unregister__shrinker(i915);
	intel_gt_pm_get(&i915->gt);
	cancel_delayed_work_sync(&i915->gt.requests.retire_work);
}

static void restore_retire_worker(struct drm_i915_private *i915)
{
	igt_flush_test(i915);
	intel_gt_pm_put(&i915->gt);
	i915_gem_driver_register__shrinker(i915);
}

static void mmap_offset_lock(struct drm_i915_private *i915)
	__acquires(&i915->drm.vma_offset_manager->vm_lock)
{
	write_lock(&i915->drm.vma_offset_manager->vm_lock);
}

static void mmap_offset_unlock(struct drm_i915_private *i915)
	__releases(&i915->drm.vma_offset_manager->vm_lock)
{
	write_unlock(&i915->drm.vma_offset_manager->vm_lock);
}

static int igt_mmap_offset_exhaustion(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *hole, *next;
	struct i915_mmap_offset *mmo;
	int loop, err = 0;

	/* Disable the background retire worker */
	disable_retire_worker(i915);
	GEM_BUG_ON(!i915->gt.awake);
	intel_gt_retire_requests(&i915->gt);
	i915_gem_drain_freed_objects(i915);

	/* Trim the device mmap space down to a single page */
	mmap_offset_lock(i915);
	loop = 1; /* leave a single page free in the first hole */
	list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
		struct drm_mm_node *resv;

		resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
		if (!resv) {
			err = -ENOMEM;
			goto out_park;
		}

		resv->start = drm_mm_hole_node_start(hole) + loop;
		resv->size = hole->hole_size - loop;
		resv->color = -1ul;
		loop = 0;

		if (!resv->size) {
			kfree(resv);
			continue;
		}

		pr_debug("Reserving hole [%llx + %llx]\n",
			 resv->start, resv->size);

		err = drm_mm_reserve_node(mm, resv);
		if (err) {
			pr_err("Failed to trim VMA manager, err=%d\n", err);
			kfree(resv);
			goto out_park;
		}
	}
	GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
	mmap_offset_unlock(i915);

	/* Just fits! */
	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
		pr_err("Unable to insert object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Too large */
	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Fill the hole; further allocation attempts should then fail */
	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out;
	}

	mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
	if (IS_ERR(mmo)) {
		pr_err("Unable to insert object into reclaimed hole\n");
		err = PTR_ERR(mmo);
		goto err_obj;
	}

	if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
		err = -EINVAL;
		goto err_obj;
	}

	i915_gem_object_put(obj);

	/* Now fill with busy dead objects that we expect to reap */
	for (loop = 0; loop < 3; loop++) {
		if (intel_gt_is_wedged(&i915->gt))
			break;

		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = make_obj_busy(obj);
		if (err) {
			pr_err("[loop %d] Failed to busy the object\n", loop);
			goto err_obj;
		}
	}

out:
	mmap_offset_lock(i915);
out_park:
	drm_mm_for_each_node_safe(hole, next, mm) {
		if (hole->color != -1ul)
			continue;

		drm_mm_remove_node(hole);
		kfree(hole);
	}
	mmap_offset_unlock(i915);
	restore_retire_worker(i915);
	return err;
err_obj:
	i915_gem_object_put(obj);
	goto out;
}

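/*
 * The following helpers fill an object with POISON_INUSE and later check
 * for POISON_FREE, either through a GGTT iomap (gtt_set/gtt_check) or a WC
 * CPU map (wc_set/wc_check), so that writes made through a userspace mmap
 * in between can be verified against the backing store.
 */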
static int gtt_set(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	memset_io(map, POISON_INUSE, obj->base.size);
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int gtt_check(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int wc_set(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, POISON_INUSE, obj->base.size);
	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	return 0;
}

static int wc_check(struct drm_i915_gem_object *obj)
{
	void *vaddr;
	int err = 0;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (WC)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_gem_object_unpin_map(obj);

	return err;
}

static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
	if (type == I915_MMAP_TYPE_GTT &&
	    !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
		return false;

	if (type != I915_MMAP_TYPE_GTT &&
	    !i915_gem_object_type_has(obj,
				      I915_GEM_OBJECT_HAS_STRUCT_PAGE |
				      I915_GEM_OBJECT_HAS_IOMEM))
		return false;

	return true;
}

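/*
 * Map the object into userspace via the requested mmap type, verify that
 * the vm_area_struct points back at our mmap_offset, then read back the
 * POISON_INUSE pattern and overwrite it with POISON_FREE so the write path
 * can be checked against the backing store afterwards.
 */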
#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
static int __igt_mmap(struct drm_i915_private *i915,
		      struct drm_i915_gem_object *obj,
		      enum i915_mmap_type type)
{
	struct i915_mmap_offset *mmo;
	struct vm_area_struct *area;
	unsigned long addr;
	int err, i;

	if (!can_mmap(obj, type))
		return 0;

	err = wc_set(obj);
	if (err == -ENXIO)
		err = gtt_set(obj);
	if (err)
		return err;

	mmo = mmap_offset_attach(obj, type, NULL);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);

	area = find_vma(current->mm, addr);
	if (!area) {
		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
		       obj->mm.region->name);
		err = -EINVAL;
		goto out_unmap;
	}

	if (area->vm_private_data != mmo) {
		pr_err("%s: vm_area_struct did not point back to our mmap_offset object!\n",
		       obj->mm.region->name);
		err = -EINVAL;
		goto out_unmap;
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
		u32 x;

		if (get_user(x, ux)) {
			pr_err("%s: Unable to read from mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}

		if (x != expand32(POISON_INUSE)) {
			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
			       obj->mm.region->name,
			       i * sizeof(x), x, expand32(POISON_INUSE));
			err = -EINVAL;
			goto out_unmap;
		}

		x = expand32(POISON_FREE);
		if (put_user(x, ux)) {
			pr_err("%s: Unable to write to mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(&i915->gt);

	err = wc_check(obj);
	if (err == -ENXIO)
		err = gtt_check(obj);
out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		unsigned long sizes[] = {
			PAGE_SIZE,
			mr->min_page_size,
			SZ_4M,
		};
		int i;

		for (i = 0; i < ARRAY_SIZE(sizes); i++) {
			struct drm_i915_gem_object *obj;
			int err;

			obj = i915_gem_object_create_region(mr, sizes[i], 0);
			if (obj == ERR_PTR(-ENODEV))
				continue;

			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);

			i915_gem_object_put(obj);
			if (err)
				return err;
		}
	}

	return 0;
}

static const char *repr_mmap_type(enum i915_mmap_type type)
{
	switch (type) {
	case I915_MMAP_TYPE_GTT: return "gtt";
	case I915_MMAP_TYPE_WB: return "wb";
	case I915_MMAP_TYPE_WC: return "wc";
	case I915_MMAP_TYPE_UC: return "uc";
	default: return "unknown";
	}
}

static bool can_access(const struct drm_i915_gem_object *obj)
{
	unsigned int flags =
		I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;

	return i915_gem_object_type_has(obj, flags);
}

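/*
 * Check that ptrace-style access via access_process_vm() works on the
 * userspace mmap: write A directly, read it back through
 * access_process_vm(), then write B through access_process_vm() and read
 * it back directly.
 */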
static int __igt_mmap_access(struct drm_i915_private *i915,
			     struct drm_i915_gem_object *obj,
			     enum i915_mmap_type type)
{
	struct i915_mmap_offset *mmo;
	unsigned long __user *ptr;
	unsigned long A, B;
	unsigned long x, y;
	unsigned long addr;
	int err;

	memset(&A, 0xAA, sizeof(A));
	memset(&B, 0xBB, sizeof(B));

	if (!can_mmap(obj, type) || !can_access(obj))
		return 0;

	mmo = mmap_offset_attach(obj, type, NULL);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;
	ptr = (unsigned long __user *)addr;

	err = __put_user(A, ptr);
	if (err) {
		pr_err("%s(%s): failed to write into user mmap\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	intel_gt_flush_ggtt_writes(&i915->gt);

	err = access_process_vm(current, addr, &x, sizeof(x), 0);
	if (err != sizeof(x)) {
		pr_err("%s(%s): access_process_vm() read failed\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
	if (err != sizeof(B)) {
		pr_err("%s(%s): access_process_vm() write failed\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	intel_gt_flush_ggtt_writes(&i915->gt);

	err = __get_user(y, ptr);
	if (err) {
		pr_err("%s(%s): failed to read from user mmap\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	if (x != A || y != B) {
		pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
		       obj->mm.region->name, repr_mmap_type(type),
		       x, y);
		err = -EINVAL;
		goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_access(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

static int __igt_mmap_gpu(struct drm_i915_private *i915,
			  struct drm_i915_gem_object *obj,
			  enum i915_mmap_type type)
{
	struct intel_engine_cs *engine;
	struct i915_mmap_offset *mmo;
	unsigned long addr;
	u32 __user *ux;
	u32 bbe;
	int err;

	/*
	 * Verify that the mmap access into the backing store aligns with
	 * that of the GPU, i.e. that mmap is indeed writing into the same
	 * page as is being read by the GPU.
	 */

	if (!can_mmap(obj, type))
		return 0;

	err = wc_set(obj);
	if (err == -ENXIO)
		err = gtt_set(obj);
	if (err)
		return err;

	mmo = mmap_offset_attach(obj, type, NULL);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	ux = u64_to_user_ptr((u64)addr);
	bbe = MI_BATCH_BUFFER_END;
	if (put_user(bbe, ux)) {
		pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
		err = -EFAULT;
		goto out_unmap;
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(&i915->gt);

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unmap;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			goto out_unmap;

		rq = i915_request_create(engine->kernel_context);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_unpin;
		}

		i915_vma_lock(vma);
		err = i915_request_await_object(rq, vma->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq, 0);
		i915_vma_unlock(vma);
		if (err == 0)
			err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
		i915_request_get(rq);
		i915_request_add(rq);

		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			struct drm_printer p =
				drm_info_printer(engine->i915->drm.dev);

			pr_err("%s(%s, %s): Failed to execute batch\n",
			       __func__, engine->name, obj->mm.region->name);
			intel_engine_dump(engine, &p,
					  "%s\n", engine->name);

			intel_gt_set_wedged(engine->gt);
			err = -EIO;
		}
		i915_request_put(rq);

out_unpin:
		i915_vma_unpin(vma);
		if (err)
			goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

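/*
 * PTE walkers used with apply_to_page_range() to assert that every page in
 * the user mapping is either still present (before revoke) or has been
 * cleared (after revoke).
 */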
static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
{
	if (!pte_present(*pte) || pte_none(*pte)) {
		pr_err("missing PTE:%lx\n",
		       (addr - (unsigned long)data) >> PAGE_SHIFT);
		return -EINVAL;
	}

	return 0;
}

static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
{
	if (pte_present(*pte) && !pte_none(*pte)) {
		pr_err("present PTE:%lx; expected to be revoked\n",
		       (addr - (unsigned long)data) >> PAGE_SHIFT);
		return -EINVAL;
	}

	return 0;
}

static int check_present(unsigned long addr, unsigned long len)
{
	return apply_to_page_range(current->mm, addr, len,
				   check_present_pte, (void *)addr);
}

static int check_absent(unsigned long addr, unsigned long len)
{
	return apply_to_page_range(current->mm, addr, len,
				   check_absent_pte, (void *)addr);
}

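/*
 * Touch every page of the user mapping (plus the final byte) so that the
 * PTEs are populated before we check for their presence.
 */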
static int prefault_range(u64 start, u64 len)
{
	const char __user *addr, *end;
	char __maybe_unused c;
	int err;

	addr = u64_to_user_ptr(start);
	end = addr + len;

	for (; addr < end; addr += PAGE_SIZE) {
		err = __get_user(c, addr);
		if (err)
			return err;
	}

	return __get_user(c, end - 1);
}

static int __igt_mmap_revoke(struct drm_i915_private *i915,
			     struct drm_i915_gem_object *obj,
			     enum i915_mmap_type type)
{
	struct i915_mmap_offset *mmo;
	unsigned long addr;
	int err;

	if (!can_mmap(obj, type))
		return 0;

	mmo = mmap_offset_attach(obj, type, NULL);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	err = prefault_range(addr, obj->base.size);
	if (err)
		goto out_unmap;

	err = check_present(addr, obj->base.size);
	if (err) {
		pr_err("%s: was not present\n", obj->mm.region->name);
		goto out_unmap;
	}

	/*
	 * After unbinding the object from the GGTT, its address may be
	 * reused for other objects. Ergo we have to revoke the previous
	 * mmap PTE access as it no longer points to the same object.
	 */
	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err) {
		pr_err("Failed to unbind object!\n");
		goto out_unmap;
	}

	if (type != I915_MMAP_TYPE_GTT) {
		__i915_gem_object_put_pages(obj);
		if (i915_gem_object_has_pages(obj)) {
			pr_err("Failed to put-pages object!\n");
			err = -EINVAL;
			goto out_unmap;
		}
	}

	err = check_absent(addr, obj->base.size);
	if (err) {
		pr_err("%s: was not absent\n", obj->mm.region->name);
		goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_revoke(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_partial_tiling),
		SUBTEST(igt_smoke_tiling),
		SUBTEST(igt_mmap_offset_exhaustion),
		SUBTEST(igt_mmap),
		SUBTEST(igt_mmap_access),
		SUBTEST(igt_mmap_revoke),
		SUBTEST(igt_mmap_gpu),
	};

	return i915_subtests(tests, i915);
}