#include "../i915_selftest.h"

#include "mock_gem_device.h"
#include "huge_gem_object.h"

static int igt_gem_object(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err = -ENOMEM;

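	/* Basic test to ensure we can create an object */
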
	obj = i915_gem_object_create(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("i915_gem_object_create failed, err=%d\n", err);
		goto out;
	}

	err = 0;
	i915_gem_object_put(obj);
out:
	return err;
}

static int igt_phys_object(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err;

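	/*
	 * Create an object and bind it to a contiguous set of physical
	 * pages, i.e. exercise the i915_gem_object_phys API.
	 */
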
	obj = i915_gem_object_create(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("i915_gem_object_create failed, err=%d\n", err);
		goto out;
	}

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
	mutex_unlock(&i915->drm.struct_mutex);
	if (err) {
		pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
		goto out_obj;
	}

	if (obj->ops != &i915_gem_phys_ops) {
		pr_err("i915_gem_object_attach_phys did not create a phys object\n");
		err = -EINVAL;
		goto out_obj;
	}

	if (!atomic_read(&obj->mm.pages_pin_count)) {
		pr_err("i915_gem_object_attach_phys did not pin its phys pages\n");
		err = -EINVAL;
		goto out_obj;
	}

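	/* Make the object dirty so that put_pages must do copy back the data */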
	mutex_lock(&i915->drm.struct_mutex);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	mutex_unlock(&i915->drm.struct_mutex);
	if (err) {
		pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n",
		       err);
		goto out_obj;
	}

out_obj:
	i915_gem_object_put(obj);
out:
	return err;
}

static int igt_gem_huge(void *arg)
{
	const unsigned int nreal = 509;
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	unsigned int n;
	int err;

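	/* Basic sanitycheck of our huge fake object allocation */
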
	obj = huge_gem_object(i915,
			      nreal * PAGE_SIZE,
			      i915->ggtt.base.total + PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	for (n = 0; n < obj->base.size / PAGE_SIZE; n++) {
		if (i915_gem_object_get_page(obj, n) !=
		    i915_gem_object_get_page(obj, n % nreal)) {
			pr_err("Page lookup mismatch at index %u [%u]\n",
			       n, n % nreal);
			err = -EINVAL;
			goto out_unpin;
		}
	}

out_unpin:
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

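/*
 * One tiling configuration under test: the tile dimensions in bytes
 * (width) and rows (height), log2 of the tile size, the surface stride
 * in bytes, and the bit-6 swizzle mode applied by the fence.
 */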
struct tile {
	unsigned int width;
	unsigned int height;
	unsigned int stride;
	unsigned int size;
	unsigned int tiling;
	unsigned int swizzle;
};

static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

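/*
 * Convert a linear GTT offset v into the offset within the object's
 * backing store, applying the tile layout and the fence's bit-6
 * swizzling, so that we can check writes by hand from the CPU.
 */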
static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 32 * ytile_span;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	switch (tile->swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

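/*
 * Write a distinctive value through each partial GGTT vma of the object
 * and verify from the CPU that the write landed at the expected
 * swizzled offset within the backing store.
 */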
static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 const struct tile *tile,
				 unsigned long end_time)
{
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_vma *vma;
	unsigned long page;
	int err;

	if (igt_timeout(end_time,
			"%s: timed out before tiling=%d stride=%d\n",
			__func__, tile->tiling, tile->stride))
		return -EINTR;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	for_each_prime_number_from(page, 1, npages) {
		struct i915_ggtt_view view =
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
		u32 __iomem *io;
		struct page *p;
		unsigned int n;
		u64 offset;
		u32 *cpu;

		GEM_BUG_ON(view.partial.size > nreal);

		err = i915_gem_object_set_to_gtt_domain(obj, true);
		if (err) {
			pr_err("Failed to flush to GTT write domain; err=%d\n",
			       err);
			return err;
		}

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(vma));
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(io));
			return PTR_ERR(io);
		}

		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
		i915_vma_unpin_iomap(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       tile_row_pages(obj),
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			err = -EINVAL;
		}
		*cpu = 0;
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);
		if (err)
			return err;
	}

	return 0;
}

static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12;
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int tiling;
	int err;

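	/*
	 * We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is larger than can
	 * possibly be mmaped as a whole, and so we must use partial GGTT vma.
	 * We then check that a write through each partial GGTT vma ends up
	 * in the right set of pages within the object, and with the expected
	 * tiling, which we verify by manual swizzling.
	 */
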
	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.base.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

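	/* Sanity check the untiled, unswizzled case first */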
	if (1) {
		IGT_TIMEOUT(end);
		struct tile tile;

		tile.height = 1;
		tile.width = 1;
		tile.size = 0;
		tile.stride = 0;
		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
		tile.tiling = I915_TILING_NONE;

		err = check_partial_mapping(obj, &tile, end);
		if (err && err != -EINTR)
			goto out_unlock;
	}

	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		IGT_TIMEOUT(end);
		unsigned int max_pitch;
		unsigned int pitch;
		struct tile tile;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = i915->mm.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->mm.bit_6_swizzle_y;
			break;
		}

		if (tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		if (INTEL_GEN(i915) <= 2) {
			tile.height = 16;
			tile.width = 128;
			tile.size = 11;
		} else if (tile.tiling == I915_TILING_Y &&
			   HAS_128_BYTE_Y_TILING(i915)) {
			tile.height = 32;
			tile.width = 128;
			tile.size = 12;
		} else {
			tile.height = 8;
			tile.width = 512;
			tile.size = 12;
		}

		if (INTEL_GEN(i915) < 4)
			max_pitch = 8192 / tile.width;
		else if (INTEL_GEN(i915) < 7)
			max_pitch = 128 * I965_FENCE_MAX_PITCH_VAL / tile.width;
		else
			max_pitch = 128 * GEN7_FENCE_MAX_PITCH_VAL / tile.width;

		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = check_partial_mapping(obj, &tile, end);
			if (err == -EINTR)
				goto next_tiling;
			if (err)
				goto out_unlock;

			if (pitch > 2 && INTEL_GEN(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = check_partial_mapping(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}

			if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = check_partial_mapping(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

		if (INTEL_GEN(i915) >= 4) {
			for_each_prime_number(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = check_partial_mapping(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

next_tiling: ;
	}

out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

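/*
 * Keep the object busy by moving it onto an active request; once busy,
 * the object holds its own active reference and is released when the
 * request is retired.
 */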
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_request *rq;
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context);
	if (IS_ERR(rq)) {
		i915_vma_unpin(vma);
		return PTR_ERR(rq);
	}

	i915_vma_move_to_active(vma, rq, 0);
	i915_request_add(rq);

	i915_gem_object_set_active_reference(obj);
	i915_vma_unpin(vma);
	return 0;
}

static bool assert_mmap_offset(struct drm_i915_private *i915,
			       unsigned long size,
			       int expected)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return false;

	err = i915_gem_object_create_mmap_offset(obj);
	i915_gem_object_put(obj);

	return err == expected;
}

static int igt_mmap_offset_exhaustion(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node resv, *hole;
	u64 hole_start, hole_end;
	int loop, err;

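	/* Trim the device mmap space to only a page */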
	memset(&resv, 0, sizeof(resv));
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		resv.start = hole_start;
		resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */
		err = drm_mm_reserve_node(mm, &resv);
		if (err) {
			pr_err("Failed to trim VMA manager, err=%d\n", err);
			return err;
		}
		break;
	}

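	/* Just fits! */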
	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
		pr_err("Unable to insert object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

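	/* Too large */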
	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

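	/* Fill the hole, further allocation attempts should then fail */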
	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out;
	}

	err = i915_gem_object_create_mmap_offset(obj);
	if (err) {
		pr_err("Unable to insert object into reclaimed hole\n");
		goto err_obj;
	}

	if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
		err = -EINVAL;
		goto err_obj;
	}

	i915_gem_object_put(obj);

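	/* Now fill with busy objects */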
	for (loop = 0; loop < 3; loop++) {
		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		mutex_lock(&i915->drm.struct_mutex);
		intel_runtime_pm_get(i915);
		err = make_obj_busy(obj);
		intel_runtime_pm_put(i915);
		mutex_unlock(&i915->drm.struct_mutex);
		if (err) {
			pr_err("[loop %d] Failed to busy the object\n", loop);
			goto err_obj;
		}

		GEM_BUG_ON(!i915_gem_object_is_active(obj));
		err = i915_gem_object_create_mmap_offset(obj);
		if (err) {
			pr_err("[loop %d] i915_gem_object_create_mmap_offset failed with err=%d\n",
			       loop, err);
			goto out;
		}
	}

out:
	drm_mm_remove_node(&resv);
	return err;
err_obj:
	i915_gem_object_put(obj);
	goto out;
}

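/*
 * Entry points: the mock selftests run against a fake device created by
 * mock_gem_device(), while the live selftests require real hardware.
 */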
int i915_gem_object_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_object),
		SUBTEST(igt_phys_object),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);

	drm_dev_unref(&i915->drm);
	return err;
}

int i915_gem_object_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_huge),
		SUBTEST(igt_partial_tiling),
		SUBTEST(igt_mmap_offset_exhaustion),
	};

	return i915_subtests(tests, i915);
}