#include "gem/i915_gem_pm.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"
#include "mock_drm.h"
#include "mock_gem_device.h"

static void quirk_add(struct drm_i915_gem_object *obj,
		      struct list_head *objects)
{
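	/*
	 * Borrow the otherwise unused tiling quirk to mark the objects owned
	 * by this test; unpin_ggtt() and cleanup_objects() key off it.
	 */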
	GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
	i915_gem_object_set_tiling_quirk(obj);
	list_add(&obj->st_link, objects);
}

static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	unsigned long count;

	count = 0;
	do {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			i915_gem_object_put(obj);
			if (vma == ERR_PTR(-ENOSPC))
				break;

			return PTR_ERR(vma);
		}

		quirk_add(obj, objects);
		count++;
	} while (1);
	pr_debug("Filled GGTT with %lu pages [%llu total]\n",
		 count, ggtt->vm.total / PAGE_SIZE);

	if (list_empty(&ggtt->vm.bound_list)) {
		pr_err("No objects on the GGTT inactive list!\n");
		return -EINVAL;
	}

	return 0;
}

static void unpin_ggtt(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_gem_object_has_tiling_quirk(vma->obj))
			i915_vma_unpin(vma);
}

static void cleanup_objects(struct i915_ggtt *ggtt, struct list_head *list)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, list, st_link) {
		GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
		i915_gem_object_set_tiling_quirk(obj);
		i915_gem_object_put(obj);
	}

	i915_gem_drain_freed_objects(ggtt->vm.i915);
}

static int igt_evict_something(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	LIST_HEAD(objects);
	int err;
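
	/* Fill the GGTT with pinned objects and try to evict one. */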
	err = populate_ggtt(ggtt, &objects);
	if (err)
		goto cleanup;
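
	/* Everything is pinned, so expect the eviction attempt to fail with -ENOSPC */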
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_something(&ggtt->vm,
				       I915_GTT_PAGE_SIZE, 0, 0,
				       0, U64_MAX,
				       0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err != -ENOSPC) {
		pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(ggtt);
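
	/* Everything is unpinned, we should be able to evict something */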
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_something(&ggtt->vm,
				       I915_GTT_PAGE_SIZE, 0, 0,
				       0, U64_MAX,
				       0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("i915_gem_evict_something failed on a full, unpinned GGTT with err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(ggtt, &objects);
	return err;
}

static int igt_overcommit(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;
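
	/*
	 * Fill the GGTT with pinned objects and then try to pin one more.
	 * We expect the extra pin to fail with -ENOSPC, since nothing in
	 * the GGTT can be evicted.
	 */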
	err = populate_ggtt(ggtt, &objects);
	if (err)
		goto cleanup;

	obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}

	quirk_add(obj, &objects);

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
	if (vma != ERR_PTR(-ENOSPC)) {
		pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n",
		       (int)PTR_ERR_OR_ZERO(vma));
		err = -EINVAL;
		goto cleanup;
	}

cleanup:
	cleanup_objects(ggtt, &objects);
	return err;
}

static int igt_evict_for_vma(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	struct drm_mm_node target = {
		.start = 0,
		.size = 4096,
	};
	LIST_HEAD(objects);
	int err;
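
	/* Fill the GGTT with pinned objects and try to evict a range. */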
	err = populate_ggtt(ggtt, &objects);
	if (err)
		goto cleanup;
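
	/* Everything is pinned, so expect -ENOSPC when clearing the target range */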
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err != -ENOSPC) {
		pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(ggtt);
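
	/* Everything is unpinned, we should be able to evict the node */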
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("i915_gem_evict_for_node returned err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(ggtt, &objects);
	return err;
}

static void mock_color_adjust(const struct drm_mm_node *node,
			      unsigned long color,
			      u64 *start,
			      u64 *end)
{
	/*
	 * Intentionally a no-op: the mere presence of a color_adjust
	 * callback is enough for the GGTT to report cache coloring.
	 */
}

static int igt_evict_for_cache_color(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	const unsigned long flags = PIN_OFFSET_FIXED;
	struct drm_mm_node target = {
		.start = I915_GTT_PAGE_SIZE * 2,
		.size = I915_GTT_PAGE_SIZE,
		.color = I915_CACHE_LLC,
	};
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;
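
	/*
	 * Check that i915_gem_evict_for_node() honours the GGTT cache
	 * coloring: evicting a node may also require evicting a
	 * neighbouring vma when their cache colors differ.
	 */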
	ggtt->vm.mm.color_adjust = mock_color_adjust;
	GEM_BUG_ON(!i915_vm_has_cache_coloring(&ggtt->vm));

	obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
	quirk_add(obj, &objects);

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       I915_GTT_PAGE_SIZE | flags);
	if (IS_ERR(vma)) {
		pr_err("[0]i915_gem_object_ggtt_pin failed\n");
		err = PTR_ERR(vma);
		goto cleanup;
	}

	obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
	quirk_add(obj, &objects);
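
	/* Neighbouring; same colour - should fit */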
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       (I915_GTT_PAGE_SIZE * 2) | flags);
	if (IS_ERR(vma)) {
		pr_err("[1]i915_gem_object_ggtt_pin failed\n");
		err = PTR_ERR(vma);
		goto cleanup;
	}

	i915_vma_unpin(vma);
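
	/* Remove just the second vma */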
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
		goto cleanup;
	}
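
	/*
	 * Attempt to remove the first *pinned* vma by evicting its (now
	 * empty) neighbour with a conflicting cache color -- this should
	 * fail.
	 */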
	target.color = I915_CACHE_L3_LLC;

	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
	mutex_unlock(&ggtt->vm.mutex);
	if (!err) {
		pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
		err = -EINVAL;
		goto cleanup;
	}

	err = 0;

cleanup:
	unpin_ggtt(ggtt);
	cleanup_objects(ggtt, &objects);
	ggtt->vm.mm.color_adjust = NULL;
	return err;
}

static int igt_evict_vm(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	LIST_HEAD(objects);
	int err;
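
	/* Fill the GGTT with pinned objects and try to evict everything. */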
	err = populate_ggtt(ggtt, &objects);
	if (err)
		goto cleanup;
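
	/* Everything is pinned; nothing can be evicted, but the call should still succeed */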
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_vm(&ggtt->vm);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("i915_gem_evict_vm on a full GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(ggtt);

	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_vm(&ggtt->vm);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("i915_gem_evict_vm on an unpinned GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(ggtt, &objects);
	return err;
}

static int igt_evict_contexts(void *arg)
{
	const u64 PRETEND_GGTT_SIZE = 16ull << 20;
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	struct drm_i915_private *i915 = gt->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct reserved {
		struct drm_mm_node node;
		struct reserved *next;
	} *reserved = NULL;
	intel_wakeref_t wakeref;
	struct drm_mm_node hole;
	unsigned long count;
	int err;
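
	/*
	 * The purpose of this test is to verify that we will trigger an
	 * eviction in the GGTT when constructing a request that requires
	 * additional space in the GGTT for pinning the context. This space
	 * is not directly tied to the request, so reclaiming it requires
	 * flushing all the outstanding requests first.
	 */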
	if (!HAS_FULL_PPGTT(i915))
		return 0;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
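
	/* Reserve a block so that we know we have enough to fit a few rq */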
	memset(&hole, 0, sizeof(hole));
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_gtt_insert(&ggtt->vm, &hole,
				  PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
				  0, ggtt->vm.total,
				  PIN_NOEVICT);
	if (err)
		goto out_locked;
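
	/* Make the GGTT appear small by filling it with unevictable nodes */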
	count = 0;
	do {
		struct reserved *r;

		mutex_unlock(&ggtt->vm.mutex);
		r = kcalloc(1, sizeof(*r), GFP_KERNEL);
		mutex_lock(&ggtt->vm.mutex);
		if (!r) {
			err = -ENOMEM;
			goto out_locked;
		}

		if (i915_gem_gtt_insert(&ggtt->vm, &r->node,
					1ul << 20, 0, I915_COLOR_UNEVICTABLE,
					0, ggtt->vm.total,
					PIN_NOEVICT)) {
			kfree(r);
			break;
		}

		r->next = reserved;
		reserved = r;

		count++;
	} while (1);
	drm_mm_remove_node(&hole);
	mutex_unlock(&ggtt->vm.mutex);
	pr_info("Filled GGTT with %lu 1MiB nodes\n", count);
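
	/* Overfill the GGTT with context objects and so try to evict one. */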
	for_each_engine(engine, gt, id) {
		struct i915_sw_fence fence;

		count = 0;
		onstack_fence_init(&fence);
		do {
			struct intel_context *ce;
			struct i915_request *rq;

			ce = intel_context_create(engine);
			if (IS_ERR(ce))
				break;
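
			/* We will need some GGTT space for the rq's context */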
			igt_evict_ctl.fail_if_busy = true;
			rq = intel_context_create_request(ce);
			igt_evict_ctl.fail_if_busy = false;
			intel_context_put(ce);

			if (IS_ERR(rq)) {
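				/* When full, fail_if_busy will trigger EBUSY */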
				if (PTR_ERR(rq) != -EBUSY) {
					pr_err("Unexpected error from request alloc (on %s): %d\n",
					       engine->name,
					       (int)PTR_ERR(rq));
					err = PTR_ERR(rq);
				}
				break;
			}
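
			/* Keep every request/ctx pinned until we are full */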
			err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
							       &fence,
							       GFP_KERNEL);
			if (err < 0)
				break;

			i915_request_add(rq);
			count++;
			err = 0;
		} while (1);
		onstack_fence_fini(&fence);
		pr_info("Submitted %lu contexts/requests on %s\n",
			count, engine->name);
		if (err)
			break;
	}

	mutex_lock(&ggtt->vm.mutex);
out_locked:
	if (igt_flush_test(i915))
		err = -EIO;
	while (reserved) {
		struct reserved *next = reserved->next;

		drm_mm_remove_node(&reserved->node);
		kfree(reserved);

		reserved = next;
	}
	if (drm_mm_node_allocated(&hole))
		drm_mm_remove_node(&hole);
	mutex_unlock(&ggtt->vm.mutex);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return err;
}

int i915_gem_evict_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_evict_something),
		SUBTEST(igt_evict_for_vma),
		SUBTEST(igt_evict_for_cache_color),
		SUBTEST(igt_evict_vm),
		SUBTEST(igt_overcommit),
	};
	struct drm_i915_private *i915;
	intel_wakeref_t wakeref;
	int err = 0;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		err = i915_subtests(tests, &i915->gt);

	mock_destroy_device(i915);
	return err;
}

int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_evict_contexts),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}
546