// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "gt/intel_migrate.h"
#include "gt/intel_gpu_commands.h"
#include "gem/i915_gem_ttm_move.h"

#include "i915_deps.h"

#include "selftests/igt_spinner.h"

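/*
 * Fill the object with a simple counting pattern through a CPU map, or
 * verify that the pattern is still intact.
 */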
static int igt_fill_check_buffer(struct drm_i915_gem_object *obj,
				 bool fill)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int i, count = obj->base.size / sizeof(u32);
	enum i915_map_type map_type =
		i915_coherent_map_type(i915, obj, false);
	u32 *cur;
	int err = 0;

	assert_object_held(obj);
	cur = i915_gem_object_pin_map(obj, map_type);
	if (IS_ERR(cur))
		return PTR_ERR(cur);

	if (fill)
		for (i = 0; i < count; ++i)
			*cur++ = i;
	else
		for (i = 0; i < count; ++i)
			if (*cur++ != i) {
				pr_err("Object content mismatch at location %d of %d\n", i, count);
				err = -EINVAL;
				break;
			}

	i915_gem_object_unpin_map(obj);

	return err;
}

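/*
 * Create an object in @src, fill it with a test pattern, migrate it to @dst
 * and verify that the pattern survives the move once the migration has been
 * flushed.
 */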
static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
			      enum intel_region_id dst)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_memory_region *src_mr = i915->mm.regions[src];
	struct drm_i915_gem_object *obj;
	struct i915_gem_ww_ctx ww;
	int err = 0;

	GEM_BUG_ON(!src_mr);

	obj = i915_gem_object_create_region(src_mr, PAGE_SIZE, 0, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		err = igt_fill_check_buffer(obj, true);
		if (err)
			continue;

		err = i915_gem_object_migrate(obj, &ww, dst);
		if (err)
			continue;

		err = i915_gem_object_pin_pages(obj);
		if (err)
			continue;

		if (i915_gem_object_can_migrate(obj, src))
			err = -EINVAL;

		i915_gem_object_unpin_pages(obj);
		err = i915_gem_object_wait_migration(obj, true);
		if (err)
			continue;

		err = igt_fill_check_buffer(obj, false);
	}
	i915_gem_object_put(obj);

	return err;
}

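/* Exercise migration at object-creation time for each region combination. */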
static int igt_smem_create_migrate(void *arg)
{
	return igt_create_migrate(arg, INTEL_REGION_LMEM, INTEL_REGION_SMEM);
}

static int igt_lmem_create_migrate(void *arg)
{
	return igt_create_migrate(arg, INTEL_REGION_SMEM, INTEL_REGION_LMEM);
}

static int igt_same_create_migrate(void *arg)
{
	return igt_create_migrate(arg, INTEL_REGION_LMEM, INTEL_REGION_LMEM);
}

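/*
 * Migrate the object to whichever region it is not currently backed by:
 * lmem -> smem or smem -> lmem. If @vma is non-NULL, pin and unpin it first
 * so that a binding is left in place for the migration to unbind.
 */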
static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
				  struct drm_i915_gem_object *obj,
				  struct i915_vma *vma)
{
	int err;

	err = i915_gem_object_lock(obj, ww);
	if (err)
		return err;

	if (vma) {
		err = i915_vma_pin_ww(vma, ww, obj->base.size, 0,
				      0UL | PIN_OFFSET_FIXED |
				      PIN_USER);
		if (err) {
			if (err != -EINTR && err != -ERESTARTSYS &&
			    err != -EDEADLK)
				pr_err("Failed to pin vma.\n");
			return err;
		}

		i915_vma_unpin(vma);
	}

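	/*
	 * Migrating the object will implicitly unbind (asynchronously) any
	 * vma that is still bound to it.
	 */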
	if (i915_gem_object_is_lmem(obj)) {
		err = i915_gem_object_migrate(obj, ww, INTEL_REGION_SMEM);
		if (err) {
			pr_err("Object failed migration to smem\n");
			return err;
		}

		if (i915_gem_object_is_lmem(obj)) {
			pr_err("object still backed by lmem\n");
			err = -EINVAL;
		}

		if (!i915_gem_object_has_struct_page(obj)) {
			pr_err("object not backed by struct page\n");
			err = -EINVAL;
		}

	} else {
		err = i915_gem_object_migrate(obj, ww, INTEL_REGION_LMEM);
		if (err) {
			pr_err("Object failed migration to lmem\n");
			return err;
		}

		if (i915_gem_object_has_struct_page(obj)) {
			pr_err("object still backed by struct page\n");
			err = -EINVAL;
		}

		if (!i915_gem_object_is_lmem(obj)) {
			pr_err("object not backed by lmem\n");
			err = -EINVAL;
		}
	}

	return err;
}

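/*
 * Create a 2MiB lmem object and clear it on the GPU, optionally depending on
 * @deps. Then bounce its backing store between smem and lmem a number of
 * times before finally syncing the migrations and verifying the result.
 * Without a @vm, the contents are checked through a CPU map; with a @vm, a
 * vma is pinned on each bounce and waited on for binding at the end.
 */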
static int __igt_lmem_pages_migrate(struct intel_gt *gt,
				    struct i915_address_space *vm,
				    struct i915_deps *deps,
				    struct igt_spinner *spin,
				    struct dma_fence *spin_fence)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma = NULL;
	struct i915_gem_ww_ctx ww;
	struct i915_request *rq;
	int err;
	int i;

	obj = i915_gem_object_create_lmem(i915, SZ_2M, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (vm) {
		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_put;
		}
	}

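	/* Initial GPU clear, then CPU fill if not using a vma. */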
	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			continue;

		err = intel_migrate_clear(&gt->migrate, &ww, deps,
					  obj->mm.pages->sgl, obj->cache_level,
					  i915_gem_object_is_lmem(obj),
					  0xdeadbeaf, &rq);
		if (rq) {
			dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
			i915_gem_object_set_moving_fence(obj, &rq->fence);
			i915_request_put(rq);
		}
		if (err)
			continue;

		if (!vma) {
			err = igt_fill_check_buffer(obj, true);
			if (err)
				continue;
		}
	}
	if (err)
		goto out_put;

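	/*
	 * Migrate to and from smem a few times without explicitly syncing in
	 * between, leaving the final contents in smem for a fast readback.
	 */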
	for (i = 1; i <= 5; ++i) {
		for_i915_gem_ww(&ww, err, true)
			err = lmem_pages_migrate_one(&ww, obj, vma);
		if (err)
			goto out_put;
	}

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out_put;

	if (spin) {
		if (dma_fence_is_signaled(spin_fence)) {
			pr_err("Spinner was terminated by hangcheck.\n");
			err = -EBUSY;
			goto out_unlock;
		}
		igt_spinner_end(spin);
	}

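	/* Finally sync the migrations and check the content. */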
	err = i915_gem_object_wait_migration(obj, true);
	if (err)
		goto out_unlock;

	if (vma) {
		err = i915_vma_wait_for_bind(vma);
		if (err)
			goto out_unlock;
	} else {
		err = igt_fill_check_buffer(obj, false);
	}

out_unlock:
	i915_gem_object_unlock(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

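/*
 * Run the migration test with all combinations of simulated GPU copy and
 * allocation failures, exercising the fallback paths as well.
 */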
static int igt_lmem_pages_failsafe_migrate(void *arg)
{
	int fail_gpu, fail_alloc, ret;
	struct intel_gt *gt = arg;

	for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) {
		for (fail_alloc = 0; fail_alloc < 2; ++fail_alloc) {
			pr_info("Simulated failure modes: gpu: %d, alloc: %d\n",
				fail_gpu, fail_alloc);
			i915_ttm_migrate_set_failure_modes(fail_gpu,
							   fail_alloc);
			ret = __igt_lmem_pages_migrate(gt, NULL, NULL, NULL, NULL);
			if (ret)
				goto out_err;
		}
	}

out_err:
	i915_ttm_migrate_set_failure_modes(false, false);
	return ret;
}

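/*
 * Test that unbinding at migration is indeed performed asynchronously.
 * A spinner is launched and a number of migrations are made to depend on it.
 * Before each migration a vma is bound, which the migration then has to
 * unbind. If the migrations can be scheduled without blocking while the
 * spinner is still running, the unbinds are asynchronous.
 */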
static int igt_async_migrate(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct i915_ppgtt *ppgtt;
	struct igt_spinner spin;
	int err;

	ppgtt = i915_ppgtt_create(gt, 0);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (igt_spinner_init(&spin, gt)) {
		err = -ENOMEM;
		goto out_spin;
	}

	for_each_engine(engine, gt, id) {
		struct ttm_operation_ctx ctx = {
			.interruptible = true
		};
		struct dma_fence *spin_fence;
		struct intel_context *ce;
		struct i915_request *rq;
		struct i915_deps deps;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto out_ce;
		}

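		/*
		 * Submit a spinner on this engine and add its fence as a
		 * dependency for the initial GPU clear, so that the
		 * migrations performed by the test are queued up behind the
		 * still-running spinner.
		 */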
		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		intel_context_put(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_ce;
		}

		i915_deps_init(&deps, GFP_KERNEL);
		err = i915_deps_add_dependency(&deps, &rq->fence, &ctx);
		spin_fence = dma_fence_get(&rq->fence);
		i915_request_add(rq);
		if (err)
			goto out_ce;

		err = __igt_lmem_pages_migrate(gt, &ppgtt->vm, &deps, &spin,
					       spin_fence);
		i915_deps_fini(&deps);
		dma_fence_put(spin_fence);
		if (err)
			goto out_ce;
	}

out_ce:
	igt_spinner_fini(&spin);
out_spin:
	i915_vm_put(&ppgtt->vm);

	return err;
}

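/*
 * With ASYNC_FAIL_ALLOC set to 1, the async test below only injects simulated
 * GPU copy failures; simulated allocation failures are not exercised here.
 */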
#define ASYNC_FAIL_ALLOC 1
static int igt_lmem_async_migrate(void *arg)
{
	int fail_gpu, fail_alloc, ret;
	struct intel_gt *gt = arg;

	for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) {
		for (fail_alloc = 0; fail_alloc < ASYNC_FAIL_ALLOC; ++fail_alloc) {
			pr_info("Simulated failure modes: gpu: %d, alloc: %d\n",
				fail_gpu, fail_alloc);
			i915_ttm_migrate_set_failure_modes(fail_gpu,
							   fail_alloc);
			ret = igt_async_migrate(gt);
			if (ret)
				goto out_err;
		}
	}

out_err:
	i915_ttm_migrate_set_failure_modes(false, false);
	return ret;
}

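/* Migration is only relevant on platforms with local memory. */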
int i915_gem_migrate_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_smem_create_migrate),
		SUBTEST(igt_lmem_create_migrate),
		SUBTEST(igt_same_create_migrate),
		SUBTEST(igt_lmem_pages_failsafe_migrate),
		SUBTEST(igt_lmem_async_migrate),
	};

	if (!HAS_LMEM(i915))
		return 0;

	return intel_gt_live_subtests(tests, to_gt(i915));
}