/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OF THE
 * SOFTWARE.
 *
 */

#include "../i915_selftest.h"

#include "mock_drm.h"
#include "huge_gem_object.h"

#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))

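/* Build a batch of MI_STORE_DWORD_IMM commands, one store per page: each
 * writes @value into a single dword at @offset within successive pages of
 * @vma, using the encoding appropriate to the gen of the device. The batch
 * is returned pinned into the same address space as @vma.
 */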
static struct i915_vma *
gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
{
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(vma->vm->i915);
	unsigned long n, size;
	u32 *cmd;
	int err;

	/* At most 4 dwords per store, plus the batch terminator */
	size = (4 * count + 1) * sizeof(u32);
	size = round_up(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(vma->vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
	offset += vma->node.start;

	for (n = 0; n < count; n++) {
		if (gen >= 8) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
			*cmd++ = lower_32_bits(offset);
			*cmd++ = upper_32_bits(offset);
			*cmd++ = value;
		} else if (gen >= 4) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
				(gen < 6 ? MI_USE_GGTT : 0);
			*cmd++ = 0;
			*cmd++ = offset;
			*cmd++ = value;
		} else {
			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*cmd++ = offset;
			*cmd++ = value;
		}
		offset += PAGE_SIZE;
	}
	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

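/* huge_gem_object() creates an object that exposes many more GTT pages (dma)
 * than it has backing pages (phys); the backing store is reused cyclically
 * throughout the GTT mapping. real_page_count() is the number of distinct
 * backing pages, fake_page_count() the number of GTT pages they appear as.
 */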
static unsigned long real_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
}

static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
}

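/* Using the GPU from @ctx on @engine, store the value @dw into the @dw'th
 * dword of every backing page of @obj, addressed through the @dw'th GTT
 * alias of those pages.
 */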
static int gpu_fill(struct drm_i915_gem_object *obj,
		    struct i915_gem_context *ctx,
		    struct intel_engine_cs *engine,
		    unsigned int dw)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
	struct drm_i915_gem_request *rq;
	struct i915_vma *vma;
	struct i915_vma *batch;
	unsigned int flags;
	int err;

	GEM_BUG_ON(obj->base.size > vm->total);
	GEM_BUG_ON(!intel_engine_can_store_dword(engine));

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		return err;

	err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
	if (err)
		return err;

	/* Within the GTT the huge object maps every page onto
	 * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
	 * We set the nth dword within the page using the nth
	 * mapping via the GTT - this should exercise the GTT mapping
	 * whilst checking that each context provides a unique view
	 * into the object.
	 */
	batch = gpu_fill_dw(vma,
			    (dw * real_page_count(obj)) << PAGE_SHIFT |
			    (dw * sizeof(u32)),
			    real_page_count(obj),
			    dw);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_vma;
	}

	rq = i915_gem_request_alloc(engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	err = engine->emit_flush(rq, EMIT_INVALIDATE);
	if (err)
		goto err_request;

	err = i915_switch_context(rq);
	if (err)
		goto err_request;

	flags = 0;
	if (INTEL_GEN(vm->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;

	err = engine->emit_bb_start(rq,
				    batch->node.start, batch->node.size,
				    flags);
	if (err)
		goto err_request;

	i915_vma_move_to_active(batch, rq, 0);
	i915_gem_object_set_active_reference(batch->obj);
	i915_vma_unpin(batch);
	i915_vma_close(batch);

	i915_vma_move_to_active(vma, rq, 0);
	i915_vma_unpin(vma);

	reservation_object_lock(obj->resv, NULL);
	reservation_object_add_excl_fence(obj->resv, &rq->fence);
	reservation_object_unlock(obj->resv);

	__i915_add_request(rq, true);

	return 0;

err_request:
	__i915_add_request(rq, false);
err_batch:
	i915_vma_unpin(batch);
err_vma:
	i915_vma_unpin(vma);
	return err;
}

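/* Fill every dword of every backing page of @obj with @value using the CPU,
 * flushing the writes out of the CPU cache on platforms without LLC.
 */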
static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
{
	const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
	unsigned int n, m, need_flush;
	int err;

	err = i915_gem_obj_prepare_shmem_write(obj, &need_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		for (m = 0; m < DW_PER_PAGE; m++)
			map[m] = value;
		if (!has_llc)
			drm_clflush_virt_range(map, PAGE_SIZE);
		kunmap_atomic(map);
	}

	i915_gem_obj_finish_shmem_access(obj);
	obj->base.read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = 0;
	return 0;
}

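/* Verify each backing page of @obj: the first @max dwords must each hold
 * their own index (as written by the GPU), and every remaining dword must
 * still hold the 0xdeadbeef background value.
 */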
static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
{
	unsigned int n, m, needs_flush;
	int err;

	err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(map, PAGE_SIZE);

		for (m = 0; m < max; m++) {
			if (map[m] != m) {
				pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
				       n, m, map[m], m);
				err = -EINVAL;
				goto out_unmap;
			}
		}

		for (; m < DW_PER_PAGE; m++) {
			if (map[m] != 0xdeadbeef) {
				pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
				       n, m, map[m], 0xdeadbeef);
				err = -EINVAL;
				goto out_unmap;
			}
		}

out_unmap:
		kunmap_atomic(map);
		if (err)
			break;
	}

	i915_gem_obj_finish_shmem_access(obj);
	return err;
}

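/* Create a huge object (capped at half the address space), publish a handle
 * for it on @file so that it is reaped when the file is closed, fill it with
 * the 0xdeadbeef background pattern and queue it onto @objects for checking.
 */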
static struct drm_i915_gem_object *
create_test_object(struct i915_gem_context *ctx,
		   struct drm_file *file,
		   struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
	u64 size;
	u32 handle;
	int err;

	size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);

	obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
	if (IS_ERR(obj))
		return obj;

	/* tie the object to the drm_file for easy reaping; dropping our
	 * reference is safe as the handle keeps the object alive
	 */
	err = drm_gem_handle_create(file, &obj->base, &handle);
	i915_gem_object_put(obj);
	if (err)
		return ERR_PTR(err);

	err = cpu_fill(obj, 0xdeadbeef);
	if (err) {
		pr_err("Failed to fill object with cpu, err=%d\n", err);
		return ERR_PTR(err);
	}

	list_add_tail(&obj->st_link, objects);
	return obj;
}

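/* The number of distinct fill passes (GTT aliases of the backing store) that
 * the object supports.
 */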
static unsigned long max_dwords(struct drm_i915_gem_object *obj)
{
	unsigned long npages = fake_page_count(obj);

	GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
	return npages / DW_PER_PAGE;
}

static int igt_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_file *file;
	IGT_TIMEOUT(end_time);
	LIST_HEAD(objects);
	unsigned long ncontexts, ndwords, dw;
	bool first_shared_gtt = true;
	int err = -ENODEV;

	/* Create a few different contexts (with different mm) and write
	 * through each ctx/mm using the GPU making sure those writes end
	 * up in the expected pages of our obj.
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	ncontexts = 0;
	ndwords = 0;
	dw = 0;
	while (!time_after(jiffies, end_time)) {
		struct intel_engine_cs *engine;
		struct i915_gem_context *ctx;
		unsigned int id;

		if (first_shared_gtt) {
			ctx = __create_hw_context(i915, file->driver_priv);
			first_shared_gtt = false;
		} else {
			ctx = i915_gem_create_context(i915, file->driver_priv);
		}
		if (IS_ERR(ctx)) {
			err = PTR_ERR(ctx);
			goto out_unlock;
		}

		for_each_engine(engine, i915, id) {
			if (!intel_engine_can_store_dword(engine))
				continue;

			if (!obj) {
				obj = create_test_object(ctx, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					goto out_unlock;
				}
			}

			err = gpu_fill(obj, ctx, engine, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name, ctx->hw_id,
				       yesno(!!ctx->ppgtt), err);
				goto out_unlock;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}
			ndwords++;
		}
		ncontexts++;
	}
	pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
		ncontexts, INTEL_INFO(i915)->num_rings, ndwords);

	dw = 0;
	list_for_each_entry(obj, &objects, st_link) {
		unsigned int rem =
			min_t(unsigned int, ndwords - dw, max_dwords(obj));

		err = cpu_check(obj, rem);
		if (err)
			break;

		dw += rem;
	}

out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
	return err;
}

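/* Install an aliasing ppgtt so that context execution is also exercised on
 * top of one. Any vma already bound into the GGTT has its LOCAL_BIND flag
 * cleared, forcing it to be rebound through the freshly created aliasing
 * ppgtt on its next PIN_USER pin.
 */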
static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	int err;

	err = i915_gem_init_aliasing_ppgtt(i915);
	if (err)
		return err;

	list_for_each_entry(obj, &i915->mm.bound_list, global_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma))
			continue;

		vma->flags &= ~I915_VMA_LOCAL_BIND;
	}

	return 0;
}

static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
{
	i915_gem_fini_aliasing_ppgtt(i915);
}

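/* Entry point for the context execution live selftests; temporarily installs
 * a fake aliasing ppgtt when the platform uses ppgtt but none is present.
 */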
int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ctx_exec),
	};
	bool fake_alias = false;
	int err;

	/* Install a fake aliasing gtt for exercise */
	if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
		mutex_lock(&dev_priv->drm.struct_mutex);
		err = fake_aliasing_ppgtt_enable(dev_priv);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		if (err)
			return err;

		GEM_BUG_ON(!dev_priv->mm.aliasing_ppgtt);
		fake_alias = true;
	}

	err = i915_subtests(tests, dev_priv);

	if (fake_alias) {
		mutex_lock(&dev_priv->drm.struct_mutex);
		fake_aliasing_ppgtt_disable(dev_priv);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	return err;
}