/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */
#include <generated/utsrelease.h>
#include <linux/stop_machine.h>
#include <linux/zlib.h>
#include "i915_drv.h"

static const char *engine_str(int engine)
{
	switch (engine) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

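/*
 * The error state buffer supports reading the formatted dump from an
 * arbitrary offset: @start is the first byte the reader asked for, @pos
 * tracks how much output has been generated so far and @bytes how much of
 * it actually landed in @buf. The helpers below skip over any output that
 * precedes the requested window and flag overflow/seek failures in @err.
 */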
static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}

static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		e->err = -EIO;
		return false;
	}

	return true;
}

static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that
	 * the start position matches the start of the buffer.
	 */
	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}

__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}

static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek the first puts which hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)

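/*
 * Captured object contents are written out page by page through one of two
 * compile-time variants: with CONFIG_DRM_I915_COMPRESS_ERROR each page is
 * zlib-deflated and the dump is prefixed with ":", otherwise the raw pages
 * are emitted behind a "~" marker so userspace can tell the two streams
 * apart.
 */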
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR

struct compress {
	struct z_stream_s zstream;
	void *tmp;
};

static bool compress_init(struct compress *c)
{
	struct z_stream_s *zstream = memset(&c->zstream, 0, sizeof(c->zstream));

	zstream->workspace =
		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			GFP_ATOMIC | __GFP_NOWARN);
	if (!zstream->workspace)
		return false;

	if (zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
		kfree(zstream->workspace);
		return false;
	}

	c->tmp = NULL;
	if (i915_has_memcpy_from_wc())
		c->tmp = (void *)__get_free_page(GFP_ATOMIC | __GFP_NOWARN);

	return true;
}

static int compress_page(struct compress *c,
			 void *src,
			 struct drm_i915_error_object *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	zstream->next_in = src;
	if (c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
		zstream->next_in = c->tmp;
	zstream->avail_in = PAGE_SIZE;

	do {
		if (zstream->avail_out == 0) {
			unsigned long page;

			page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
			if (!page)
				return -ENOMEM;

			dst->pages[dst->page_count++] = (void *)page;

			zstream->next_out = (void *)page;
			zstream->avail_out = PAGE_SIZE;
		}

		if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK)
			return -EIO;
	} while (zstream->avail_in);

	/* Fallback to uncompressed if we increase size? */
	if (0 && zstream->total_out > zstream->total_in)
		return -E2BIG;

	return 0;
}

static void compress_fini(struct compress *c,
			  struct drm_i915_error_object *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	if (dst) {
		zlib_deflate(zstream, Z_FINISH);
		dst->unused = zstream->avail_out;
	}

	zlib_deflateEnd(zstream);
	kfree(zstream->workspace);

	if (c->tmp)
		free_page((unsigned long)c->tmp);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, ":");
}

#else

struct compress {
};

static bool compress_init(struct compress *c)
{
	return true;
}

static int compress_page(struct compress *c,
			 void *src,
			 struct drm_i915_error_object *dst)
{
	unsigned long page;
	void *ptr;

	page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
	if (!page)
		return -ENOMEM;

	ptr = (void *)page;
	if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
		memcpy(ptr, src, PAGE_SIZE);
	dst->pages[dst->page_count++] = ptr;

	return 0;
}

static void compress_fini(struct compress *c,
			  struct drm_i915_error_object *dst)
{
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, "~");
}

#endif

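/*
 * Dump a list of buffer objects, one per line: GGTT offset, size, the
 * read/write domains, the last read seqno for each engine and the last
 * write seqno, followed by decoded flags (tiling, dirty, purgeable, ...).
 */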
static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	int i;

	err_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, " %08x_%08x %8u %02x %02x [ ",
			   upper_32_bits(err->gtt_offset),
			   lower_32_bits(err->gtt_offset),
			   err->size,
			   err->read_domains,
			   err->write_domain);
		for (i = 0; i < I915_NUM_ENGINES; i++)
			err_printf(m, "%02x ", err->rseqno[i]);

		err_printf(m, "] %02x", err->wseqno);
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->engine != -1 ? " " : "");
		err_puts(m, engine_str(err->engine));
		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}

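/*
 * Beyond the base INSTDONE register, the slice-common and per-slice/
 * subslice sampler and row INSTDONE values below exist only for the
 * render engine on gen4+ (the per-subslice arrays on gen7+).
 */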
static void error_print_instdone(struct drm_i915_error_state_buf *m,
				 const struct drm_i915_error_engine *ee)
{
	int slice;
	int subslice;

	err_printf(m, " INSTDONE: 0x%08x\n",
		   ee->instdone.instdone);

	if (ee->engine_id != RCS || INTEL_GEN(m->i915) <= 3)
		return;

	err_printf(m, " SC_INSTDONE: 0x%08x\n",
		   ee->instdone.slice_common);

	if (INTEL_GEN(m->i915) <= 6)
		return;

	for_each_instdone_slice_subslice(m->i915, slice, subslice)
		err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.sampler[slice][subslice]);

	for_each_instdone_slice_subslice(m->i915, slice, subslice)
		err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.row[slice][subslice]);
}

static void error_print_request(struct drm_i915_error_state_buf *m,
				const char *prefix,
				const struct drm_i915_error_request *erq)
{
	if (!erq->seqno)
		return;

	err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n",
		   prefix, erq->pid, erq->ban_score,
		   erq->context, erq->seqno,
		   jiffies_to_msecs(jiffies - erq->jiffies),
		   erq->head, erq->tail);
}

static void error_print_context(struct drm_i915_error_state_buf *m,
				const char *header,
				const struct drm_i915_error_context *ctx)
{
	err_printf(m, "%s%s[%d] user_handle %d hw_id %d, ban score %d guilty %d active %d\n",
		   header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
		   ctx->ban_score, ctx->guilty, ctx->active);
}

static void error_print_engine(struct drm_i915_error_state_buf *m,
			       const struct drm_i915_error_engine *ee)
{
	err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
	err_printf(m, " START: 0x%08x\n", ee->start);
	err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
	err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",
		   ee->tail, ee->rq_post, ee->rq_tail);
	err_printf(m, " CTL: 0x%08x\n", ee->ctl);
	err_printf(m, " MODE: 0x%08x\n", ee->mode);
	err_printf(m, " HWS: 0x%08x\n", ee->hws);
	err_printf(m, " ACTHD: 0x%08x %08x\n",
		   (u32)(ee->acthd >> 32), (u32)ee->acthd);
	err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir);
	err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr);

	error_print_instdone(m, ee);

	if (ee->batchbuffer) {
		u64 start = ee->batchbuffer->gtt_offset;
		u64 end = start + ee->batchbuffer->gtt_size;

		err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n",
			   upper_32_bits(start), lower_32_bits(start),
			   upper_32_bits(end), lower_32_bits(end));
	}
	if (INTEL_GEN(m->i915) >= 4) {
		err_printf(m, " BBADDR: 0x%08x_%08x\n",
			   (u32)(ee->bbaddr >> 32), (u32)ee->bbaddr);
		err_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate);
		err_printf(m, " INSTPS: 0x%08x\n", ee->instps);
	}
	err_printf(m, " INSTPM: 0x%08x\n", ee->instpm);
	err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
		   lower_32_bits(ee->faddr));
	if (INTEL_GEN(m->i915) >= 6) {
		err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
		err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
		err_printf(m, " SYNC_0: 0x%08x\n",
			   ee->semaphore_mboxes[0]);
		err_printf(m, " SYNC_1: 0x%08x\n",
			   ee->semaphore_mboxes[1]);
		if (HAS_VEBOX(m->i915))
			err_printf(m, " SYNC_2: 0x%08x\n",
				   ee->semaphore_mboxes[2]);
	}
	if (USES_PPGTT(m->i915)) {
		err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);

		if (INTEL_GEN(m->i915) >= 8) {
			int i;

			for (i = 0; i < 4; i++)
				err_printf(m, " PDP%d: 0x%016llx\n",
					   i, ee->vm_info.pdp[i]);
		} else {
			err_printf(m, " PP_DIR_BASE: 0x%08x\n",
				   ee->vm_info.pp_dir_base);
		}
	}
	err_printf(m, " seqno: 0x%08x\n", ee->seqno);
	err_printf(m, " last_seqno: 0x%08x\n", ee->last_seqno);
	err_printf(m, " waiting: %s\n", yesno(ee->waiting));
	err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head);
	err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail);
	err_printf(m, " hangcheck stall: %s\n", yesno(ee->hangcheck_stalled));
	err_printf(m, " hangcheck action: %s\n",
		   hangcheck_action_to_str(ee->hangcheck_action));
	err_printf(m, " hangcheck action timestamp: %lu, %u ms ago\n",
		   ee->hangcheck_timestamp,
		   jiffies_to_msecs(jiffies - ee->hangcheck_timestamp));

	error_print_request(m, " ELSP[0]: ", &ee->execlist[0]);
	error_print_request(m, " ELSP[1]: ", &ee->execlist[1]);
	error_print_context(m, " Active context: ", &ee->context);
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

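/*
 * Object contents are dumped as ascii85: every 32-bit word becomes five
 * characters in '!'..'u', with an all-zero word shortened to a single 'z'
 * (hence ascii85_encode() returning false for 0).
 */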
static int
ascii85_encode_len(int len)
{
	return DIV_ROUND_UP(len, 4);
}

static bool
ascii85_encode(u32 in, char *out)
{
	int i;

	if (in == 0)
		return false;

	out[5] = '\0';
	for (i = 5; i--; ) {
		out[i] = '!' + in % 85;
		in /= 85;
	}

	return true;
}

static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct intel_engine_cs *engine,
			    const char *name,
			    struct drm_i915_error_object *obj)
{
	char out[6];
	int page;

	if (!obj)
		return;

	if (name) {
		err_printf(m, "%s --- %s = 0x%08x %08x\n",
			   engine ? engine->name : "global", name,
			   upper_32_bits(obj->gtt_offset),
			   lower_32_bits(obj->gtt_offset));
	}

	err_compression_marker(m);
	for (page = 0; page < obj->page_count; page++) {
		int i, len;

		len = PAGE_SIZE;
		if (page == obj->page_count - 1)
			len -= obj->unused;
		len = ascii85_encode_len(len);

		for (i = 0; i < len; i++) {
			if (ascii85_encode(obj->pages[page][i], out))
				err_puts(m, out);
			else
				err_puts(m, "z");
		}
	}
	err_puts(m, "\n");
}

static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   const struct intel_device_info *info)
{
#define PRINT_FLAG(x) err_printf(m, #x ": %s\n", yesno(info->x))
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}

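/*
 * err_print_param() is expanded once per module parameter with the
 * stringified type of that parameter; being __always_inline, the
 * __builtin_strcmp() calls are folded at compile time and an unhandled
 * type trips BUILD_BUG() instead of failing at runtime.
 */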
static __always_inline void err_print_param(struct drm_i915_error_state_buf *m,
					    const char *name,
					    const char *type,
					    const void *x)
{
	if (!__builtin_strcmp(type, "bool"))
		err_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
	else if (!__builtin_strcmp(type, "int"))
		err_printf(m, "i915.%s=%d\n", name, *(const int *)x);
	else if (!__builtin_strcmp(type, "unsigned int"))
		err_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
	else if (!__builtin_strcmp(type, "char *"))
		err_printf(m, "i915.%s=%s\n", name, *(const char **)x);
	else
		BUILD_BUG();
}

static void err_print_params(struct drm_i915_error_state_buf *m,
			     const struct i915_params *p)
{
#define PRINT(T, x) err_print_param(m, #x, #T, &p->x);
	I915_PARAMS_FOR_EACH(PRINT);
#undef PRINT
}

static void err_print_pciid(struct drm_i915_error_state_buf *m,
			    struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;

	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   pdev->subsystem_vendor,
		   pdev->subsystem_device);
}

int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_gpu_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	struct drm_i915_error_object *obj;
	int i, j;

	if (!error) {
		err_printf(m, "No error state collected\n");
		return 0;
	}

	if (*error->error_msg)
		err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	err_printf(m, "Time: %ld s %ld us\n",
		   error->time.tv_sec, error->time.tv_usec);
	err_printf(m, "Boottime: %ld s %ld us\n",
		   error->boottime.tv_sec, error->boottime.tv_usec);
	err_printf(m, "Uptime: %ld s %ld us\n",
		   error->uptime.tv_sec, error->uptime.tv_usec);

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].hangcheck_stalled &&
		    error->engine[i].context.pid) {
			err_printf(m, "Active process (on ring %s): %s [%d], score %d\n",
				   engine_str(i),
				   error->engine[i].context.comm,
				   error->engine[i].context.pid,
				   error->engine[i].context.ban_score);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
	err_print_pciid(m, error->i915);

	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	if (HAS_CSR(dev_priv)) {
		struct intel_csr *csr = &dev_priv->csr;

		err_printf(m, "DMC loaded: %s\n",
			   yesno(csr->dmc_payload != NULL));
		err_printf(m, "DMC fw version: %d.%d\n",
			   CSR_VERSION_MAJOR(csr->version),
			   CSR_VERSION_MINOR(csr->version));
	}

	err_printf(m, "GT awake: %s\n", yesno(error->awake));
	err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock));
	err_printf(m, "PM suspended: %s\n", yesno(error->suspended));
	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	for (i = 0; i < error->ngtier; i++)
		err_printf(m, "GTIER[%d]: 0x%08x\n", i, error->gtier[i]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < error->nfence; i++)
		err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

	if (INTEL_GEN(dev_priv) >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);

		if (INTEL_GEN(dev_priv) >= 8)
			err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
				   error->fault_data1, error->fault_data0);

		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (IS_GEN7(dev_priv))
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].engine_id != -1)
			error_print_engine(m, &error->engine[i]);
	}

	for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) {
		char buf[128];
		int len, first = 1;

		if (!error->active_vm[i])
			break;

		len = scnprintf(buf, sizeof(buf), "Active (");
		for (j = 0; j < ARRAY_SIZE(error->engine); j++) {
			if (error->engine[j].vm != error->active_vm[i])
				continue;

			len += scnprintf(buf + len, sizeof(buf) - len, "%s%s",
					 first ? "" : ", ",
					 dev_priv->engine[j]->name);
			first = 0;
		}
		scnprintf(buf + len, sizeof(buf) - len, ")");
		print_error_buffers(m, buf,
				    error->active_bo[i],
				    error->active_bo_count[i]);
	}

	print_error_buffers(m, "Pinned (global)",
			    error->pinned_bo,
			    error->pinned_bo_count);

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		const struct drm_i915_error_engine *ee = &error->engine[i];

		obj = ee->batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->engine[i]->name);
			if (ee->context.pid)
				err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d)",
					   ee->context.comm,
					   ee->context.pid,
					   ee->context.handle,
					   ee->context.hw_id,
					   ee->context.ban_score);
			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
				   upper_32_bits(obj->gtt_offset),
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, dev_priv->engine[i], NULL, obj);
		}

		if (ee->num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->engine[i]->name,
				   ee->num_requests);
			for (j = 0; j < ee->num_requests; j++)
				error_print_request(m, " ", &ee->requests[j]);
		}

		if (IS_ERR(ee->waiters)) {
			err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
				   dev_priv->engine[i]->name);
		} else if (ee->num_waiters) {
			err_printf(m, "%s --- %d waiters\n",
				   dev_priv->engine[i]->name,
				   ee->num_waiters);
			for (j = 0; j < ee->num_waiters; j++) {
				err_printf(m, " seqno 0x%08x for %s [%d]\n",
					   ee->waiters[j].seqno,
					   ee->waiters[j].comm,
					   ee->waiters[j].pid);
			}
		}

		print_error_obj(m, dev_priv->engine[i],
				"ringbuffer", ee->ringbuffer);

		print_error_obj(m, dev_priv->engine[i],
				"HW Status", ee->hws_page);

		print_error_obj(m, dev_priv->engine[i],
				"HW context", ee->ctx);

		print_error_obj(m, dev_priv->engine[i],
				"WA context", ee->wa_ctx);

		print_error_obj(m, dev_priv->engine[i],
				"WA batchbuffer", ee->wa_batchbuffer);
	}

	print_error_obj(m, NULL, "Semaphores", error->semaphore);

	print_error_obj(m, NULL, "GuC log buffer", error->guc_log);

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, error->display);

	err_print_capabilities(m, &error->device_info);
	err_print_params(m, &error->params);

	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}

int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));
	ebuf->i915 = i915;

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}

static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		free_page((unsigned long)obj->pages[page]);

	kfree(obj);
}

static __always_inline void free_param(const char *type, void *x)
{
	if (!__builtin_strcmp(type, "char *"))
		kfree(*(void **)x);
}

void __i915_gpu_state_free(struct kref *error_ref)
{
	struct i915_gpu_state *error =
		container_of(error_ref, typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		struct drm_i915_error_engine *ee = &error->engine[i];

		i915_error_object_free(ee->batchbuffer);
		i915_error_object_free(ee->wa_batchbuffer);
		i915_error_object_free(ee->ringbuffer);
		i915_error_object_free(ee->hws_page);
		i915_error_object_free(ee->ctx);
		i915_error_object_free(ee->wa_ctx);

		kfree(ee->requests);
		if (!IS_ERR_OR_NULL(ee->waiters))
			kfree(ee->waiters);
	}

	i915_error_object_free(error->semaphore);
	i915_error_object_free(error->guc_log);

	for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
		kfree(error->active_bo[i]);
	kfree(error->pinned_bo);

	kfree(error->overlay);
	kfree(error->display);

#define FREE(T, x) free_param(#T, &error->params.x);
	I915_PARAMS_FOR_EACH(FREE);
#undef FREE

	kfree(error);
}

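/*
 * Copy the contents of a vma into freshly allocated pages. Each backing
 * page is bound in turn into the single GGTT slot reserved for error
 * capture, read back through an atomic write-combining mapping and fed to
 * compress_page(). This runs in atomic context, hence GFP_ATOMIC
 * throughout.
 */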
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *i915,
			 struct i915_vma *vma)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	const u64 slot = ggtt->error_capture.start;
	struct drm_i915_error_object *dst;
	struct compress compress;
	unsigned long num_pages;
	struct sgt_iter iter;
	dma_addr_t dma;

	if (!vma)
		return NULL;

	num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
	num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */
	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *),
		      GFP_ATOMIC | __GFP_NOWARN);
	if (!dst)
		return NULL;

	dst->gtt_offset = vma->node.start;
	dst->gtt_size = vma->node.size;
	dst->page_count = 0;
	dst->unused = 0;

	if (!compress_init(&compress)) {
		kfree(dst);
		return NULL;
	}

	for_each_sgt_dma(dma, iter, vma->pages) {
		void __iomem *s;
		int ret;

		ggtt->base.insert_page(&ggtt->base, dma, slot,
				       I915_CACHE_NONE, 0);

		s = io_mapping_map_atomic_wc(&ggtt->mappable, slot);
		ret = compress_page(&compress, (void __force *)s, dst);
		io_mapping_unmap_atomic(s);

		if (ret)
			goto unwind;
	}
	goto out;

unwind:
	while (dst->page_count--)
		free_page((unsigned long)dst->pages[dst->page_count]);
	kfree(dst);
	dst = NULL;

out:
	compress_fini(&compress, dst);
	ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
	return dst;
}

/* The error capture is special as it tries to run underneath the normal
 * locking rules - so we use the raw version of the i915_gem_active lookup.
 */
static inline uint32_t
__active_get_seqno(struct i915_gem_active *active)
{
	struct drm_i915_gem_request *request;

	request = __i915_gem_active_peek(active);
	return request ? request->global_seqno : 0;
}

static inline int
__active_get_engine_id(struct i915_gem_active *active)
{
	struct drm_i915_gem_request *request;

	request = __i915_gem_active_peek(active);
	return request ? request->engine->id : -1;
}

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int i;

	err->size = obj->base.size;
	err->name = obj->base.name;

	for (i = 0; i < I915_NUM_ENGINES; i++)
		err->rseqno[i] = __active_get_seqno(&vma->last_read[i]);
	err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
	err->engine = __active_get_engine_id(&obj->frontbuffer_write);

	err->gtt_offset = vma->node.start;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = vma->fence ? vma->fence->id : -1;
	err->tiling = i915_gem_object_get_tiling(obj);
	err->dirty = obj->mm.dirty;
	err->purgeable = obj->mm.madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->cache_level = obj->cache_level;
}

static u32 capture_error_bo(struct drm_i915_error_buffer *err,
			    int count, struct list_head *head,
			    bool pinned_only)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, vm_link) {
		if (pinned_only && !i915_vma_is_pinned(vma))
			continue;

		capture_bo(err++, vma);
		if (++i == count)
			break;
	}

	return i;
}

/* Generate a semi-unique error code. The code is not meant to have meaning,
 * its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct i915_gpu_state *error,
					 int *engine_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR is the gross measure of "the command that hung", so XOR it
	 * with the INSTDONE snapshot of the stalled engine for a best-guess
	 * error code. It does not need to be exact, only stable enough to
	 * tell duplicated hang reports apart.
	 */
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		if (error->engine[i].hangcheck_stalled) {
			if (engine_id)
				*engine_id = i;

			return error->engine[i].ipehr ^
			       error->engine[i].instdone.instdone;
		}
	}

	return error_code;
}

static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
				   struct i915_gpu_state *error)
{
	int i;

	if (INTEL_GEN(dev_priv) >= 6) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
	} else if (INTEL_GEN(dev_priv) >= 4) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
	} else {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ(FENCE_REG(i));
	}
	error->nfence = i;
}

static inline u32
gen8_engine_sync_index(struct intel_engine_cs *engine,
		       struct intel_engine_cs *other)
{
	int idx;

	/*
	 * rcs  -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
	 * vcs  -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
	 * bcs  -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
	 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
	 */

	idx = (other - engine) - 1;
	if (idx < 0)
		idx += I915_NUM_ENGINES;

	return idx;
}

static void gen8_record_semaphore_state(struct i915_gpu_state *error,
					struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_engine_cs *to;
	enum intel_engine_id id;

	if (!error->semaphore)
		return;

	for_each_engine(to, dev_priv, id) {
		int idx;
		u16 signal_offset;
		u32 *tmp;

		if (engine == to)
			continue;

		signal_offset =
			(GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
		tmp = error->semaphore->pages[0];
		idx = gen8_engine_sync_index(engine, to);

		ee->semaphore_mboxes[idx] = tmp[signal_offset];
	}
}

static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;

	ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
	ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
	if (HAS_VEBOX(dev_priv))
		ee->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(engine->mmio_base));
}

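/*
 * Record the tasks currently waiting for a seqno on this engine. This may
 * run in hardirq context, so only trylock the breadcrumbs lock: count the
 * waiters under the lock, allocate outside it, then retake it for the
 * copy. If the lock cannot be taken, ERR_PTR(-EDEADLK) is stored so the
 * printer can report that the waiter list was unavailable.
 */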
static void error_record_engine_waiters(struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_error_waiter *waiter;
	struct rb_node *rb;
	int count;

	ee->num_waiters = 0;
	ee->waiters = NULL;

	if (RB_EMPTY_ROOT(&b->waiters))
		return;

	if (!spin_trylock_irq(&b->rb_lock)) {
		ee->waiters = ERR_PTR(-EDEADLK);
		return;
	}

	count = 0;
	for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
		count++;
	spin_unlock_irq(&b->rb_lock);

	waiter = NULL;
	if (count)
		waiter = kmalloc_array(count,
				       sizeof(struct drm_i915_error_waiter),
				       GFP_ATOMIC);
	if (!waiter)
		return;

	if (!spin_trylock_irq(&b->rb_lock)) {
		kfree(waiter);
		ee->waiters = ERR_PTR(-EDEADLK);
		return;
	}

	ee->waiters = waiter;
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		strcpy(waiter->comm, w->tsk->comm);
		waiter->pid = w->tsk->pid;
		waiter->seqno = w->seqno;
		waiter++;

		if (++ee->num_waiters == count)
			break;
	}
	spin_unlock_irq(&b->rb_lock);
}

static void error_record_engine_registers(struct i915_gpu_state *error,
					  struct intel_engine_cs *engine,
					  struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) >= 6) {
		ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
		ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
		if (INTEL_GEN(dev_priv) >= 8)
			gen8_record_semaphore_state(error, engine, ee);
		else
			gen6_record_semaphore_state(engine, ee);
	}

	if (INTEL_GEN(dev_priv) >= 4) {
		ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
		ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
		ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
		ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
		ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
		if (INTEL_GEN(dev_priv) >= 8) {
			ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
			ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
		}
		ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
	} else {
		ee->faddr = I915_READ(DMA_FADD_I8XX);
		ee->ipeir = I915_READ(IPEIR);
		ee->ipehr = I915_READ(IPEHR);
	}

	intel_engine_get_instdone(engine, &ee->instdone);

	ee->waiting = intel_engine_has_waiter(engine);
	ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
	ee->acthd = intel_engine_get_active_head(engine);
	ee->seqno = intel_engine_get_seqno(engine);
	ee->last_seqno = intel_engine_last_submit(engine);
	ee->start = I915_READ_START(engine);
	ee->head = I915_READ_HEAD(engine);
	ee->tail = I915_READ_TAIL(engine);
	ee->ctl = I915_READ_CTL(engine);
	if (INTEL_GEN(dev_priv) > 2)
		ee->mode = I915_READ_MODE(engine);

	if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
		i915_reg_t mmio;

		if (IS_GEN7(dev_priv)) {
			switch (engine->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(engine->i915)) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ee->hws = I915_READ(mmio);
	}

	ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
	ee->hangcheck_action = engine->hangcheck.action;
	ee->hangcheck_stalled = engine->hangcheck.stalled;

	if (USES_PPGTT(dev_priv)) {
		int i;

		ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));

		if (IS_GEN6(dev_priv))
			ee->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(engine));
		else if (IS_GEN7(dev_priv))
			ee->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(engine));
		else if (INTEL_GEN(dev_priv) >= 8)
			for (i = 0; i < 4; i++) {
				ee->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(engine, i));
				ee->vm_info.pdp[i] <<= 32;
				ee->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(engine, i));
			}
	}
}

static void record_request(struct drm_i915_gem_request *request,
			   struct drm_i915_error_request *erq)
{
	erq->context = request->ctx->hw_id;
	erq->ban_score = request->ctx->ban_score;
	erq->seqno = request->global_seqno;
	erq->jiffies = request->emitted_jiffies;
	erq->head = request->head;
	erq->tail = request->tail;

	rcu_read_lock();
	erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
	rcu_read_unlock();
}

static void engine_record_requests(struct intel_engine_cs *engine,
				   struct drm_i915_gem_request *first,
				   struct drm_i915_error_engine *ee)
{
	struct drm_i915_gem_request *request;
	int count;

	count = 0;
	request = first;
	list_for_each_entry_from(request, &engine->timeline->requests, link)
		count++;
	if (!count)
		return;

	ee->requests = kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC);
	if (!ee->requests)
		return;

	ee->num_requests = count;

	count = 0;
	request = first;
	list_for_each_entry_from(request, &engine->timeline->requests, link) {
		if (count >= ee->num_requests) {
			/*
			 * If the request list changed between the point
			 * where it was counted and dimensioned above and
			 * this copy loop, just stop early rather than
			 * write past the end of the array.
			 *
			 * The resulting error state is then slightly
			 * incomplete, but that is preferable to crashing
			 * in the middle of the error capture;
			 * ee->num_requests is fixed up below to match
			 * what was actually copied.
			 */
			break;
		}

		record_request(request, &ee->requests[count++]);
	}
	ee->num_requests = count;
}

static void error_record_engine_execlists(struct intel_engine_cs *engine,
					  struct drm_i915_error_engine *ee)
{
	unsigned int n;

	for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
		if (engine->execlist_port[n].request)
			record_request(engine->execlist_port[n].request,
				       &ee->execlist[n]);
}

static void record_context(struct drm_i915_error_context *e,
			   struct i915_gem_context *ctx)
{
	if (ctx->pid) {
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(ctx->pid, PIDTYPE_PID);
		if (task) {
			strcpy(e->comm, task->comm);
			e->pid = task->pid;
		}
		rcu_read_unlock();
	}

	e->handle = ctx->user_handle;
	e->hw_id = ctx->hw_id;
	e->ban_score = ctx->ban_score;
	e->guilty = ctx->guilty_count;
	e->active = ctx->active_count;
}

static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
				  struct i915_gpu_state *error)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int i;

	error->semaphore =
		i915_error_object_create(dev_priv, dev_priv->semaphore);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_engine_cs *engine = dev_priv->engine[i];
		struct drm_i915_error_engine *ee = &error->engine[i];
		struct drm_i915_gem_request *request;

		ee->engine_id = -1;

		if (!engine)
			continue;

		ee->engine_id = i;

		error_record_engine_registers(error, engine, ee);
		error_record_engine_waiters(engine, ee);
		error_record_engine_execlists(engine, ee);

		request = i915_gem_find_active_request(engine);
		if (request) {
			struct intel_ring *ring;

			ee->vm = request->ctx->ppgtt ?
				&request->ctx->ppgtt->base : &ggtt->base;

			record_context(&ee->context, request->ctx);

			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			ee->batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch);

			if (HAS_BROKEN_CS_TLB(dev_priv))
				ee->wa_batchbuffer =
					i915_error_object_create(dev_priv,
								 engine->scratch);

			ee->ctx =
				i915_error_object_create(dev_priv,
							 request->ctx->engine[i].state);

			error->simulated |=
				i915_gem_context_no_error_capture(request->ctx);

			ee->rq_head = request->head;
			ee->rq_post = request->postfix;
			ee->rq_tail = request->tail;

			ring = request->ring;
			ee->cpu_ring_head = ring->head;
			ee->cpu_ring_tail = ring->tail;
			ee->ringbuffer =
				i915_error_object_create(dev_priv, ring->vma);

			engine_record_requests(engine, request, ee);
		}

		ee->hws_page =
			i915_error_object_create(dev_priv,
						 engine->status_page.vma);

		ee->wa_ctx =
			i915_error_object_create(dev_priv, engine->wa_ctx.vma);
	}
}

static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct i915_gpu_state *error,
				struct i915_address_space *vm,
				int idx)
{
	struct drm_i915_error_buffer *active_bo;
	struct i915_vma *vma;
	int count;

	count = 0;
	list_for_each_entry(vma, &vm->active_list, vm_link)
		count++;

	active_bo = NULL;
	if (count)
		active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
	if (active_bo)
		count = capture_error_bo(active_bo, count, &vm->active_list, false);
	else
		count = 0;

	error->active_vm[idx] = vm;
	error->active_bo[idx] = active_bo;
	error->active_bo_count[idx] = count;
}

static void i915_capture_active_buffers(struct drm_i915_private *dev_priv,
					struct i915_gpu_state *error)
{
	int cnt = 0, i, j;

	BUILD_BUG_ON(ARRAY_SIZE(error->engine) > ARRAY_SIZE(error->active_bo));
	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_vm));
	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_bo_count));

	/* Scan each engine looking for unique active contexts/vm */
	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		struct drm_i915_error_engine *ee = &error->engine[i];
		bool found;

		if (!ee->vm)
			continue;

		found = false;
		for (j = 0; j < i && !found; j++)
			found = error->engine[j].vm == ee->vm;
		if (!found)
			i915_gem_capture_vm(dev_priv, error, ee->vm, cnt++);
	}
}

static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
					struct i915_gpu_state *error)
{
	struct i915_address_space *vm = &dev_priv->ggtt.base;
	struct drm_i915_error_buffer *bo;
	struct i915_vma *vma;
	int count_active, count_inactive;

	count_active = 0;
	list_for_each_entry(vma, &vm->active_list, vm_link)
		count_active++;

	count_inactive = 0;
	list_for_each_entry(vma, &vm->inactive_list, vm_link)
		count_inactive++;

	bo = NULL;
	if (count_active + count_inactive)
		bo = kcalloc(count_active + count_inactive,
			     sizeof(*bo), GFP_ATOMIC);
	if (!bo)
		return;

	count_active = capture_error_bo(bo, count_active,
					&vm->active_list, true);
	count_inactive = capture_error_bo(bo + count_active, count_inactive,
					  &vm->inactive_list, true);
	error->pinned_bo_count = count_active + count_inactive;
	error->pinned_bo = bo;
}

static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
					    struct i915_gpu_state *error)
{
	/* Capturing log buf contents won't be useful if logging was disabled */
	if (!dev_priv->guc.log.vma || (i915.guc_log_level < 0))
		return;

	error->guc_log = i915_error_object_create(dev_priv,
						  dev_priv->guc.log.vma);
}

/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct i915_gpu_state *error)
{
	int i;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev_priv)) {
		error->gtier[0] = I915_READ(GTIER);
		error->ier = I915_READ(VLV_IER);
		error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev_priv))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (INTEL_GEN(dev_priv) >= 8) {
		error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
		error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
	}

	if (IS_GEN6(dev_priv)) {
		error->forcewake = I915_READ_FW(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_GEN(dev_priv) >= 7)
		error->forcewake = I915_READ_FW(FORCEWAKE_MT);

	if (INTEL_GEN(dev_priv) >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	/* 3: Feature specific registers */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (HAS_HW_CONTEXTS(dev_priv))
		error->ccid = I915_READ(CCID);

	if (INTEL_GEN(dev_priv) >= 8) {
		error->ier = I915_READ(GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
		error->ngtier = 4;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		error->ier = I915_READ(DEIER);
		error->gtier[0] = I915_READ(GTIER);
		error->ngtier = 1;
	} else if (IS_GEN2(dev_priv)) {
		error->ier = I915_READ16(IER);
	} else if (!IS_VALLEYVIEW(dev_priv)) {
		error->ier = I915_READ(IER);
	}
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
}

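/*
 * Build the one-line summary ("GPU HANG: ecode ...") that is stored in
 * the error state header and echoed to the kernel log, naming the guilty
 * process when one was identified.
 */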
static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
				   struct i915_gpu_state *error,
				   u32 engine_mask,
				   const char *error_msg)
{
	u32 ecode;
	int engine_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &engine_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%d:0x%08x",
			INTEL_GEN(dev_priv), engine_id, ecode);

	if (engine_id != -1 && error->engine[engine_id].context.pid)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->engine[engine_id].context.comm,
				 error->engine[engine_id].context.pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  engine_mask ? "reset" : "continue");
}

static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct i915_gpu_state *error)
{
	error->awake = dev_priv->gt.awake;
	error->wakelock = atomic_read(&dev_priv->pm.wakeref_count);
	error->suspended = dev_priv->pm.suspended;

	error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
	error->iommu = intel_iommu_gfx_mapped;
#endif
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;

	memcpy(&error->device_info,
	       INTEL_INFO(dev_priv),
	       sizeof(error->device_info));
}

static __always_inline void dup_param(const char *type, void *x)
{
	if (!__builtin_strcmp(type, "char *"))
		*(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
}

static int capture(void *data)
{
	struct i915_gpu_state *error = data;

	do_gettimeofday(&error->time);
	error->boottime = ktime_to_timeval(ktime_get_boottime());
	error->uptime =
		ktime_to_timeval(ktime_sub(ktime_get(),
					   error->i915->gt.last_init_time));

	error->params = i915;
#define DUP(T, x) dup_param(#T, &error->params.x);
	I915_PARAMS_FOR_EACH(DUP);
#undef DUP

	i915_capture_gen_state(error->i915, error);
	i915_capture_reg_state(error->i915, error);
	i915_gem_record_fences(error->i915, error);
	i915_gem_record_rings(error->i915, error);
	i915_capture_active_buffers(error->i915, error);
	i915_capture_pinned_buffers(error->i915, error);
	i915_gem_capture_guc_log_buffer(error->i915, error);

	error->overlay = intel_overlay_capture_error_state(error->i915);
	error->display = intel_display_capture_error_state(error->i915);

	return 0;
}

#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))

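/*
 * Snapshot the entire GPU state. capture() runs under stop_machine() so
 * that the registers, request lists and buffer lists are read without any
 * other CPU mutating them mid-capture.
 */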
struct i915_gpu_state *
i915_capture_gpu_state(struct drm_i915_private *i915)
{
	struct i915_gpu_state *error;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error)
		return NULL;

	kref_init(&error->ref);
	error->i915 = i915;

	stop_machine(capture, error, NULL);

	return error;
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev_priv: i915 device private
 * @engine_mask: mask of engines considered hung; 0 means no reset follows
 * @error_msg: short description of the reason for the capture
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      u32 engine_mask,
			      const char *error_msg)
{
	static bool warned;
	struct i915_gpu_state *error;
	unsigned long flags;

	if (!i915.error_capture)
		return;

	if (READ_ONCE(dev_priv->gpu_error.first_error))
		return;

	error = i915_capture_gpu_state(dev_priv);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	if (!error->simulated) {
		spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
		if (!dev_priv->gpu_error.first_error) {
			dev_priv->gpu_error.first_error = error;
			error = NULL;
		}
		spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	}

	if (error) {
		__i915_gpu_state_free(&error->ref);
		return;
	}

	if (!warned &&
	    ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
			 dev_priv->drm.primary->index);
		warned = true;
	}
}

struct i915_gpu_state *
i915_first_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_state *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (error)
		i915_gpu_state_get(error);
	spin_unlock_irq(&i915->gpu_error.lock);

	return error;
}

void i915_reset_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_state *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	i915->gpu_error.first_error = NULL;
	spin_unlock_irq(&i915->gpu_error.lock);

	i915_gpu_state_put(error);
}