/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <linux/ascii85.h>
#include <linux/nmi.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/utsname.h>
#include <linux/zlib.h>

#include <drm/drm_print.h>

#include "display/intel_atomic.h"
#include "display/intel_csr.h"
#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_memcpy.h"
#include "i915_scatterlist.h"

#define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)

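/*
 * The error state is built up as a chain of scatterlist entries, each
 * pointing at a kmalloc'd text buffer. sg->dma_address is reused to
 * remember each buffer's byte offset within the overall dump, so that
 * readers can seek without walking the whole chain.
 */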
static void __sg_set_buf(struct scatterlist *sg,
			 void *addr, unsigned int len, loff_t it)
{
	sg->page_link = (unsigned long)virt_to_page(addr);
	sg->offset = offset_in_page(addr);
	sg->length = len;
	sg->dma_address = it;
}

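/*
 * Ensure there is room for at least @len + 1 bytes: close out the current
 * buffer as a scatterlist entry, chain on a fresh page of entries if the
 * table is full, and allocate a new buffer (preferring 64KiB chunks, but
 * falling back to the smallest page-aligned size under memory pressure).
 */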
static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
{
	if (!len)
		return false;

	if (e->bytes + len + 1 <= e->size)
		return true;

	if (e->bytes) {
		__sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
		e->iter += e->bytes;
		e->buf = NULL;
		e->bytes = 0;
	}

	if (e->cur == e->end) {
		struct scatterlist *sgl;

		sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL);
		if (!sgl) {
			e->err = -ENOMEM;
			return false;
		}

		if (e->cur) {
			e->cur->offset = 0;
			e->cur->length = 0;
			e->cur->page_link =
				(unsigned long)sgl | SG_CHAIN;
		} else {
			e->sgl = sgl;
		}

		e->cur = sgl;
		e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
	}

	e->size = ALIGN(len + 1, SZ_64K);
	e->buf = kmalloc(e->size, ALLOW_FAIL);
	if (!e->buf) {
		e->size = PAGE_ALIGN(len + 1);
		e->buf = kmalloc(e->size, GFP_KERNEL);
	}
	if (!e->buf) {
		e->err = -ENOMEM;
		return false;
	}

	return true;
}

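/* Measure the formatted output first so the buffer can be grown to fit. */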
__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *fmt, va_list args)
{
	va_list ap;
	int len;

	if (e->err)
		return;

	va_copy(ap, args);
	len = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);
	if (len <= 0) {
		e->err = len;
		return;
	}

	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes >= e->size);
	len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
	if (len < 0) {
		e->err = len;
		return;
	}
	e->bytes += len;
}

static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
{
	unsigned len;

	if (e->err || !str)
		return;

	len = strlen(str);
	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes + len > e->size);
	memcpy(e->buf + e->bytes, str, len);
	e->bytes += len;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)

static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
{
	i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
}

static inline struct drm_printer
i915_error_printer(struct drm_i915_error_state_buf *e)
{
	struct drm_printer p = {
		.printfn = __i915_printfn_error,
		.arg = e,
	};
	return p;
}

/* single threaded page allocator with a reserved stash for emergencies */
static void pool_fini(struct pagevec *pv)
{
	pagevec_release(pv);
}

static int pool_refill(struct pagevec *pv, gfp_t gfp)
{
	while (pagevec_space(pv)) {
		struct page *p;

		p = alloc_page(gfp);
		if (!p)
			return -ENOMEM;

		pagevec_add(pv, p);
	}

	return 0;
}

static int pool_init(struct pagevec *pv, gfp_t gfp)
{
	int err;

	pagevec_init(pv);

	err = pool_refill(pv, gfp);
	if (err)
		pool_fini(pv);

	return err;
}

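/*
 * Prefer a fresh page; only when that allocation fails do we dip into
 * the stash filled by pool_init()/pool_refill(), which is what keeps
 * capture working once we may no longer sleep.
 */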
static void *pool_alloc(struct pagevec *pv, gfp_t gfp)
{
	struct page *p;

	p = alloc_page(gfp);
	if (!p && pagevec_count(pv))
		p = pv->pages[--pv->nr];

	return p ? page_address(p) : NULL;
}

static void pool_free(struct pagevec *pv, void *addr)
{
	struct page *p = virt_to_page(addr);

	if (pagevec_space(pv))
		pagevec_add(pv, p);
	else
		__free_page(p);
}

#ifdef CONFIG_DRM_I915_COMPRESS_ERROR

struct i915_vma_compress {
	struct pagevec pool;
	struct z_stream_s zstream;
	void *tmp;
};

static bool compress_init(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;

	if (pool_init(&c->pool, ALLOW_FAIL))
		return false;

	zstream->workspace =
		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			ALLOW_FAIL);
	if (!zstream->workspace) {
		pool_fini(&c->pool);
		return false;
	}

	c->tmp = NULL;
	if (i915_has_memcpy_from_wc())
		c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);

	return true;
}

static bool compress_start(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;
	void *workspace = zstream->workspace;

	memset(zstream, 0, sizeof(*zstream));
	zstream->workspace = workspace;

	return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
}

static void *compress_next_page(struct i915_vma_compress *c,
				struct i915_vma_coredump *dst)
{
	void *page;

	if (dst->page_count >= dst->num_pages)
		return ERR_PTR(-ENOSPC);

	page = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!page)
		return ERR_PTR(-ENOMEM);

	return dst->pages[dst->page_count++] = page;
}

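/*
 * Deflate one page of source into the coredump, pulling fresh output
 * pages from the pool as zlib fills them. Write-combined sources are
 * staged through c->tmp using the accelerated WC memcpy when available.
 */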
static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	struct z_stream_s *zstream = &c->zstream;

	zstream->next_in = src;
	if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
		zstream->next_in = c->tmp;
	zstream->avail_in = PAGE_SIZE;

	do {
		if (zstream->avail_out == 0) {
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
		}

		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
			return -EIO;

		cond_resched();
	} while (zstream->avail_in);

	/* Fallback to uncompressed if we increase size? */
	if (0 && zstream->total_out > zstream->total_in)
		return -E2BIG;

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	do {
		switch (zlib_deflate(zstream, Z_FINISH)) {
		case Z_OK: /* more space requested */
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
			break;

		case Z_STREAM_END:
			goto end;

		default: /* any error */
			return -EIO;
		}
	} while (1);

end:
	memset(zstream->next_out, 0, zstream->avail_out);
	dst->unused = zstream->avail_out;
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
	zlib_deflateEnd(&c->zstream);
}

static void compress_fini(struct i915_vma_compress *c)
{
	kfree(c->zstream.workspace);
	if (c->tmp)
		pool_free(&c->pool, c->tmp);
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, ":");
}

#else

struct i915_vma_compress {
	struct pagevec pool;
};

static bool compress_init(struct i915_vma_compress *c)
{
	return pool_init(&c->pool, ALLOW_FAIL) == 0;
}

static bool compress_start(struct i915_vma_compress *c)
{
	return true;
}

static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	void *ptr;

	ptr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!ptr)
		return -ENOMEM;

	if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
		memcpy(ptr, src, PAGE_SIZE);
	dst->pages[dst->page_count++] = ptr;
	cond_resched();

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
}

static void compress_fini(struct i915_vma_compress *c)
{
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, "~");
}

#endif

static void error_print_instdone(struct drm_i915_error_state_buf *m,
				 const struct intel_engine_coredump *ee)
{
	const struct sseu_dev_info *sseu = &ee->engine->gt->info.sseu;
	int slice;
	int subslice;

	err_printf(m, "  INSTDONE: 0x%08x\n",
		   ee->instdone.instdone);

	if (ee->engine->class != RENDER_CLASS || INTEL_GEN(m->i915) <= 3)
		return;

	err_printf(m, "  SC_INSTDONE: 0x%08x\n",
		   ee->instdone.slice_common);

	if (INTEL_GEN(m->i915) <= 6)
		return;

	for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
		err_printf(m, "  SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.sampler[slice][subslice]);

	for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
		err_printf(m, "  ROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.row[slice][subslice]);

	if (INTEL_GEN(m->i915) < 12)
		return;

	err_printf(m, "  SC_INSTDONE_EXTRA: 0x%08x\n",
		   ee->instdone.slice_common_extra[0]);
	err_printf(m, "  SC_INSTDONE_EXTRA2: 0x%08x\n",
		   ee->instdone.slice_common_extra[1]);
}

static void error_print_request(struct drm_i915_error_state_buf *m,
				const char *prefix,
				const struct i915_request_coredump *erq)
{
	if (!erq->seqno)
		return;

	err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n",
		   prefix, erq->pid, erq->context, erq->seqno,
		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			    &erq->flags) ? "!" : "",
		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			    &erq->flags) ? "+" : "",
		   erq->sched_attr.priority,
		   erq->head, erq->tail);
}

static void error_print_context(struct drm_i915_error_state_buf *m,
				const char *header,
				const struct i915_gem_context_coredump *ctx)
{
	const u32 period = RUNTIME_INFO(m->i915)->cs_timestamp_period_ns;

	err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
		   header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
		   ctx->guilty, ctx->active,
		   ctx->total_runtime * period,
		   mul_u32_u32(ctx->avg_runtime, period));
}

static struct i915_vma_coredump *
__find_vma(struct i915_vma_coredump *vma, const char *name)
{
	while (vma) {
		if (strcmp(vma->name, name) == 0)
			return vma;
		vma = vma->next;
	}

	return NULL;
}

static struct i915_vma_coredump *
find_batch(const struct intel_engine_coredump *ee)
{
	return __find_vma(ee->vma, "batch");
}

static void error_print_engine(struct drm_i915_error_state_buf *m,
			       const struct intel_engine_coredump *ee)
{
	struct i915_vma_coredump *batch;
	int n;

	err_printf(m, "%s command stream:\n", ee->engine->name);
	err_printf(m, "  CCID: 0x%08x\n", ee->ccid);
	err_printf(m, "  START: 0x%08x\n", ee->start);
	err_printf(m, "  HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
	err_printf(m, "  TAIL: 0x%08x [0x%08x, 0x%08x]\n",
		   ee->tail, ee->rq_post, ee->rq_tail);
	err_printf(m, "  CTL: 0x%08x\n", ee->ctl);
	err_printf(m, "  MODE: 0x%08x\n", ee->mode);
	err_printf(m, "  HWS: 0x%08x\n", ee->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n",
		   (u32)(ee->acthd>>32), (u32)ee->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ee->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ee->ipehr);
	err_printf(m, "  ESR: 0x%08x\n", ee->esr);

	error_print_instdone(m, ee);

	batch = find_batch(ee);
	if (batch) {
		u64 start = batch->gtt_offset;
		u64 end = start + batch->gtt_size;

		err_printf(m, "  batch: [0x%08x_%08x, 0x%08x_%08x]\n",
			   upper_32_bits(start), lower_32_bits(start),
			   upper_32_bits(end), lower_32_bits(end));
	}
	if (INTEL_GEN(m->i915) >= 4) {
		err_printf(m, "  BBADDR: 0x%08x_%08x\n",
			   (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ee->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ee->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ee->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
		   lower_32_bits(ee->faddr));
	if (INTEL_GEN(m->i915) >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ee->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ee->fault_reg);
	}
	if (HAS_PPGTT(m->i915)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);

		if (INTEL_GEN(m->i915) >= 8) {
			int i;

			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ee->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ee->vm_info.pp_dir_base);
		}
	}
	err_printf(m, "  hung: %u\n", ee->hung);
	err_printf(m, "  engine reset count: %u\n", ee->reset_count);

	for (n = 0; n < ee->num_ports; n++) {
		err_printf(m, "  ELSP[%d]:", n);
		error_print_request(m, " ", &ee->execlist[n]);
	}

	error_print_context(m, "  Active context: ", &ee->context);
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

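/*
 * Dump one captured vma: a header line with its name and GGTT offset,
 * then the page contents encoded as ascii85, preceded by ':' (zlib
 * deflated) or '~' (uncompressed) from err_compression_marker().
 */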
static void print_error_vma(struct drm_i915_error_state_buf *m,
			    const struct intel_engine_cs *engine,
			    const struct i915_vma_coredump *vma)
{
	char out[ASCII85_BUFSZ];
	int page;

	if (!vma)
		return;

	err_printf(m, "%s --- %s = 0x%08x %08x\n",
		   engine ? engine->name : "global", vma->name,
		   upper_32_bits(vma->gtt_offset),
		   lower_32_bits(vma->gtt_offset));

	if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
		err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes);

	err_compression_marker(m);
	for (page = 0; page < vma->page_count; page++) {
		int i, len;

		len = PAGE_SIZE;
		if (page == vma->page_count - 1)
			len -= vma->unused;
		len = ascii85_encode_len(len);

		for (i = 0; i < len; i++)
			err_puts(m, ascii85_encode(vma->pages[page][i], out));
	}
	err_puts(m, "\n");
}

static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   struct i915_gpu_coredump *error)
{
	struct drm_printer p = i915_error_printer(m);

	intel_device_info_print_static(&error->device_info, &p);
	intel_device_info_print_runtime(&error->runtime_info, &p);
	intel_driver_caps_print(&error->driver_caps, &p);
}

static void err_print_params(struct drm_i915_error_state_buf *m,
			     const struct i915_params *params)
{
	struct drm_printer p = i915_error_printer(m);

	i915_params_dump(params, &p);
}

static void err_print_pciid(struct drm_i915_error_state_buf *m,
			    struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;

	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   pdev->subsystem_vendor,
		   pdev->subsystem_device);
}

static void err_print_uc(struct drm_i915_error_state_buf *m,
			 const struct intel_uc_coredump *error_uc)
{
	struct drm_printer p = i915_error_printer(m);

	intel_uc_fw_dump(&error_uc->guc_fw, &p);
	intel_uc_fw_dump(&error_uc->huc_fw, &p);
	print_error_vma(m, NULL, error_uc->guc_log);
}

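/*
 * Free every text buffer hanging off the chain, then the pages that
 * hold the scatterlist entries themselves.
 */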
static void err_free_sgl(struct scatterlist *sgl)
{
	while (sgl) {
		struct scatterlist *sg;

		for (sg = sgl; !sg_is_chain(sg); sg++) {
			kfree(sg_virt(sg));
			if (sg_is_last(sg))
				break;
		}

		sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
		free_page((unsigned long)sgl);
		sgl = sg;
	}
}

static void err_print_gt_info(struct drm_i915_error_state_buf *m,
			      struct intel_gt_coredump *gt)
{
	struct drm_printer p = i915_error_printer(m);

	intel_gt_info_print(&gt->info, &p);
	intel_sseu_print_topology(&gt->info.sseu, &p);
}

static void err_print_gt(struct drm_i915_error_state_buf *m,
			 struct intel_gt_coredump *gt)
{
	const struct intel_engine_coredump *ee;
	int i;

	err_printf(m, "GT awake: %s\n", yesno(gt->awake));
	err_printf(m, "EIR: 0x%08x\n", gt->eir);
	err_printf(m, "IER: 0x%08x\n", gt->ier);
	for (i = 0; i < gt->ngtier; i++)
		err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);

	for (i = 0; i < gt->nfence; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, gt->fence[i]);

	if (IS_GEN_RANGE(m->i915, 6, 11)) {
		err_printf(m, "ERROR: 0x%08x\n", gt->error);
		err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
	}

	if (INTEL_GEN(m->i915) >= 8)
		err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
			   gt->fault_data1, gt->fault_data0);

	if (IS_GEN(m->i915, 7))
		err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);

	if (IS_GEN_RANGE(m->i915, 8, 11))
		err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);

	if (IS_GEN(m->i915, 12))
		err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);

	if (INTEL_GEN(m->i915) >= 12) {
		int i;

		for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
			err_printf(m, "  SFC_DONE[%d]: 0x%08x\n", i,
				   gt->sfc_done[i]);

		err_printf(m, "  GAM_DONE: 0x%08x\n", gt->gam_done);
	}

	for (ee = gt->engine; ee; ee = ee->next) {
		const struct i915_vma_coredump *vma;

		error_print_engine(m, ee);
		for (vma = ee->vma; vma; vma = vma->next)
			print_error_vma(m, ee->engine, vma);
	}

	if (gt->uc)
		err_print_uc(m, gt->uc);

	err_print_gt_info(m, gt);
}

static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
			       struct i915_gpu_coredump *error)
{
	const struct intel_engine_coredump *ee;
	struct timespec64 ts;

	if (*error->error_msg)
		err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Kernel: %s %s\n",
		   init_utsname()->release,
		   init_utsname()->machine);
	err_printf(m, "Driver: %s\n", DRIVER_DATE);
	ts = ktime_to_timespec64(error->time);
	err_printf(m, "Time: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->boottime);
	err_printf(m, "Boottime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->uptime);
	err_printf(m, "Uptime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
		   error->capture, jiffies_to_msecs(jiffies - error->capture));

	for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next)
		err_printf(m, "Active process (on ring %s): %s [%d]\n",
			   ee->engine->name,
			   ee->context.comm,
			   ee->context.pid);

	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
	err_printf(m, "Subplatform: 0x%x\n",
		   intel_subplatform(&error->runtime_info,
				     error->device_info.platform));
	err_print_pciid(m, m->i915);

	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	if (HAS_CSR(m->i915)) {
		struct intel_csr *csr = &m->i915->csr;

		err_printf(m, "DMC loaded: %s\n",
			   yesno(csr->dmc_payload != NULL));
		err_printf(m, "DMC fw version: %d.%d\n",
			   CSR_VERSION_MAJOR(csr->version),
			   CSR_VERSION_MINOR(csr->version));
	}

	err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock));
	err_printf(m, "PM suspended: %s\n", yesno(error->suspended));

	if (error->gt)
		err_print_gt(m, error->gt);

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, error->display);

	err_print_capabilities(m, error);
	err_print_params(m, &error->params);
}

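/*
 * Render the whole error state into error->sgl exactly once; losers of
 * the cmpxchg race free their copy so concurrent readers share one dump.
 */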
static int err_print_to_sgl(struct i915_gpu_coredump *error)
{
	struct drm_i915_error_state_buf m;

	if (IS_ERR(error))
		return PTR_ERR(error);

	if (READ_ONCE(error->sgl))
		return 0;

	memset(&m, 0, sizeof(m));
	m.i915 = error->i915;

	__err_print_to_sgl(&m, error);

	if (m.buf) {
		__sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
		m.bytes = 0;
		m.buf = NULL;
	}
	if (m.cur) {
		GEM_BUG_ON(m.end < m.cur);
		sg_mark_end(m.cur - 1);
	}
	GEM_BUG_ON(m.sgl && !m.cur);

	if (m.err) {
		err_free_sgl(m.sgl);
		return m.err;
	}

	if (cmpxchg(&error->sgl, NULL, m.sgl))
		err_free_sgl(m.sgl);

	return 0;
}

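/*
 * Copy up to @rem bytes of the formatted error state into @buf, starting
 * at offset @off. The last scatterlist entry visited is cached in
 * error->fit so that a sequential reader resumes without rewalking the
 * chain from the beginning.
 */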
ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
					 char *buf, loff_t off, size_t rem)
{
	struct scatterlist *sg;
	size_t count;
	loff_t pos;
	int err;

	if (!error || !rem)
		return 0;

	err = err_print_to_sgl(error);
	if (err)
		return err;

	sg = READ_ONCE(error->fit);
	if (!sg || off < sg->dma_address)
		sg = error->sgl;
	if (!sg)
		return 0;

	pos = sg->dma_address;
	count = 0;
	do {
		size_t len, start;

		if (sg_is_chain(sg)) {
			sg = sg_chain_ptr(sg);
			GEM_BUG_ON(sg_is_chain(sg));
		}

		len = sg->length;
		if (pos + len <= off) {
			pos += len;
			continue;
		}

		start = sg->offset;
		if (pos < off) {
			GEM_BUG_ON(off - pos > len);
			len -= off - pos;
			start += off - pos;
			pos = off;
		}

		len = min(len, rem);
		GEM_BUG_ON(!len || len > sg->length);

		memcpy(buf, page_address(sg_page(sg)) + start, len);

		count += len;
		pos += len;

		buf += len;
		rem -= len;
		if (!rem) {
			WRITE_ONCE(error->fit, sg);
			break;
		}
	} while (!sg_is_last(sg++));

	return count;
}

static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
{
	while (vma) {
		struct i915_vma_coredump *next = vma->next;
		int page;

		for (page = 0; page < vma->page_count; page++)
			free_page((unsigned long)vma->pages[page]);

		kfree(vma);
		vma = next;
	}
}

static void cleanup_params(struct i915_gpu_coredump *error)
{
	i915_params_free(&error->params);
}

static void cleanup_uc(struct intel_uc_coredump *uc)
{
	kfree(uc->guc_fw.path);
	kfree(uc->huc_fw.path);
	i915_vma_coredump_free(uc->guc_log);

	kfree(uc);
}

static void cleanup_gt(struct intel_gt_coredump *gt)
{
	while (gt->engine) {
		struct intel_engine_coredump *ee = gt->engine;

		gt->engine = ee->next;

		i915_vma_coredump_free(ee->vma);
		kfree(ee);
	}

	if (gt->uc)
		cleanup_uc(gt->uc);

	kfree(gt);
}

void __i915_gpu_coredump_free(struct kref *error_ref)
{
	struct i915_gpu_coredump *error =
		container_of(error_ref, typeof(*error), ref);

	while (error->gt) {
		struct intel_gt_coredump *gt = error->gt;

		error->gt = gt->next;
		cleanup_gt(gt);
	}

	kfree(error->overlay);
	kfree(error->display);

	cleanup_params(error);

	err_free_sgl(error->sgl);
	kfree(error);
}

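/*
 * Snapshot the contents of a vma into freshly allocated pages. Depending
 * on where the backing pages live, they are read back either through the
 * reserved error-capture slot in the GGTT, through an iomap of local
 * memory, or by kmapping system pages directly (with clflushes around
 * the copy so we observe the GPU's view of the pages).
 */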
static struct i915_vma_coredump *
i915_vma_coredump_create(const struct intel_gt *gt,
			 const struct i915_vma *vma,
			 const char *name,
			 struct i915_vma_compress *compress)
{
	struct i915_ggtt *ggtt = gt->ggtt;
	const u64 slot = ggtt->error_capture.start;
	struct i915_vma_coredump *dst;
	unsigned long num_pages;
	struct sgt_iter iter;
	int ret;

	might_sleep();

	if (!vma || !vma->pages || !compress)
		return NULL;

	num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
	num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */
	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ALLOW_FAIL);
	if (!dst)
		return NULL;

	if (!compress_start(compress)) {
		kfree(dst);
		return NULL;
	}

	strcpy(dst->name, name);
	dst->next = NULL;

	dst->gtt_offset = vma->node.start;
	dst->gtt_size = vma->node.size;
	dst->gtt_page_sizes = vma->page_sizes.gtt;
	dst->num_pages = num_pages;
	dst->page_count = 0;
	dst->unused = 0;

	ret = -EINVAL;
	if (drm_mm_node_allocated(&ggtt->error_capture)) {
		void __iomem *s;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma->pages) {
			mutex_lock(&ggtt->error_mutex);
			ggtt->vm.insert_page(&ggtt->vm, dma, slot,
					     I915_CACHE_NONE, 0);
			mb();

			s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);

			mb();
			ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
			mutex_unlock(&ggtt->error_mutex);
			if (ret)
				break;
		}
	} else if (i915_gem_object_is_lmem(vma->obj)) {
		struct intel_memory_region *mem = vma->obj->mm.region;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma->pages) {
			void __iomem *s;

			s = io_mapping_map_wc(&mem->iomap, dma, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);
			if (ret)
				break;
		}
	} else {
		struct page *page;

		for_each_sgt_page(page, iter, vma->pages) {
			void *s;

			drm_clflush_pages(&page, 1);

			s = kmap(page);
			ret = compress_page(compress, s, dst, false);
			kunmap(page);

			drm_clflush_pages(&page, 1);

			if (ret)
				break;
		}
	}

	if (ret || compress_flush(compress, dst)) {
		while (dst->page_count--)
			pool_free(&compress->pool, dst->pages[dst->page_count]);
		kfree(dst);
		dst = NULL;
	}
	compress_finish(compress);

	return dst;
}

static void gt_record_fences(struct intel_gt_coredump *gt)
{
	struct i915_ggtt *ggtt = gt->_gt->ggtt;
	struct intel_uncore *uncore = gt->_gt->uncore;
	int i;

	if (INTEL_GEN(uncore->i915) >= 6) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_GEN6_LO(i));
	} else if (INTEL_GEN(uncore->i915) >= 4) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_965_LO(i));
	} else {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read(uncore, FENCE_REG(i));
	}
	gt->nfence = i;
}

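/* Read back the engine's register state, honouring per-generation layouts. */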
static void engine_record_registers(struct intel_engine_coredump *ee)
{
	const struct intel_engine_cs *engine = ee->engine;
	struct drm_i915_private *i915 = engine->i915;

	if (INTEL_GEN(i915) >= 6) {
		ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);

		if (INTEL_GEN(i915) >= 12)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN12_RING_FAULT_REG);
		else if (INTEL_GEN(i915) >= 8)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN8_RING_FAULT_REG);
		else
			ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
	}

	if (INTEL_GEN(i915) >= 4) {
		ee->esr = ENGINE_READ(engine, RING_ESR);
		ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
		ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
		ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
		ee->instps = ENGINE_READ(engine, RING_INSTPS);
		ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
		ee->ccid = ENGINE_READ(engine, CCID);
		if (INTEL_GEN(i915) >= 8) {
			ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
			ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
		}
		ee->bbstate = ENGINE_READ(engine, RING_BBSTATE);
	} else {
		ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX);
		ee->ipeir = ENGINE_READ(engine, IPEIR);
		ee->ipehr = ENGINE_READ(engine, IPEHR);
	}

	intel_engine_get_instdone(engine, &ee->instdone);

	ee->instpm = ENGINE_READ(engine, RING_INSTPM);
	ee->acthd = intel_engine_get_active_head(engine);
	ee->start = ENGINE_READ(engine, RING_START);
	ee->head = ENGINE_READ(engine, RING_HEAD);
	ee->tail = ENGINE_READ(engine, RING_TAIL);
	ee->ctl = ENGINE_READ(engine, RING_CTL);
	if (INTEL_GEN(i915) > 2)
		ee->mode = ENGINE_READ(engine, RING_MI_MODE);

	if (!HWS_NEEDS_PHYSICAL(i915)) {
		i915_reg_t mmio;

		if (IS_GEN(i915, 7)) {
			switch (engine->id) {
			default:
				MISSING_CASE(engine->id);
				fallthrough;
			case RCS0:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS0:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS0:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS0:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN(engine->i915, 6)) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ee->hws = intel_uncore_read(engine->uncore, mmio);
	}

	ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine);

	if (HAS_PPGTT(i915)) {
		int i;

		ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);

		if (IS_GEN(i915, 6)) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
		} else if (IS_GEN(i915, 7)) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE);
		} else if (INTEL_GEN(i915) >= 8) {
			u32 base = engine->mmio_base;

			for (i = 0; i < 4; i++) {
				ee->vm_info.pdp[i] =
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_UDW(base, i));
				ee->vm_info.pdp[i] <<= 32;
				ee->vm_info.pdp[i] |=
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_LDW(base, i));
			}
		}
	}
}

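/* Snapshot the fields of a request; its owner's pid is resolved under RCU. */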
static void record_request(const struct i915_request *request,
			   struct i915_request_coredump *erq)
{
	erq->flags = request->fence.flags;
	erq->context = request->fence.context;
	erq->seqno = request->fence.seqno;
	erq->sched_attr = request->sched.attr;
	erq->head = request->head;
	erq->tail = request->tail;

	erq->pid = 0;
	rcu_read_lock();
	if (!intel_context_is_closed(request->context)) {
		const struct i915_gem_context *ctx;

		ctx = rcu_dereference(request->context->gem_context);
		if (ctx)
			erq->pid = pid_nr(ctx->pid);
	}
	rcu_read_unlock();
}

static void engine_record_execlists(struct intel_engine_coredump *ee)
{
	const struct intel_engine_execlists * const el = &ee->engine->execlists;
	struct i915_request * const *port = el->active;
	unsigned int n = 0;

	while (*port)
		record_request(*port++, &ee->execlist[n++]);

	ee->num_ports = n;
}

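/*
 * Snapshot the GEM context behind the hung request. Returns true if
 * error capture should be suppressed for this hang, i.e. the context
 * opted out of error capture (as simulated hangs in the selftests do).
 */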
static bool record_context(struct i915_gem_context_coredump *e,
			   const struct i915_request *rq)
{
	struct i915_gem_context *ctx;
	struct task_struct *task;
	bool simulated;

	rcu_read_lock();
	ctx = rcu_dereference(rq->context->gem_context);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();
	if (!ctx)
		return true;

	rcu_read_lock();
	task = pid_task(ctx->pid, PIDTYPE_PID);
	if (task) {
		strcpy(e->comm, task->comm);
		e->pid = task->pid;
	}
	rcu_read_unlock();

	e->sched_attr = ctx->sched;
	e->guilty = atomic_read(&ctx->guilty_count);
	e->active = atomic_read(&ctx->active_count);

	e->total_runtime = rq->context->runtime.total;
	e->avg_runtime = ewma_runtime_read(&rq->context->runtime.avg);

	simulated = i915_gem_context_no_error_capture(ctx);

	i915_gem_context_put(ctx);
	return simulated;
}

struct intel_engine_capture_vma {
	struct intel_engine_capture_vma *next;
	struct i915_vma *vma;
	char name[16];
};

static struct intel_engine_capture_vma *
capture_vma(struct intel_engine_capture_vma *next,
	    struct i915_vma *vma,
	    const char *name,
	    gfp_t gfp)
{
	struct intel_engine_capture_vma *c;

	if (!vma)
		return next;

	c = kmalloc(sizeof(*c), gfp);
	if (!c)
		return next;

	if (!i915_active_acquire_if_busy(&vma->active)) {
		kfree(c);
		return next;
	}

	strcpy(c->name, name);
	c->vma = vma; /* reference held while active */

	c->next = next;
	return c;
}

static struct intel_engine_capture_vma *
capture_user(struct intel_engine_capture_vma *capture,
	     const struct i915_request *rq,
	     gfp_t gfp)
{
	struct i915_capture_list *c;

	for (c = rq->capture_list; c; c = c->next)
		capture = capture_vma(capture, c->vma, "user", gfp);

	return capture;
}

static void add_vma(struct intel_engine_coredump *ee,
		    struct i915_vma_coredump *vma)
{
	if (vma) {
		vma->next = ee->vma;
		ee->vma = vma;
	}
}

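/* Allocate an engine coredump and snapshot its current register state. */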
struct intel_engine_coredump *
intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
{
	struct intel_engine_coredump *ee;

	ee = kzalloc(sizeof(*ee), gfp);
	if (!ee)
		return NULL;

	ee->engine = engine;

	engine_record_registers(ee);
	engine_record_execlists(ee);

	return ee;
}

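/*
 * Record the hung request and grab references on the vma it was using.
 * The contents are copied later, outside the critical section, by
 * intel_engine_coredump_add_vma().
 */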
struct intel_engine_capture_vma *
intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
				  struct i915_request *rq,
				  gfp_t gfp)
{
	struct intel_engine_capture_vma *vma = NULL;

	ee->simulated |= record_context(&ee->context, rq);
	if (ee->simulated)
		return NULL;

	/*
	 * We need to copy these to an anonymous buffer
	 * as the simplest method to avoid being overwritten
	 * by userspace.
	 */
	vma = capture_vma(vma, rq->batch, "batch", gfp);
	vma = capture_user(vma, rq, gfp);
	vma = capture_vma(vma, rq->ring->vma, "ring", gfp);
	vma = capture_vma(vma, rq->context->state, "HW context", gfp);

	ee->rq_head = rq->head;
	ee->rq_post = rq->postfix;
	ee->rq_tail = rq->tail;

	return vma;
}

void
intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
			      struct intel_engine_capture_vma *capture,
			      struct i915_vma_compress *compress)
{
	const struct intel_engine_cs *engine = ee->engine;

	while (capture) {
		struct intel_engine_capture_vma *this = capture;
		struct i915_vma *vma = this->vma;

		add_vma(ee,
			i915_vma_coredump_create(engine->gt,
						 vma, this->name,
						 compress));

		i915_active_release(&vma->active);

		capture = this->next;
		kfree(this);
	}

	add_vma(ee,
		i915_vma_coredump_create(engine->gt,
					 engine->status_page.vma,
					 "HW Status",
					 compress));

	add_vma(ee,
		i915_vma_coredump_create(engine->gt,
					 engine->wa_ctx.vma,
					 "WA context",
					 compress));
}

static struct intel_engine_coredump *
capture_engine(struct intel_engine_cs *engine,
	       struct i915_vma_compress *compress)
{
	struct intel_engine_capture_vma *capture = NULL;
	struct intel_engine_coredump *ee;
	struct i915_request *rq;
	unsigned long flags;

	ee = intel_engine_coredump_alloc(engine, GFP_KERNEL);
	if (!ee)
		return NULL;

	spin_lock_irqsave(&engine->active.lock, flags);
	rq = intel_engine_find_active_request(engine);
	if (rq)
		capture = intel_engine_coredump_add_request(ee, rq,
							    ATOMIC_MAYFAIL);
	spin_unlock_irqrestore(&engine->active.lock, flags);
	if (!capture) {
		kfree(ee);
		return NULL;
	}

	intel_engine_coredump_add_vma(ee, capture, compress);

	return ee;
}

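/* Capture each engine on the GT in turn, discarding simulated hangs. */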
static void
gt_record_engines(struct intel_gt_coredump *gt,
		  intel_engine_mask_t engine_mask,
		  struct i915_vma_compress *compress)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt->_gt, id) {
		struct intel_engine_coredump *ee;

		/* Refill our page pool before entering atomic section */
		pool_refill(&compress->pool, ALLOW_FAIL);

		ee = capture_engine(engine, compress);
		if (!ee)
			continue;

		ee->hung = engine->mask & engine_mask;

		gt->simulated |= ee->simulated;
		if (ee->simulated) {
			kfree(ee);
			continue;
		}

		ee->next = gt->engine;
		gt->engine = ee;
	}
}

static struct intel_uc_coredump *
gt_record_uc(struct intel_gt_coredump *gt,
	     struct i915_vma_compress *compress)
{
	const struct intel_uc *uc = &gt->_gt->uc;
	struct intel_uc_coredump *error_uc;

	error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL);
	if (!error_uc)
		return NULL;

	memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
	memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));

	/*
	 * Non-default firmware paths will be specified by the modparam.
	 * As modparams are generally accessible from userspace, make
	 * explicit copies of the firmware paths.
	 */
	error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL);
	error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL);
	error_uc->guc_log =
		i915_vma_coredump_create(gt->_gt,
					 uc->guc.log.vma, "GuC log buffer",
					 compress);

	return error_uc;
}

/* Capture all registers which don't fit into another category. */
static void gt_record_regs(struct intel_gt_coredump *gt)
{
	struct intel_uncore *uncore = gt->_gt->uncore;
	struct drm_i915_private *i915 = uncore->i915;
	int i;

	/*
	 * General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(i915)) {
		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
		gt->ier = intel_uncore_read(uncore, VLV_IER);
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
	}

	if (IS_GEN(i915, 7))
		gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);

	if (INTEL_GEN(i915) >= 12) {
		gt->fault_data0 = intel_uncore_read(uncore,
						    GEN12_FAULT_TLB_DATA0);
		gt->fault_data1 = intel_uncore_read(uncore,
						    GEN12_FAULT_TLB_DATA1);
	} else if (INTEL_GEN(i915) >= 8) {
		gt->fault_data0 = intel_uncore_read(uncore,
						    GEN8_FAULT_TLB_DATA0);
		gt->fault_data1 = intel_uncore_read(uncore,
						    GEN8_FAULT_TLB_DATA1);
	}

	if (IS_GEN(i915, 6)) {
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
		gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
		gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_GEN(i915) >= 7)
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);

	if (INTEL_GEN(i915) >= 6) {
		gt->derrmr = intel_uncore_read(uncore, DERRMR);
		if (INTEL_GEN(i915) < 12) {
			gt->error = intel_uncore_read(uncore, ERROR_GEN6);
			gt->done_reg = intel_uncore_read(uncore, DONE_REG);
		}
	}

	/* 3: Feature specific registers */
	if (IS_GEN_RANGE(i915, 6, 7)) {
		gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
		gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
	}

	if (IS_GEN_RANGE(i915, 8, 11))
		gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);

	if (IS_GEN(i915, 12))
		gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);

	if (INTEL_GEN(i915) >= 12) {
		for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
			gt->sfc_done[i] =
				intel_uncore_read(uncore, GEN12_SFC_DONE(i));
		}

		gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
	}

	/* 4: Everything else */
	if (INTEL_GEN(i915) >= 11) {
		gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
		gt->gtier[0] =
			intel_uncore_read(uncore,
					  GEN11_RENDER_COPY_INTR_ENABLE);
		gt->gtier[1] =
			intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
		gt->gtier[2] =
			intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
		gt->gtier[3] =
			intel_uncore_read(uncore,
					  GEN11_GPM_WGBOXPERF_INTR_ENABLE);
		gt->gtier[4] =
			intel_uncore_read(uncore,
					  GEN11_CRYPTO_RSVD_INTR_ENABLE);
		gt->gtier[5] =
			intel_uncore_read(uncore,
					  GEN11_GUNIT_CSME_INTR_ENABLE);
		gt->ngtier = 6;
	} else if (INTEL_GEN(i915) >= 8) {
		gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			gt->gtier[i] =
				intel_uncore_read(uncore, GEN8_GT_IER(i));
		gt->ngtier = 4;
	} else if (HAS_PCH_SPLIT(i915)) {
		gt->ier = intel_uncore_read(uncore, DEIER);
		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
		gt->ngtier = 1;
	} else if (IS_GEN(i915, 2)) {
		gt->ier = intel_uncore_read16(uncore, GEN2_IER);
	} else if (!IS_VALLEYVIEW(i915)) {
		gt->ier = intel_uncore_read(uncore, GEN2_IER);
	}
	gt->eir = intel_uncore_read(uncore, EIR);
	gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
}

static void gt_record_info(struct intel_gt_coredump *gt)
{
	memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
}

/*
 * Generate a semi-unique error code. The code is not meant to have meaning,
 * its only purpose is to differentiate new and old hangs in bug reports.
 */
static u32 generate_ecode(const struct intel_engine_coredump *ee)
{
	/*
	 * IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung". However, it can match some
	 * very common synchronization commands that almost always appear
	 * even when the hang is strictly a client bug, so fold INSTDONE in
	 * to differentiate those cases.
	 */
	return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
}

static const char *error_msg(struct i915_gpu_coredump *error)
{
	struct intel_engine_coredump *first = NULL;
	unsigned int hung_classes = 0;
	struct intel_gt_coredump *gt;
	int len;

	for (gt = error->gt; gt; gt = gt->next) {
		struct intel_engine_coredump *cs;

		for (cs = gt->engine; cs; cs = cs->next) {
			if (cs->hung) {
				hung_classes |= BIT(cs->engine->uabi_class);
				if (!first)
					first = cs;
			}
		}
	}

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%x:%08x",
			INTEL_GEN(error->i915), hung_classes,
			generate_ecode(first));
	if (first && first->context.pid) {
		/* Just show the first executing process, more is confusing */
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 first->context.comm, first->context.pid);
	}

	return error->error_msg;
}

static void capture_gen(struct i915_gpu_coredump *error)
{
	struct drm_i915_private *i915 = error->i915;

	error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
	error->suspended = i915->runtime_pm.suspended;

	error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
	error->iommu = intel_iommu_gfx_mapped;
#endif
	error->reset_count = i915_reset_count(&i915->gpu_error);
	error->suspend_count = i915->suspend_count;

	i915_params_copy(&error->params, &i915->params);
	memcpy(&error->device_info,
	       INTEL_INFO(i915),
	       sizeof(error->device_info));
	memcpy(&error->runtime_info,
	       RUNTIME_INFO(i915),
	       sizeof(error->runtime_info));
	error->driver_caps = i915->caps;
}

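/*
 * Allocate the top-level coredump and capture the device-wide state:
 * timestamps, modparams and device/runtime info. Returns NULL when error
 * capture has been disabled via the error_capture modparam.
 */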
struct i915_gpu_coredump *
i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
{
	struct i915_gpu_coredump *error;

	if (!i915->params.error_capture)
		return NULL;

	error = kzalloc(sizeof(*error), gfp);
	if (!error)
		return NULL;

	kref_init(&error->ref);
	error->i915 = i915;

	error->time = ktime_get_real();
	error->boottime = ktime_get_boottime();
	error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time);
	error->capture = jiffies;

	capture_gen(error);

	return error;
}

#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))

struct intel_gt_coredump *
intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp)
{
	struct intel_gt_coredump *gc;

	gc = kzalloc(sizeof(*gc), gfp);
	if (!gc)
		return NULL;

	gc->_gt = gt;
	gc->awake = intel_gt_pm_is_awake(gt);

	gt_record_regs(gc);
	gt_record_fences(gc);

	return gc;
}

struct i915_vma_compress *
i915_vma_capture_prepare(struct intel_gt_coredump *gt)
{
	struct i915_vma_compress *compress;

	compress = kmalloc(sizeof(*compress), ALLOW_FAIL);
	if (!compress)
		return NULL;

	if (!compress_init(compress)) {
		kfree(compress);
		return NULL;
	}

	return compress;
}

void i915_vma_capture_finish(struct intel_gt_coredump *gt,
			     struct i915_vma_compress *compress)
{
	if (!compress)
		return;

	compress_fini(compress);
	kfree(compress);
}

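/*
 * Take a full snapshot of the GPU: global registers and fences, each
 * engine together with the vma referenced by its hung request, firmware
 * state, and overlay/display state. The caller owns the returned
 * reference and must release it with i915_gpu_coredump_put().
 */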
struct i915_gpu_coredump *
i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
	struct drm_i915_private *i915 = gt->i915;
	struct i915_gpu_coredump *error;

	/* Check if GPU capture has been disabled */
	error = READ_ONCE(i915->gpu_error.first_error);
	if (IS_ERR(error))
		return error;

	error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL);
	if (!error)
		return ERR_PTR(-ENOMEM);

	error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL);
	if (error->gt) {
		struct i915_vma_compress *compress;

		compress = i915_vma_capture_prepare(error->gt);
		if (!compress) {
			kfree(error->gt);
			kfree(error);
			return ERR_PTR(-ENOMEM);
		}

		gt_record_info(error->gt);
		gt_record_engines(error->gt, engine_mask, compress);

		if (INTEL_INFO(i915)->has_gt_uc)
			error->gt->uc = gt_record_uc(error->gt, compress);

		i915_vma_capture_finish(error->gt, compress);

		error->simulated |= error->gt->simulated;
	}

	error->overlay = intel_overlay_capture_error_state(i915);
	error->display = intel_display_capture_error_state(i915);

	return error;
}

void i915_error_state_store(struct i915_gpu_coredump *error)
{
	struct drm_i915_private *i915;
	static bool warned;

	if (IS_ERR_OR_NULL(error))
		return;

	i915 = error->i915;
	drm_info(&i915->drm, "%s\n", error_msg(error));

	if (error->simulated ||
	    cmpxchg(&i915->gpu_error.first_error, NULL, error))
		return;

	i915_gpu_coredump_get(error);

	if (!xchg(&warned, true) &&
	    ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
		pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
		pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
		pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
		pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
			i915->drm.primary->index);
	}
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @gt: intel_gt which originated the hang
 * @engine_mask: hung engines
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask)
{
	struct i915_gpu_coredump *error;

	error = i915_gpu_coredump(gt, engine_mask);
	if (IS_ERR(error)) {
		cmpxchg(&gt->i915->gpu_error.first_error, NULL, error);
		return;
	}

	i915_error_state_store(error);
	i915_gpu_coredump_put(error);
}

struct i915_gpu_coredump *
i915_first_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_coredump *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (!IS_ERR_OR_NULL(error))
		i915_gpu_coredump_get(error);
	spin_unlock_irq(&i915->gpu_error.lock);

	return error;
}

void i915_reset_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_coredump *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (error != ERR_PTR(-ENODEV))
		i915->gpu_error.first_error = NULL;
	spin_unlock_irq(&i915->gpu_error.lock);

	if (!IS_ERR_OR_NULL(error))
		i915_gpu_coredump_put(error);
}

void i915_disable_error_state(struct drm_i915_private *i915, int err)
{
	spin_lock_irq(&i915->gpu_error.lock);
	if (!i915->gpu_error.first_error)
		i915->gpu_error.first_error = ERR_PTR(err);
	spin_unlock_irq(&i915->gpu_error.lock);
}