#include <linux/debugfs.h>
#include <linux/relay.h>
#include "i915_drv.h"

static void guc_log_capture_logs(struct intel_guc *guc);
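
/**
 * DOC: GuC firmware log
 *
 * Firmware logging is enabled by setting i915.guc_log_level to a non-negative
 * value. The GuC writes log entries into a shared buffer object; whenever the
 * firmware signals that a log buffer needs flushing, the driver snapshots the
 * new data into a relay channel that is exposed to userspace through debugfs.
 */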
static int guc_log_flush_complete(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int guc_log_flush(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
		0
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int guc_log_control(struct intel_guc *guc, u32 control_val)
{
	u32 action[] = {
		INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
		control_val
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
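
/*
 * Sub buffer switch callback. Called whenever relay has to switch to a new
 * sub buffer; returning 0 keeps relay on the current sub buffer.
 */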
static int subbuf_start_callback(struct rchan_buf *buf,
				 void *subbuf,
				 void *prev_subbuf,
				 size_t prev_padding)
{
	/*
	 * Use no-overwrite mode: if the relay channel is full, stay on the
	 * current sub buffer and drop the new data instead of overwriting
	 * log data that userspace has not consumed yet.
	 */
	if (relay_buf_full(buf))
		return 0;

	return 1;
}
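
/*
 * buf_file create callback. Creates the debugfs file backing the relay
 * channel buffer.
 */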
static struct dentry *create_buf_file_callback(const char *filename,
					       struct dentry *parent,
					       umode_t mode,
					       struct rchan_buf *buf,
					       int *is_global)
{
	struct dentry *buf_file;

	/*
	 * Use a single (global) buffer for the relay channel, so that only
	 * one relay file is created in debugfs rather than one per CPU.
	 */
	*is_global = 1;

	if (!parent)
		return NULL;

	/*
	 * Ignore the filename passed in by relay (it carries a per-CPU
	 * suffix) and always expose the channel as "guc_log"; relay only
	 * needs the dentry of the file associated with the channel buffer.
	 */
	buf_file = debugfs_create_file("guc_log", mode,
				       parent, buf, &relay_file_operations);
	return buf_file;
}
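
/*
 * buf_file remove callback. Removes the relay file from debugfs.
 */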
static int remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

/* relay channel callbacks */
static struct rchan_callbacks relay_callbacks = {
	.subbuf_start = subbuf_start_callback,
	.create_buf_file = create_buf_file_callback,
	.remove_buf_file = remove_buf_file_callback,
};
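
/*
 * Lazily create the "guc_log" relay file in debugfs, once the debugfs root
 * of the DRM primary minor is available.
 */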
static int guc_log_relay_file_create(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct dentry *log_dir;
	int ret;

	if (i915.guc_log_level < 0)
		return 0;

	/* For now create the log file in the debugfs dir of the primary minor */
	log_dir = dev_priv->drm.primary->debugfs_root;

	/*
	 * If the debugfs root for the device is not there, then debugfs is
	 * either not mounted or not yet registered, so the relay file cannot
	 * be created: relay needs a parent dentry, a file created on its
	 * behalf and the relay_file_operations fops, all of which debugfs
	 * provides.
	 */
	if (!log_dir) {
		DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
		return -ENODEV;
	}

	ret = relay_late_setup_files(guc->log.runtime.relay_chan,
				     "guc_log", log_dir);
	if (ret < 0 && ret != -EEXIST) {
		DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
		return ret;
	}

	return 0;
}
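
/*
 * A sub buffer has been filled with a snapshot of the GuC log buffer; make
 * the data visible and ask relay to switch to the next sub buffer.
 */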
static void guc_move_to_next_buf(struct intel_guc *guc)
{
	/*
	 * Make sure the updates made in the sub buffer are visible before
	 * the consumer sees the new sub buffer.
	 */
	smp_wmb();

	/* All data has been written, so now move the offset of the sub buffer */
	relay_reserve(guc->log.runtime.relay_chan,
		      guc->log.vma->obj->base.size);

	/* Switch to the next sub buffer */
	relay_flush(guc->log.runtime.relay_chan);
}
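
/*
 * Return the base address of the relay sub buffer the next snapshot should
 * be copied into, or NULL if the channel is not available or all sub buffers
 * are full (no-overwrite mode).
 */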
static void *guc_get_write_buffer(struct intel_guc *guc)
{
	if (!guc->log.runtime.relay_chan)
		return NULL;

	/*
	 * Just get the base address of a new sub buffer and copy the data
	 * into it ourselves, instead of going through relay_write(); that
	 * lets the caller use the fast i915_memcpy_from_wc() path. NULL is
	 * returned in no-overwrite mode if all sub buffers are full.
	 */
	return relay_reserve(guc->log.runtime.relay_chan, 0);
}
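
/*
 * Check whether the GuC reported new buffer-full events for this log buffer
 * type since the last snapshot and update the overflow statistics.
 */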
static bool guc_check_log_buf_overflow(struct intel_guc *guc,
				       enum guc_log_buffer_type type,
				       unsigned int full_cnt)
{
	unsigned int prev_full_cnt = guc->log.prev_overflow_count[type];
	bool overflow = false;

	if (full_cnt != prev_full_cnt) {
		overflow = true;

		guc->log.prev_overflow_count[type] = full_cnt;
		guc->log.total_overflow_count[type] += full_cnt - prev_full_cnt;

		if (full_cnt < prev_full_cnt) {
			/* buffer_full_cnt is a 4-bit counter, account for wraparound */
			guc->log.total_overflow_count[type] += 16;
		}
		DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
	}

	return overflow;
}

static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
{
	switch (type) {
	case GUC_ISR_LOG_BUFFER:
		return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
	case GUC_DPC_LOG_BUFFER:
		return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
	case GUC_CRASH_DUMP_LOG_BUFFER:
		return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
	default:
		MISSING_CASE(type);
	}

	return 0;
}
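
/*
 * Snapshot the GuC log buffer into the current relay sub buffer: copy the
 * per-buffer state and any newly written log data, and update the read
 * pointers in the shared buffer so the GuC knows the data has been consumed.
 */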
static void guc_read_update_log_buffer(struct intel_guc *guc)
{
	unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
	struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
	struct guc_log_buffer_state log_buf_state_local;
	enum guc_log_buffer_type type;
	void *src_data, *dst_data;
	bool new_overflow;

	if (WARN_ON(!guc->log.runtime.buf_addr))
		return;

	/* Get the pointer to the shared GuC log buffer */
	log_buf_state = src_data = guc->log.runtime.buf_addr;

	/* Get the pointer to the local buffer to store the logs */
	log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);

	/* Actual logs start from the 2nd page of the shared gem object */
	src_data += PAGE_SIZE;
	dst_data += PAGE_SIZE;

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		/*
		 * Make a copy of the state structure, which lives in the
		 * (uncached) shared log buffer, on the stack to avoid
		 * reading from it multiple times.
		 */
		memcpy(&log_buf_state_local, log_buf_state,
		       sizeof(struct guc_log_buffer_state));
		buffer_size = guc_get_log_buffer_size(type);
		read_offset = log_buf_state_local.read_ptr;
		write_offset = log_buf_state_local.sampled_write_ptr;
		full_cnt = log_buf_state_local.buffer_full_cnt;

		/* Bookkeeping */
		guc->log.flush_count[type] += log_buf_state_local.flush_to_file;
		new_overflow = guc_check_log_buf_overflow(guc, type, full_cnt);

		/* Update the state of the shared log buffer */
		log_buf_state->read_ptr = write_offset;
		log_buf_state->flush_to_file = 0;
		log_buf_state++;

		if (unlikely(!log_buf_snapshot_state))
			continue;

		/* First copy the state structure into the snapshot buffer */
		memcpy(log_buf_snapshot_state, &log_buf_state_local,
		       sizeof(struct guc_log_buffer_state));

		/*
		 * The write pointer could have been updated by the GuC
		 * firmware after sending the flush interrupt to the host;
		 * for consistency set the write pointer in the snapshot to
		 * the sampled_write_ptr value used for the copy below.
		 */
		log_buf_snapshot_state->write_ptr = write_offset;
		log_buf_snapshot_state++;

		/* Now copy the actual logs */
		if (unlikely(new_overflow)) {
			/* copy the whole buffer in case of overflow */
			read_offset = 0;
			write_offset = buffer_size;
		} else if (unlikely((read_offset > buffer_size) ||
				    (write_offset > buffer_size))) {
			DRM_ERROR("invalid log buffer state\n");
			/* copy the whole buffer as offsets are unreliable */
			read_offset = 0;
			write_offset = buffer_size;
		}

		/* Just copy the newly written data */
		if (read_offset > write_offset) {
			i915_memcpy_from_wc(dst_data, src_data, write_offset);
			bytes_to_copy = buffer_size - read_offset;
		} else {
			bytes_to_copy = write_offset - read_offset;
		}
		i915_memcpy_from_wc(dst_data + read_offset,
				    src_data + read_offset, bytes_to_copy);

		src_data += buffer_size;
		dst_data += buffer_size;
	}

	if (log_buf_snapshot_state) {
		guc_move_to_next_buf(guc);
	} else {
		/*
		 * Use a rate limited print to avoid flooding the console in
		 * case logs are being consumed at a slow rate.
		 */
		DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
		guc->log.capture_miss_count++;
	}
}

static void capture_logs_work(struct work_struct *work)
{
	struct intel_guc *guc =
		container_of(work, struct intel_guc, log.runtime.flush_work);

	guc_log_capture_logs(guc);
}

static bool guc_log_has_runtime(struct intel_guc *guc)
{
	return guc->log.runtime.buf_addr != NULL;
}
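
/*
 * Set up everything needed for streaming the GuC log: a WC mapping of the
 * log buffer object, a relay channel for exposing the snapshots and an
 * ordered workqueue for handling flush interrupts from the GuC.
 */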
static int guc_log_runtime_create(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	void *vaddr;
	struct rchan *guc_log_relay_chan;
	size_t n_subbufs, subbuf_size;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	GEM_BUG_ON(guc_log_has_runtime(guc));

	ret = i915_gem_object_set_to_wc_domain(guc->log.vma->obj, true);
	if (ret)
		return ret;

	/*
	 * Create a WC (uncached for read) mapping of the log buffer pages,
	 * so that up-to-date data can be read directly from memory.
	 */
	vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
		return ret;
	}

	guc->log.runtime.buf_addr = vaddr;

	/* Keep the size of sub buffers the same as the shared log buffer */
	subbuf_size = guc->log.vma->obj->base.size;

	/*
	 * Store up to 8 snapshots, which is large enough to buffer sufficient
	 * boot time logs and gives userspace enough leeway, in terms of
	 * latency, for consuming the logs from relay, without taking up too
	 * much memory.
	 */
	n_subbufs = 8;

	/*
	 * Create a relay channel, so that we have buffers for storing the
	 * GuC firmware logs; the channel will be linked with a file later
	 * on when debugfs is registered.
	 */
	guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
					n_subbufs, &relay_callbacks, dev_priv);
	if (!guc_log_relay_chan) {
		DRM_ERROR("Couldn't create relay chan for GuC logging\n");

		ret = -ENOMEM;
		goto err_vaddr;
	}

	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
	guc->log.runtime.relay_chan = guc_log_relay_chan;

	INIT_WORK(&guc->log.runtime.flush_work, capture_logs_work);

	/*
	 * The flush work item has to access registers to send the ack to the
	 * GuC and, if not synced before suspend, could get executed after
	 * the GFX device is suspended. By marking the workqueue as freezable
	 * we don't have to flush this work item from the suspend hooks: any
	 * pending work item will either be executed before the suspend or
	 * scheduled later on resume, which keeps the handling the same for
	 * system suspend and runtime suspend.
	 */
	guc->log.runtime.flush_wq = alloc_ordered_workqueue("i915-guc_log",
							    WQ_HIGHPRI | WQ_FREEZABLE);
	if (!guc->log.runtime.flush_wq) {
		DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
		ret = -ENOMEM;
		goto err_relaychan;
	}

	return 0;

err_relaychan:
	relay_close(guc->log.runtime.relay_chan);
err_vaddr:
	i915_gem_object_unpin_map(guc->log.vma->obj);
	guc->log.runtime.buf_addr = NULL;
	return ret;
}

static void guc_log_runtime_destroy(struct intel_guc *guc)
{
	/*
	 * The runtime state may never have been allocated, e.g. if GuC
	 * logging was disabled at boot time (guc_log_level < 0).
	 */
	if (!guc_log_has_runtime(guc))
		return;

	destroy_workqueue(guc->log.runtime.flush_wq);
	relay_close(guc->log.runtime.relay_chan);
	i915_gem_object_unpin_map(guc->log.vma->obj);
	guc->log.runtime.buf_addr = NULL;
}
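
/*
 * Late setup, done once debugfs is available (or when logging is enabled at
 * runtime): create the runtime state if it does not exist yet and hook the
 * relay channel up to its debugfs file.
 */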
static int guc_log_late_setup(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (!guc_log_has_runtime(guc)) {
		/*
		 * If logging was disabled at boot time, the runtime state
		 * (mapping, relay channel, workqueue) was never created, so
		 * create it now.
		 */
		ret = guc_log_runtime_create(guc);
		if (ret)
			goto err;
	}

	ret = guc_log_relay_file_create(guc);
	if (ret)
		goto err_runtime;

	return 0;

err_runtime:
	guc_log_runtime_destroy(guc);
err:
	/* logging will remain off */
	i915.guc_log_level = -1;
	return ret;
}

static void guc_log_capture_logs(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	guc_read_update_log_buffer(guc);

	/*
	 * Generally the device is expected to be active at this point, so
	 * the runtime PM get/put should be really quick.
	 */
	intel_runtime_pm_get(dev_priv);
	guc_log_flush_complete(guc);
	intel_runtime_pm_put(dev_priv);
}
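
/*
 * Stop the flush machinery and capture whatever is left in the GuC log
 * buffer before logging is turned off.
 */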
static void guc_flush_logs(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	if (!i915.enable_guc_submission || (i915.guc_log_level < 0))
		return;

	/* First disable the flush interrupt so no new flush work is queued */
	gen9_disable_guc_interrupts(dev_priv);

	/*
	 * Before initiating the forceful flush, wait for any pending/ongoing
	 * flush to complete, otherwise the forceful flush may not actually
	 * happen.
	 */
	flush_work(&guc->log.runtime.flush_work);

	/* Ask the GuC to update the log buffer state */
	guc_log_flush(guc);

	/* The GuC has updated the log buffer by now, so capture it */
	guc_log_capture_logs(guc);
}
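
/*
 * Allocate the shared log buffer object handed to the GuC and, if logging is
 * enabled, the runtime state used to stream it to userspace. Also computes
 * guc->log.flags, which describes the buffer layout to the firmware.
 */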
int intel_guc_log_create(struct intel_guc *guc)
{
	struct i915_vma *vma;
	unsigned long offset;
	uint32_t size, flags;
	int ret;

	GEM_BUG_ON(guc->log.vma);

	if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
		i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;

	/*
	 * The first page is used for the log buffer state; each log type
	 * then gets GUC_LOG_*_PAGES + 1 pages, matching
	 * guc_get_log_buffer_size().
	 */
	size = (1 + GUC_LOG_DPC_PAGES + 1 +
		GUC_LOG_ISR_PAGES + 1 +
		GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;

	/*
	 * Reading from the WC mapping relies on the fast
	 * i915_memcpy_from_wc() path; without it GuC logging cannot work,
	 * so bail out early.
	 */
	if (WARN_ON(!i915_has_memcpy_from_wc())) {
		ret = -EINVAL;
		goto err;
	}

	vma = intel_guc_allocate_vma(guc, size);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	guc->log.vma = vma;

	if (i915.guc_log_level >= 0) {
		ret = guc_log_runtime_create(guc);
		if (ret < 0)
			goto err_vma;
	}

	/* Buffer sizes in the flags are expressed in pages */
	flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
		(GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
		(GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
		(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);

	offset = guc_ggtt_offset(vma) >> PAGE_SHIFT;
	guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;

	return 0;

err_vma:
	i915_vma_unpin_and_release(&guc->log.vma);
err:
	/* logging will be off */
	i915.guc_log_level = -1;
	return ret;
}

void intel_guc_log_destroy(struct intel_guc *guc)
{
	guc_log_runtime_destroy(guc);
	i915_vma_unpin_and_release(&guc->log.vma);
}
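
/*
 * Runtime control of GuC logging: validate the requested verbosity, forward
 * it to the GuC and enable or tear down the host-side capture machinery
 * accordingly.
 */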
int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
{
	struct intel_guc *guc = &dev_priv->guc;
	union guc_log_control log_param;
	int ret;

	log_param.value = control_val;

	if (log_param.verbosity < GUC_LOG_VERBOSITY_MIN ||
	    log_param.verbosity > GUC_LOG_VERBOSITY_MAX)
		return -EINVAL;

	/* This combination doesn't make sense and won't have any effect */
	if (!log_param.logging_enabled && (i915.guc_log_level < 0))
		return 0;

	ret = guc_log_control(guc, log_param.value);
	if (ret < 0) {
		DRM_DEBUG_DRIVER("guc_logging_control action failed %d\n", ret);
		return ret;
	}

	if (log_param.logging_enabled) {
		i915.guc_log_level = log_param.verbosity;

		/*
		 * If logging was disabled at boot time, the relay channel
		 * file would not have been created by now and interrupts
		 * would not have been enabled, so do the late setup here.
		 */
		ret = guc_log_late_setup(guc);
		if (ret < 0) {
			DRM_DEBUG_DRIVER("GuC log late setup failed %d\n", ret);
			return ret;
		}

		/* GuC logging is currently the only user of GuC2Host interrupts */
		gen9_enable_guc_interrupts(dev_priv);
	} else {
		/*
		 * Once logging is disabled, the GuC won't generate logs or
		 * send flush interrupts, but there could still be data in
		 * the log buffer which has not been captured. Ask the GuC
		 * to update the log buffer state and collect the leftovers.
		 */
		guc_flush_logs(guc);

		/* As logging is disabled, update the log level to reflect that */
		i915.guc_log_level = -1;
	}

	return ret;
}

void i915_guc_log_register(struct drm_i915_private *dev_priv)
{
	if (!i915.enable_guc_submission || i915.guc_log_level < 0)
		return;

	mutex_lock(&dev_priv->drm.struct_mutex);
	guc_log_late_setup(&dev_priv->guc);
	mutex_unlock(&dev_priv->drm.struct_mutex);
}

void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
{
	if (!i915.enable_guc_submission)
		return;

	mutex_lock(&dev_priv->drm.struct_mutex);
	/* GuC logging is currently the only user of GuC2Host interrupts */
	gen9_disable_guc_interrupts(dev_priv);
	guc_log_runtime_destroy(&dev_priv->guc);
	mutex_unlock(&dev_priv->drm.struct_mutex);
}