/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OF THE
 * SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/sched/mm.h>
#include <linux/sort.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>

#include "display/intel_dp.h"
#include "display/intel_fbc.h"
#include "display/intel_hdcp.h"
#include "display/intel_hdmi.h"
#include "display/intel_psr.h"

#include "gem/i915_gem_context.h"
#include "gt/intel_reset.h"

#include "i915_debugfs.h"
#include "i915_irq.h"
#include "intel_csr.h"
#include "intel_drv.h"
#include "intel_guc_submission.h"
#include "intel_pm.h"
#include "intel_sideband.h"

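/*
 * Every debugfs show() callback below receives its drm_info_node via
 * m->private; this helper maps that node back to the owning
 * drm_i915_private.
 */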
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}

static char get_active_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_is_active(obj) ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
	return obj->pin_global ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->userfault_count ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}

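/*
 * Render a GTT page-size mask as text. A single-size mask returns a
 * static string; a mixed mask is formatted into the caller's buffer,
 * e.g. 2M | 4K becomes "2M, 4K". Callers that pass buf == NULL (see
 * describe_obj()) get the condensed "M" marker instead.
 */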
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case 0:
		return "";
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");
		buf[x-2] = '\0';

		return buf;
	}
}

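/*
 * Emit a one-line summary of a GEM object. The five flag characters
 * are, in order: active ('*'), pinned for display ('p'), tiling
 * ('X'/'Y'), outstanding GTT mmap fault ('g') and kernel mapping
 * ('M'); they are followed by the size in KiB, the read/write domains,
 * the cache level and one parenthesised entry per bound vma.
 */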
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		spin_unlock(&obj->vma.lock);

		if (i915_vma_is_pinned(vma))
			pin_count++;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			case I915_GGTT_VIEW_REMAPPED:
				seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.remapped.plane[0].width,
					   vma->ggtt_view.remapped.plane[0].height,
					   vma->ggtt_view.remapped.plane[0].stride,
					   vma->ggtt_view.remapped.plane[0].offset,
					   vma->ggtt_view.remapped.plane[1].width,
					   vma->ggtt_view.remapped.plane[1].height,
					   vma->ggtt_view.remapped.plane[1].stride,
					   vma->ggtt_view.remapped.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_active_request_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");

		spin_lock(&obj->vma.lock);
	}
	spin_unlock(&obj->vma.lock);

	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (obj->pin_global)
		seq_printf(m, " (global)");

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}

struct file_stats {
	struct i915_address_space *vm;
	unsigned long count;
	u64 total, unbound;
	u64 global, shared;
	u64 active, inactive;
	u64 closed;
};

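/*
 * Accumulate one object's footprint into a struct file_stats bucket.
 * This doubles as an idr_for_each() callback over a client's handles
 * (id is then the handle) and as a direct helper for kernel-owned
 * objects, which pass id == 0.
 */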
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!atomic_read(&obj->bind_count))
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			if (vma->vm != stats->vm)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;

		if (i915_vma_is_closed(vma))
			stats->closed += vma->node.size;
	}

	return 0;
}

#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound, \
			   stats.closed); \
} while (0)

static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	struct file_stats stats = {};
	enum intel_engine_id id;
	int j;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}

static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx;

	list_for_each_entry(ctx, &i915->contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (ce->state)
				per_file_stats(0, ce->state->obj, &kstats);
			if (ce->ring)
				per_file_stats(0, ce->ring->vma->obj, &kstats);
		}
		i915_gem_context_unlock_engines(ctx);

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			struct file_stats stats = { .vm = ctx->vm, };
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			spin_lock(&file->table_lock);
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			spin_unlock(&file->table_lock);

			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s",
				 task ? task->comm : "<unknown>");
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}
	}

	print_file_stats(m, "[k]contexts", kstats);
}

static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	int ret;

	seq_printf(m, "%u shrinkable objects, %llu bytes\n",
		   i915->mm.shrink_count,
		   i915->mm.shrink_memory);

	seq_putc(m, '\n');

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	print_batch_pool_stats(m, i915);
	print_context_stats(m, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	return 0;
}

static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, " ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

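/*
 * Dump the gen8+ display-engine interrupt registers; this block is
 * shared by the gen8 and gen11 branches of i915_interrupt_info()
 * below. Pipe registers live in per-pipe power wells, so each read is
 * guarded by intel_display_power_get_if_enabled() to avoid touching a
 * powered-down domain.
 */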
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int i, pipe;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain, pref);
		}

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control: %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable: %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;
			intel_wakeref_t pref;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain, pref);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(GEN2_IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(GEN2_IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(GEN2_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_engine(engine, dev_priv, id) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   engine->name, ENGINE_READ(engine, RING_IMR));
		}
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	unsigned int i;

	seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);

	rcu_read_lock();
	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_vma *vma = i915->ggtt.fence_regs[i].vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, i915->ggtt.fence_regs[i].pin_count);
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}

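/*
 * GPU error-state interfaces, built only with
 * CONFIG_DRM_I915_CAPTURE_ERROR: i915_gpu_info captures a fresh
 * snapshot of the GPU state on every open(), while i915_error_state
 * exposes the first recorded hang; writing to the latter clears the
 * stored state.
 */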
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_state *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		gpu = i915_capture_gpu_state(i915);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_state *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct i915_gpu_state *error;

	error = i915_first_error_state(inode->i_private);
	if (IS_ERR(error))
		return PTR_ERR(error);

	file->private_data = error;
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif

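/*
 * Report the RPS/P-state bookkeeping. Three hardware flavours are
 * handled: Ironlake (MEMSWCTL/MEMSTAT), VLV/CHV (Punit mailbox) and
 * gen6+ (RPNSWREQ/RPSTAT plus the RP_STATE_CAP fuses); older parts
 * have no P-state information to report.
 */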
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	intel_wakeref_t wakeref;
	int ret = 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_GEN(dev_priv, 5)) {
		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		vlv_punit_get(dev_priv);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(dev_priv);

		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return ret;
}

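/*
 * Pretty-print an intel_instdone snapshot: the top-level INSTDONE
 * dword always, SC_INSTDONE from gen4 onwards, and the per
 * slice/subslice sampler and row dwords from gen7 onwards.
 */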
static void i915_instdone_info(struct drm_i915_private *dev_priv,
			       struct seq_file *m,
			       struct intel_instdone *instdone)
{
	int slice;
	int subslice;

	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
		   instdone->instdone);

	if (INTEL_GEN(dev_priv) <= 3)
		return;

	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
		   instdone->slice_common);

	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->row[slice][subslice]);
}

static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;

	seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tWedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tDevice (global) reset in progress\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		for_each_engine(engine, dev_priv, id)
			acthd[id] = intel_engine_get_active_head(engine);

		intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
	}

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s: %d ms ago\n",
			   engine->name,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);

		if (engine->id == RCS0) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}

static int i915_reset_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s = %u\n", engine->name,
			   i915_reset_engine_count(error, engine));
	}

	return 0;
}

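/*
 * Ironlake render-standby reporting for i915_drpc_info(): decode
 * MEMMODECTL, RSTDBYCTL and CRSTANDVID into the boost/standby
 * configuration and the current RSx state.
 */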
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore *uncore = &i915->uncore;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
	rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
	crstandvid = intel_uncore_read16(uncore, CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore *uncore = &i915->uncore;
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	seq_printf(m, "user.bypass_count = %u\n",
		   uncore->user_forcewake.count);

	for_each_fw_domain(fw_domain, uncore, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}

static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "%s %u (%llu us)\n",
		   title, I915_READ(reg),
		   intel_rc6_residency_us(dev_priv, reg));
}

static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return i915_forcewake_domains(m, NULL);
}

static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	if (INTEL_GEN(dev_priv) <= 7)
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids, NULL);

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		seq_printf(m, "RC6 voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	int err = -ENODEV;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			err = vlv_drpc_info(m);
		else if (INTEL_GEN(dev_priv) >= 6)
			err = gen6_drpc_info(m);
		else
			err = ironlake_drpc_info(m);
	}

	return err;
}

static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;
	intel_wakeref_t wakeref;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_fbc_false_color_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	*val = dev_priv->fbc.false_color;

	return 0;
}

static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	if (!HAS_IPS(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915_modparams.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	if (!IS_GEN(i915, 5))
		return -ENODEV;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		unsigned long temp, chipset, gfx;

		temp = i915_mch_val(i915);
		chipset = i915_chipset_val(i915);
		gfx = i915_gfx_val(i915);

		seq_printf(m, "GMCH temp: %ld\n", temp);
		seq_printf(m, "Chipset power: %ld\n", chipset);
		seq_printf(m, "GFX power: %ld\n", gfx);
		seq_printf(m, "Total power: %ld\n", chipset + gfx);
	}

	return 0;
}

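/*
 * On LLC platforms the IA and ring frequencies scale with the GPU
 * frequency. For each GPU frequency step, ask the PCU (via the
 * GEN6_PCODE_READ_MIN_FREQ_TABLE mailbox) which effective CPU and ring
 * frequencies it maps to; both replies are in units of 100 MHz.
 */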
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq, NULL);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_opregion *opregion = &dev_priv->opregion;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	mutex_unlock(&dev->struct_mutex);

out:
	return 0;
}

static int i915_vbt(struct seq_file *m, void *unused)
{
	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

	if (opregion->vbt)
		seq_write(m, opregion->vbt, opregion->vbt_size);

	return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
		   ring->space, ring->head, ring->tail, ring->emit);
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_gem_context *ctx;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		seq_puts(m, "HW context ");
		if (!list_empty(&ctx->hw_id_link))
			seq_printf(m, "%x [pin %u]", ctx->hw_id,
				   atomic_read(&ctx->hw_id_pin_count));
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			seq_printf(m, "%s: ", ce->engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}
		i915_gem_context_unlock_engines(ctx);

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

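/* Decode a bit-6 swizzle mode (as reported for X/Y tiling) to text. */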
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   intel_uncore_read(uncore, DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   intel_uncore_read(uncore, DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   intel_uncore_read(uncore, TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   intel_uncore_read(uncore, GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   intel_uncore_read(uncore, ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   intel_uncore_read(uncore, DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static const char *rps_power_to_str(unsigned int power)
{
	static const char * const strings[] = {
		[LOW_POWER] = "low power",
		[BETWEEN] = "mixed",
		[HIGH_POWER] = "high power",
	};

	if (power >= ARRAY_SIZE(strings) || !strings[power])
		return "unknown";

	return strings[power];
}

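/*
 * Summarise the RPS state: requested vs actual frequency, the hard and
 * soft limits, outstanding waitboosts and, while the GT is awake, the
 * up/down evaluation-interval counters that drive the autotuning
 * decisions.
 */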
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 act_freq = rps->cur_freq;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			vlv_punit_get(dev_priv);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			vlv_punit_put(dev_priv);
			act_freq = (act_freq >> 8) & 0xff;
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
		}
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq),
		   intel_gpu_freq(dev_priv, act_freq));
	seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}

static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const bool edram = INTEL_GEN(dev_priv) > 8;

	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
		   dev_priv->edram_size_mb);

	return 0;
}

static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_HUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->huc.fw, &p);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));

	return 0;
}

static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->guc.fw, &p);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		u32 tmp = I915_READ(GUC_STATUS);
		u32 i;

		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
		seq_printf(m, "\tBootrom status = 0x%x\n",
			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		seq_printf(m, "\tuKernel status = 0x%x\n",
			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		seq_printf(m, "\tMIA Core status = 0x%x\n",
			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
		seq_puts(m, "\nScratch registers:\n");
		for (i = 0; i < 16; i++) {
			seq_printf(m, "\t%2d: \t0x%x\n",
				   i, I915_READ(SOFT_SCRATCH(i)));
		}
	}

	return 0;
}

1942static const char *
1943stringify_guc_log_type(enum guc_log_buffer_type type)
1944{
1945 switch (type) {
1946 case GUC_ISR_LOG_BUFFER:
1947 return "ISR";
1948 case GUC_DPC_LOG_BUFFER:
1949 return "DPC";
1950 case GUC_CRASH_DUMP_LOG_BUFFER:
1951 return "CRASH";
1952 default:
1953 MISSING_CASE(type);
1954 }
1955
1956 return "";
1957}
1958
1959static void i915_guc_log_info(struct seq_file *m,
1960 struct drm_i915_private *dev_priv)
1961{
1962 struct intel_guc_log *log = &dev_priv->guc.log;
1963 enum guc_log_buffer_type type;
1964
1965 if (!intel_guc_log_relay_enabled(log)) {
1966 seq_puts(m, "GuC log relay disabled\n");
1967 return;
1968 }
1969
1970 seq_puts(m, "GuC logging stats:\n");
1971
1972 seq_printf(m, "\tRelay full count: %u\n",
1973 log->relay.full_count);
1974
1975 for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
1976 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
1977 stringify_guc_log_type(type),
1978 log->stats[type].flush,
1979 log->stats[type].sampled_overflow);
1980 }
1981}
1982
1983static void i915_guc_client_info(struct seq_file *m,
1984 struct drm_i915_private *dev_priv,
1985 struct intel_guc_client *client)
1986{
1987 struct intel_engine_cs *engine;
1988 enum intel_engine_id id;
1989 u64 tot = 0;
1990
1991 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
1992 client->priority, client->stage_id, client->proc_desc_offset);
1993 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
1994 client->doorbell_id, client->doorbell_offset);
1995
1996 for_each_engine(engine, dev_priv, id) {
1997 u64 submissions = client->submissions[id];
1998 tot += submissions;
1999 seq_printf(m, "\tSubmissions: %llu %s\n",
2000 submissions, engine->name);
2001 }
2002 seq_printf(m, "\tTotal: %llu\n", tot);
2003}
2004
2005static int i915_guc_info(struct seq_file *m, void *data)
2006{
2007 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2008 const struct intel_guc *guc = &dev_priv->guc;
2009
2010 if (!USES_GUC(dev_priv))
2011 return -ENODEV;
2012
2013 i915_guc_log_info(m, dev_priv);
2014
2015 if (!USES_GUC_SUBMISSION(dev_priv))
2016 return 0;
2017
2018 GEM_BUG_ON(!guc->execbuf_client);
2019
2020 seq_printf(m, "\nDoorbell map:\n");
2021 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2022 seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
2023
2024 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2025 i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2026 if (guc->preempt_client) {
2027 seq_printf(m, "\nGuC preempt client @ %p:\n",
2028 guc->preempt_client);
2029 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2030 }
2031
2032
2033
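	/* Add more as required ... */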

	return 0;
}

static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	intel_engine_mask_t tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
				&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}

static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	if (dump_load_err)
		obj = dev_priv->guc.load_err_log;
	else if (dev_priv->guc.log.vma)
		obj = dev_priv->guc.log.vma->obj;

	if (!obj)
		return 0;

	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin GuC log object\n");
		seq_puts(m, "(log data inaccessible)\n");
		return PTR_ERR(log);
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}

static int i915_guc_log_level_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	*val = intel_guc_log_get_level(&dev_priv->guc.log);

	return 0;
}

static int i915_guc_log_level_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	return intel_guc_log_set_level(&dev_priv->guc.log, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");

static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	file->private_data = &dev_priv->guc.log;

	return intel_guc_log_relay_open(&dev_priv->guc.log);
}

static ssize_t
i915_guc_log_relay_write(struct file *filp,
			 const char __user *ubuf,
			 size_t cnt,
			 loff_t *ppos)
{
	struct intel_guc_log *log = filp->private_data;

	intel_guc_log_relay_flush(log);

	return cnt;
}

static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	intel_guc_log_relay_close(&dev_priv->guc.log);

	return 0;
}

static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};

static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
	u8 val;
	static const char * const sink_status[] = {
		"inactive",
		"transition to active, capture and display",
		"active, display from RFB",
		"active, capture and display on sink device timings",
		"transition to inactive, capture and display, timing re-sync",
		"reserved",
		"reserved",
		"sink internal error",
	};
	struct drm_connector *connector = m->private;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	int ret;

	if (!CAN_PSR(dev_priv)) {
		seq_puts(m, "PSR Unsupported\n");
		return -ENODEV;
	}

	if (connector->status != connector_status_connected)
		return -ENODEV;

	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);

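	/* drm_dp_dpcd_readb() returns the number of bytes read on success */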
	if (ret == 1) {
		const char *str = "unknown";

		val &= DP_PSR_SINK_STATE_MASK;
		if (val < ARRAY_SIZE(sink_status))
			str = sink_status[val];
		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
	} else {
		return ret;
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);

static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
	u32 val, status_val;
	const char *status = "unknown";

	if (dev_priv->psr.psr2_enabled) {
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = I915_READ(EDP_PSR2_STATUS);
		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
			      EDP_PSR2_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = I915_READ(EDP_PSR_STATUS);
		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
			      EDP_PSR_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}

static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_psr *psr = &dev_priv->psr;
	intel_wakeref_t wakeref;
	const char *status;
	bool enabled;
	u32 val;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
	if (psr->dp)
		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
	seq_puts(m, "\n");

	if (!psr->sink_support)
		return 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&psr->lock);

	if (psr->enabled)
		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
	else
		status = "disabled";
	seq_printf(m, "PSR mode: %s\n", status);

	if (!psr->enabled)
		goto unlock;

	if (psr->psr2_enabled) {
		val = I915_READ(EDP_PSR2_CTL);
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = I915_READ(EDP_PSR_CTL);
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
		   enableddisabled(enabled), val);
	psr_source_status(dev_priv, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);

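	/*
	 * On SKL+ the performance counter is reset to 0 every time a DC
	 * state is entered, so it is only reported for HSW/BDW here.
	 */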
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
		seq_printf(m, "Performance counter: %u\n", val);
	}

	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->psr2_enabled) {
		u32 su_frames_val[3];
		int frame;

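		/*
		 * Read all three registers up front to minimize the chance
		 * of crossing a frame boundary between the register reads.
		 */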
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
			su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));

		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
			u32 su_blocks;

			su_blocks = su_frames_val[frame / 3] &
				    PSR2_SU_STATUS_MASK(frame);
			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
			seq_printf(m, "%d\t%d\n", frame, su_blocks);
		}
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;
	int ret;

	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	ret = intel_psr_debug_set(dev_priv, val);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return ret;
}

static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	*val = READ_ONCE(dev_priv->psr.debug);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");

static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	intel_wakeref_t wakeref;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
		return -ENODEV;

	units = (power & 0x1f00) >> 8;
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		power = I915_READ(MCH_SECP_NRG_STTS);

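	/* The counter ticks in (1/2)^units J; scale the total to microjoules */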
	power = (1000000 * power) >> units;
	seq_printf(m, "%llu", power);

	return 0;
}

static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "Runtime power status: %s\n",
		   enableddisabled(!dev_priv->power_domains.wakeref));

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_puts(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
		struct drm_printer p = drm_seq_file_printer(m);

		print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
	}

	return 0;
}

static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->desc->name,
			   power_well->count);

		for_each_power_domain(power_domain, power_well->desc->domains)
			seq_printf(m, "  %-23s %d\n",
				   intel_display_power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}

static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	if (WARN_ON(INTEL_GEN(dev_priv) > 11))
		goto out;

	seq_printf(m, "DC3 -> DC5 count: %d\n",
		   I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
						    SKL_CSR_DC3_DC5_COUNT));
	if (!IS_GEN9_LP(dev_priv))
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static void intel_seq_print_mode(struct seq_file *m, int tabs,
				 struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < tabs; i++)
		seq_putc(m, '\t');

	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}

static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;

		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			struct drm_display_mode *mode = &crtc->mode;

			seq_puts(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}

static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}

static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
	struct drm_display_mode *mode = panel->fixed_mode;

	seq_puts(m, "\tfixed mode:\n");
	intel_seq_print_mode(m, 2, mode);
}

static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
}

static void intel_dp_mst_info(struct seq_file *m,
			      struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp_mst_encoder *intel_mst =
		enc_to_mst(&intel_encoder->base);
	struct intel_digital_port *intel_dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
						   intel_connector->port);

	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}

static void intel_hdmi_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);

	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
}

static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}

static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));

	if (connector->status == connector_status_disconnected)
		return;

	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
		   connector->display_info.width_mm,
		   connector->display_info.height_mm);
	seq_printf(m, "\tsubpixel order: %s\n",
		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);

	if (!intel_encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_puts(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}

static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
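	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */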
	}

	return "unknown";
}

static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
{
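	/*
	 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
	 * but print them all to make any misused value easy to spot.
	 */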
	snprintf(buf, bufsize,
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);
}

static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;
		char rot_str[48];

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		plane_rotation(rot_str, sizeof(rot_str), state->rotation);

		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   rot_str);
	}
}

static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

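	/* Not all platforms have a scaler */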
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
				&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}

static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_puts(m, "CRTC info\n");
	seq_puts(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *pipe_config;

		drm_modeset_lock(&crtc->base.mutex, NULL);
		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		if (pipe_config->base.active) {
			struct intel_plane *cursor =
				to_intel_plane(crtc->base.cursor);

			intel_crtc_info(m, crtc);

			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
				   yesno(cursor->base.state->visible),
				   cursor->base.state->crtc_x,
				   cursor->base.state->crtc_y,
				   cursor->base.state->crtc_w,
				   cursor->base.state->crtc_h,
				   cursor->cursor.base);
			intel_scaler_info(m, crtc);
			intel_plane_info(m, crtc);
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
		drm_modeset_unlock(&crtc->base.mutex);
	}

	seq_puts(m, "\n");
	seq_puts(m, "Connector info\n");
	seq_puts(m, "--------------\n");
	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		intel_connector_info(m, connector);
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;
	struct drm_printer p;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "GT awake? %s [%d]\n",
		   yesno(dev_priv->gt.awake),
		   atomic_read(&dev_priv->gt.wakeref.count));
	seq_printf(m, "CS timestamp frequency: %u kHz\n",
		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);

	p = drm_seq_file_printer(m);
	for_each_engine(engine, dev_priv, id)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_rcs_topology(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);

	return 0;
}

static int i915_shrinker_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);

	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);

	return 0;
}

static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
			   pll->info->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_puts(m, " tracked hardware state:\n");
		seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->state.hw_state.dpll_md);
		seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
		seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
		seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
		seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
		seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
		seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
			   pll->state.hw_state.mg_refclkin_ctl);
		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
			   pll->state.hw_state.mg_clktop2_coreclkctl1);
		seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
			   pll->state.hw_state.mg_clktop2_hsclkctl);
		seq_printf(m, " mg_pll_div0: 0x%08x\n",
			   pll->state.hw_state.mg_pll_div0);
		seq_printf(m, " mg_pll_div1: 0x%08x\n",
			   pll->state.hw_state.mg_pll_div1);
		seq_printf(m, " mg_pll_lf: 0x%08x\n",
			   pll->state.hw_state.mg_pll_lf);
		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
			   pll->state.hw_state.mg_pll_frac_lock);
		seq_printf(m, " mg_pll_ssc: 0x%08x\n",
			   pll->state.hw_state.mg_pll_ssc);
		seq_printf(m, " mg_pll_bias: 0x%08x\n",
			   pll->state.hw_state.mg_pll_bias);
		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}

static int i915_wa_registers(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list;
	struct i915_wa *wa;
	unsigned int i;

	seq_printf(m, "Workarounds applied: %u\n", wal->count);
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
			   i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);

	return 0;
}

static int i915_ipc_status_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Isochronous Priority Control: %s\n",
		   yesno(dev_priv->ipc_enabled));

	return 0;
}

static int i915_ipc_status_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (!HAS_IPC(dev_priv))
		return -ENODEV;

	return single_open(file, i915_ipc_status_show, dev_priv);
}

static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	intel_wakeref_t wakeref;
	bool enable;
	int ret;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (!dev_priv->ipc_enabled && enable)
			DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
		dev_priv->wm.distrust_bios_wm = true;
		dev_priv->ipc_enabled = enable;
		intel_enable_ipc(dev_priv);
	}

	return len;
}

static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};

static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_entry *entry;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 9)
		return -ENODEV;

	drm_modeset_lock_all(dev);

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;

		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_plane_id_on_crtc(crtc, plane_id) {
			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}

static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);

		seq_puts(m, "\tDRRS Supported: Yes\n");

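		/* drrs->dp is NULL while idleness DRRS is inactive */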
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
					 "\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
			   drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
				   drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
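		/* DRRS not supported on this pipe */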
		seq_puts(m, "\tDRRS Supported: No");
	}
	seq_puts(m, "\n");
}

static int i915_drrs_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *intel_crtc;
	int active_crtc_cnt = 0;

	drm_modeset_lock_all(dev);
	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->base.state->active) {
			active_crtc_cnt++;
			seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);

			drrs_status_per_crtc(m, dev, intel_crtc);
		}
	}
	drm_modeset_unlock_all(dev);

	if (!active_crtc_cnt)
		seq_puts(m, "No active crtc found\n");

	return 0;
}

static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		intel_encoder = intel_attached_encoder(connector);
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->base.port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
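			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */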
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}

static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_active)
				seq_puts(m, "1");
			else
				seq_puts(m, "0");
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, i915_displayport_test_active_show,
			   inode->i_private);
}

static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};

static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);

static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);

static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

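		/*
		 * - WM1+ latency values are in 0.5us units
		 * - latencies are in us on gen9/vlv/chv/g4x
		 */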
		if (INTEL_GEN(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}

static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev_priv);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev_priv);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev_priv);
}

static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	u16 new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}

static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};

static int
i915_wedged_get(void *data, u64 *val)
{
	int ret = i915_terminally_wedged(data);

	switch (ret) {
	case -EIO:
		*val = 1;
		return 0;
	case 0:
		*val = 0;
		return 0;
	default:
		return ret;
	}
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

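	/* Flush any previous reset before applying for a new one */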
	wait_event(i915->gpu_error.reset_queue,
		   !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags));

	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
			  "Manually set wedged engine mask = %llx", val);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

#define DROP_UNBOUND		BIT(0)
#define DROP_BOUND		BIT(1)
#define DROP_RETIRE		BIT(2)
#define DROP_ACTIVE		BIT(3)
#define DROP_FREED		BIT(4)
#define DROP_SHRINK_ALL		BIT(5)
#define DROP_IDLE		BIT(6)
#define DROP_RESET_ACTIVE	BIT(7)
#define DROP_RESET_SEQNO	BIT(8)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL | \
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
		i915_gem_set_wedged(i915);

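	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */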
	if (val & (DROP_ACTIVE | DROP_IDLE | DROP_RETIRE | DROP_RESET_SEQNO)) {
		int ret;

		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
		if (ret)
			return ret;

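		/*
		 * To finish the flush of the idle worker, we must complete
		 * the switch-to-kernel-context, which requires a double
		 * pass over the user contexts, first resting in the kernel
		 * context and then back; hence the two waits below.
		 */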
		if (ret == 0 && val & (DROP_IDLE | DROP_ACTIVE))
			ret = i915_gem_wait_for_idle(i915,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED,
						     MAX_SCHEDULE_TIMEOUT);

		if (ret == 0 && val & DROP_IDLE)
			ret = i915_gem_wait_for_idle(i915,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED,
						     MAX_SCHEDULE_TIMEOUT);

		if (val & DROP_RETIRE)
			i915_retire_requests(i915);

		mutex_unlock(&i915->drm.struct_mutex);
	}

	if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915))
		i915_handle_error(i915, ALL_ENGINES, 0, NULL);

	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_IDLE) {
		flush_delayed_work(&i915->gem.retire_work);
		flush_work(&i915->gem.idle_work);
	}

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");

static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;
	u32 snpcr = 0;

	if (!IS_GEN_RANGE(dev_priv, 6, 7))
		return -ENODEV;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;

	if (!IS_GEN_RANGE(dev_priv, 6, 7))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		u32 snpcr;

3790 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3791 snpcr &= ~GEN6_MBC_SNPCR_MASK;
3792 snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
3793 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3794 }
3795
3796 return 0;
3797}
3798
3799DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
3800 i915_cache_sharing_get, i915_cache_sharing_set,
3801 "%llu\n");
3802
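/*
 * Example usage of i915_cache_sharing (gen6/gen7 only; a sketch, same path
 * assumptions as above). Valid values are 0-3, written into the MBCUNIT
 * snoop control field:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_cache_sharing
 *   cat /sys/kernel/debug/dri/0/i915_cache_sharing
 */
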
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}

static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: the valid-SS mask respects the spec and reads back
		 * only the valid bits of these registers, excluding reserved
		 * bits, although this seems wrong because it would leave
		 * many subslices without an ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}

static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2 * s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[s] |= BIT(ss);
			}

			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}

static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice =
			RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
		}
		sseu->eu_total = sseu->eu_per_subslice *
				 intel_sseu_subslice_total(sseu);

		/* subtract fused-off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}

static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, " %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, " %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, " %s Subslice Total: %u\n", type,
		   intel_sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, " %s Slice%i subslices: %u\n", type,
			   s, intel_sseu_subslices_per_slice(sseu, s));
	}
	seq_printf(m, " %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, " %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, " Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, " Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, " Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}

static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;
	intel_wakeref_t wakeref;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
	sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
	sseu.max_eus_per_subslice =
		RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_sseu_device_status(dev_priv, &sseu);
		else if (IS_BROADWELL(dev_priv))
			broadwell_sseu_device_status(dev_priv, &sseu);
		else if (IS_GEN(dev_priv, 9))
			gen9_sseu_device_status(dev_priv, &sseu);
		else if (INTEL_GEN(dev_priv) >= 10)
			gen10_sseu_device_status(dev_priv, &sseu);
	}

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}

static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	if (INTEL_GEN(i915) < 6)
		return 0;

	file->private_data =
		(void *)(uintptr_t)intel_runtime_pm_get(&i915->runtime_pm);
	intel_uncore_forcewake_user_get(&i915->uncore);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	if (INTEL_GEN(i915) < 6)
		return 0;

	intel_uncore_forcewake_user_put(&i915->uncore);
	intel_runtime_pm_put(&i915->runtime_pm,
			     (intel_wakeref_t)(uintptr_t)file->private_data);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

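/*
 * i915_forcewake_user pins the device awake for as long as the file is held
 * open: opening takes a runtime-pm wakeref plus a user forcewake reference,
 * and releasing the file drops both. A debugging session might look like
 * this (hypothetical shell sketch, same path assumptions as above):
 *
 *   exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user   # hold hw awake
 *   ... poke registers ...
 *   exec 3<&-                                             # release
 */
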
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	/*
	 * Synchronize with everything first, in case an HPD storm was
	 * detected but the kernel hasn't finished handling it yet.
	 */
	synchronize_irq(dev_priv->drm.irq);
	flush_work(&dev_priv->hotplug.dig_port_work);
	flush_work(&dev_priv->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}

static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip the trailing newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
			      new_threshold);
	else
		DRM_DEBUG_KMS("Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}

static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}

static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write,
};

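/*
 * Example usage of i915_hpd_storm_ctl (a sketch): write a decimal threshold,
 * 0 to disable storm detection, or "reset" to restore
 * HPD_STORM_DEFAULT_THRESHOLD:
 *
 *   echo 5 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *   echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *   cat /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 */
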
static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Enabled: %s\n",
		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));

	return 0;
}

static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}

static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip the trailing newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(dev_priv);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
		      new_state ? "En" : "Dis");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_short_storm_enabled = new_state;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}

static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};

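/*
 * i915_hpd_short_storm_ctl takes a boolean (any form accepted by kstrtobool,
 * e.g. "0"/"1" or "on"/"off") or "reset", which restores the default for the
 * system: enabled unless the platform has DP-MST. A sketch:
 *
 *   echo on > /sys/kernel/debug/dri/0/i915_hpd_short_storm_ctl
 */
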
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		if (!crtc_state->base.active ||
		    !crtc_state->has_drrs)
			goto out;

		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			if (!(crtc_state->base.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(connector);
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS (%llu)\n",
					 val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");

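/*
 * i915_drrs_ctl is write-only: any non-zero value force-enables DRRS on
 * every active eDP pipe that supports it; zero disables it. A sketch:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_drrs_ctl
 */
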
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}

static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};

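/*
 * i915_fifo_underrun_reset re-arms FIFO underrun reporting, which is
 * typically disabled after the first underrun is reported to avoid an
 * interrupt storm. Writing any true boolean triggers the re-arm (a sketch):
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_fifo_underrun_reset
 */
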
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
};

int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
			    to_i915(minor->dev), &i915_forcewake_fops);

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		debugfs_create_file(i915_debugfs_files[i].name,
				    S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    i915_debugfs_files[i].fops);
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If not set, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is not set. If not set, size 1 is used. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};

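/*
 * i915_dpcd dumps the DPCD ranges listed above for a connected DP/eDP
 * connector, one "address: bytes" line per block, e.g. (illustrative
 * output only):
 *
 *   0000: 12 14 c4 81 01 01 01 81 02 02 06 00 00 00
 */
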
static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	u8 buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* Blocks must fit in the on-stack buffer */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err < 0)
			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
		else
			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_dpcd);

static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_panel);

static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	bool hdcp_cap, hdcp2_cap;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* HDCP is supported by the connector only if a shim is registered */
	if (!intel_connector->hdcp.shim)
		return -EINVAL;

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	hdcp_cap = intel_hdcp_capable(intel_connector);
	hdcp2_cap = intel_hdcp2_capable(intel_connector);

	if (hdcp_cap)
		seq_puts(m, "HDCP1.4 ");
	if (hdcp2_cap)
		seq_puts(m, "HDCP2.2 ");

	if (!hdcp_cap && !hdcp2_cap)
		seq_puts(m, "None");
	seq_puts(m, "\n");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);

static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc_params.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

static ssize_t i915_dsc_fec_support_write(struct file *file,
					  const char __user *ubuf,
					  size_t len, loff_t *offp)
{
	bool dsc_enable = false;
	int ret;
	struct drm_connector *connector =
		((struct seq_file *)file->private_data)->private;
	struct intel_encoder *encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (len == 0)
		return 0;

	DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
			 len);

	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
	if (ret < 0)
		return ret;

	DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
			 (dsc_enable) ? "true" : "false");
	intel_dp->force_dsc_en = dsc_enable;

	*offp += len;
	return len;
}

static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}

static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write,
};

/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);

	/* The connector must have been registered beforehand */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
				    connector, &i915_psr_sink_status_fops);
	}

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
				    connector, &i915_hdcp_sink_capability_fops);
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
				    connector, &i915_dsc_fec_support_fops);

	return 0;
}
