#include <linux/uaccess.h>

#include <drm/drm_debugfs.h>

#include "dc.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_debugfs.h"
#include "dm_helpers.h"
#include "dmub/dmub_srv.h"
#include "resource.h"
#include "dsc.h"
#include "dc_link_dp.h"

struct dmub_debugfs_trace_header {
	uint32_t entry_count;
	uint32_t reserved[3];
};

struct dmub_debugfs_trace_entry {
	uint32_t trace_code;
	uint32_t tick_count;
	uint32_t param0;
	uint32_t param1;
};

static inline const char *yesno(bool v)
{
	return v ? "yes" : "no";
}
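/*
 * Shared helper for the debugfs write handlers below: copy a user-space
 * string of whitespace-separated hex values into wr_buf and convert up to
 * max_param_num of them into param[]. Summary reconstructed from the
 * implementation; the original comment block for this helper was lost.
 */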
static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
					  long *param, const char __user *buf,
					  int max_param_num,
					  uint8_t *param_nums)
{
	char *wr_buf_ptr = NULL;
	uint32_t wr_buf_count = 0;
	int r;
	char *sub_str = NULL;
	const char delimiter[3] = {' ', '\n', '\0'};
	uint8_t param_index = 0;

	*param_nums = 0;

	wr_buf_ptr = wr_buf;

	/* r is the number of bytes that could not be copied */
	r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);

	if (r >= wr_buf_size) {
		DRM_DEBUG_DRIVER("user data could not be read\n");
		return -EINVAL;
	}

	/* count the parameters, stopping at a newline or the end of the buffer */
	while ((*wr_buf_ptr != 0xa) && (wr_buf_count < wr_buf_size)) {
		/* skip whitespace */
		while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
			wr_buf_ptr++;
			wr_buf_count++;
		}

		if (wr_buf_count == wr_buf_size)
			break;

		/* skip the non-whitespace characters of one parameter */
		while ((!isspace(*wr_buf_ptr)) && (wr_buf_count < wr_buf_size)) {
			wr_buf_ptr++;
			wr_buf_count++;
		}

		(*param_nums)++;

		if (wr_buf_count == wr_buf_size)
			break;
	}

	if (*param_nums > max_param_num)
		*param_nums = max_param_num;

	wr_buf_ptr = wr_buf; /* reset buffer pointer */
	wr_buf_count = 0; /* number of chars already checked */

	while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
		wr_buf_ptr++;
		wr_buf_count++;
	}

	/* convert each parameter from hex into a long */
	while (param_index < *param_nums) {
		sub_str = strsep(&wr_buf_ptr, delimiter);

		r = kstrtol(sub_str, 16, &(param[param_index]));

		if (r)
			DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);

		param_index++;
	}

	return 0;
}
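/*
 * debugfs entry "link_settings" (summary reconstructed from the code below;
 * the original usage comment was lost):
 *   - read:  reports the current, verified, reported and preferred link
 *            settings as "<lane_count> <link_rate> <link_spread>" triples.
 *   - write: "<lane_count> <link_rate>" in hex overrides the preferred link
 *            settings, e.g. (the path depends on the DRM minor and connector):
 *            echo "4 14" > /sys/kernel/debug/dri/0/DP-1/link_settings
 */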
179static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
180 size_t size, loff_t *pos)
181{
182 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
183 struct dc_link *link = connector->dc_link;
184 char *rd_buf = NULL;
185 char *rd_buf_ptr = NULL;
186 const uint32_t rd_buf_size = 100;
187 uint32_t result = 0;
188 uint8_t str_len = 0;
189 int r;
190
191 if (*pos & 3 || size & 3)
192 return -EINVAL;
193
194 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
195 if (!rd_buf)
196 return 0;
197
198 rd_buf_ptr = rd_buf;
199
200 str_len = strlen("Current: %d %d %d ");
201 snprintf(rd_buf_ptr, str_len, "Current: %d %d %d ",
202 link->cur_link_settings.lane_count,
203 link->cur_link_settings.link_rate,
204 link->cur_link_settings.link_spread);
205 rd_buf_ptr += str_len;
206
207 str_len = strlen("Verified: %d %d %d ");
208 snprintf(rd_buf_ptr, str_len, "Verified: %d %d %d ",
209 link->verified_link_cap.lane_count,
210 link->verified_link_cap.link_rate,
211 link->verified_link_cap.link_spread);
212 rd_buf_ptr += str_len;
213
214 str_len = strlen("Reported: %d %d %d ");
215 snprintf(rd_buf_ptr, str_len, "Reported: %d %d %d ",
216 link->reported_link_cap.lane_count,
217 link->reported_link_cap.link_rate,
218 link->reported_link_cap.link_spread);
219 rd_buf_ptr += str_len;
220
221 str_len = strlen("Preferred: %d %d %d ");
222 snprintf(rd_buf_ptr, str_len, "Preferred: %d %d %d\n",
223 link->preferred_link_setting.lane_count,
224 link->preferred_link_setting.link_rate,
225 link->preferred_link_setting.link_spread);
226
227 while (size) {
228 if (*pos >= rd_buf_size)
229 break;
230
231 r = put_user(*(rd_buf + result), buf);
232 if (r)
233 return r;
234
235 buf += 1;
236 size -= 1;
237 *pos += 1;
238 result += 1;
239 }
240
241 kfree(rd_buf);
242 return result;
243}
244
245static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
246 size_t size, loff_t *pos)
247{
248 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
249 struct dc_link *link = connector->dc_link;
250 struct dc *dc = (struct dc *)link->dc;
251 struct dc_link_settings prefer_link_settings;
252 char *wr_buf = NULL;
253 const uint32_t wr_buf_size = 40;
254
255 int max_param_num = 2;
256 uint8_t param_nums = 0;
257 long param[2];
258 bool valid_input = false;
259
260 if (size == 0)
261 return -EINVAL;
262
263 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
264 if (!wr_buf)
265 return -ENOSPC;
266
	/* pass at most wr_buf_size - 1 bytes so wr_buf stays NUL terminated */
	if (parse_write_buffer_into_params(wr_buf, min_t(size_t, size, wr_buf_size - 1),
					   (long *)param, buf,
					   max_param_num,
					   &param_nums)) {
		kfree(wr_buf);
		return -EINVAL;
	}

	if (param_nums <= 0) {
		kfree(wr_buf);
		DRM_DEBUG_DRIVER("user data could not be read\n");
		return -EINVAL;
	}
280
281 switch (param[0]) {
282 case LANE_COUNT_ONE:
283 case LANE_COUNT_TWO:
284 case LANE_COUNT_FOUR:
285 valid_input = true;
286 break;
287 default:
288 break;
289 }
290
291 switch (param[1]) {
292 case LINK_RATE_LOW:
293 case LINK_RATE_HIGH:
294 case LINK_RATE_RBR2:
295 case LINK_RATE_HIGH2:
296 case LINK_RATE_HIGH3:
297 valid_input = true;
298 break;
299 default:
300 break;
301 }
302
	if (!valid_input) {
		kfree(wr_buf);
		DRM_DEBUG_DRIVER("Invalid input value; no HW will be programmed\n");
		return size;
	}
312 prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
313 prefer_link_settings.lane_count = param[0];
314 prefer_link_settings.link_rate = param[1];
315
316 dc_link_set_preferred_link_settings(dc, &prefer_link_settings, link);
317
318 kfree(wr_buf);
319 return size;
320}
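/*
 * debugfs entry "phy_settings" (reconstructed summary):
 *   - read:  reports the current voltage swing, pre-emphasis and
 *            post-cursor2 lane settings.
 *   - write: "<voltage_swing> <pre_emphasis> <post_cursor2>" in hex
 *            reprograms the PHY drive settings via
 *            dc_link_set_drive_settings().
 */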
363static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
364 size_t size, loff_t *pos)
365{
366 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
367 struct dc_link *link = connector->dc_link;
368 char *rd_buf = NULL;
369 const uint32_t rd_buf_size = 20;
370 uint32_t result = 0;
371 int r;
372
373 if (*pos & 3 || size & 3)
374 return -EINVAL;
375
376 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
377 if (!rd_buf)
378 return -EINVAL;
379
380 snprintf(rd_buf, rd_buf_size, " %d %d %d ",
381 link->cur_lane_setting.VOLTAGE_SWING,
382 link->cur_lane_setting.PRE_EMPHASIS,
383 link->cur_lane_setting.POST_CURSOR2);
384
385 while (size) {
386 if (*pos >= rd_buf_size)
387 break;
388
389 r = put_user((*(rd_buf + result)), buf);
390 if (r)
391 return r;
392
393 buf += 1;
394 size -= 1;
395 *pos += 1;
396 result += 1;
397 }
398
399 kfree(rd_buf);
400 return result;
401}
402
403static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
404 size_t size, loff_t *pos)
405{
406 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
407 struct dc_link *link = connector->dc_link;
408 struct dc *dc = (struct dc *)link->dc;
409 char *wr_buf = NULL;
410 uint32_t wr_buf_size = 40;
411 long param[3];
412 bool use_prefer_link_setting;
413 struct link_training_settings link_lane_settings;
414 int max_param_num = 3;
415 uint8_t param_nums = 0;
416 int r = 0;
417
418
419 if (size == 0)
420 return -EINVAL;
421
422 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
423 if (!wr_buf)
424 return -ENOSPC;
425
	/* pass at most wr_buf_size - 1 bytes so wr_buf stays NUL terminated */
	if (parse_write_buffer_into_params(wr_buf, min_t(size_t, size, wr_buf_size - 1),
					   (long *)param, buf,
					   max_param_num,
					   &param_nums)) {
		kfree(wr_buf);
		return -EINVAL;
	}

	if (param_nums <= 0) {
		kfree(wr_buf);
		DRM_DEBUG_DRIVER("user data could not be read\n");
		return -EINVAL;
	}
439
	if ((param[0] > VOLTAGE_SWING_MAX_LEVEL) ||
	    (param[1] > PRE_EMPHASIS_MAX_LEVEL) ||
	    (param[2] > POST_CURSOR2_MAX_LEVEL)) {
		kfree(wr_buf);
		DRM_DEBUG_DRIVER("Invalid input; no HW will be programmed\n");
		return size;
	}
447
448
449 use_prefer_link_setting =
450 ((link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) &&
451 (link->test_pattern_enabled));
452
453 memset(&link_lane_settings, 0, sizeof(link_lane_settings));
454
455 if (use_prefer_link_setting) {
456 link_lane_settings.link_settings.lane_count =
457 link->preferred_link_setting.lane_count;
458 link_lane_settings.link_settings.link_rate =
459 link->preferred_link_setting.link_rate;
460 link_lane_settings.link_settings.link_spread =
461 link->preferred_link_setting.link_spread;
462 } else {
463 link_lane_settings.link_settings.lane_count =
464 link->cur_link_settings.lane_count;
465 link_lane_settings.link_settings.link_rate =
466 link->cur_link_settings.link_rate;
467 link_lane_settings.link_settings.link_spread =
468 link->cur_link_settings.link_spread;
469 }
470
471
472 for (r = 0; r < link_lane_settings.link_settings.lane_count; r++) {
473 link_lane_settings.lane_settings[r].VOLTAGE_SWING =
474 (enum dc_voltage_swing) (param[0]);
475 link_lane_settings.lane_settings[r].PRE_EMPHASIS =
476 (enum dc_pre_emphasis) (param[1]);
477 link_lane_settings.lane_settings[r].POST_CURSOR2 =
478 (enum dc_post_cursor2) (param[2]);
479 }
480
481
482 dc_link_set_drive_settings(dc, &link_lane_settings, link);
483
484 kfree(wr_buf);
485 return size;
486}
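/*
 * debugfs entry "test_pattern" (reconstructed summary): writing a test
 * pattern id, optionally followed by ten bytes for the 80-bit custom
 * pattern (all in hex), asks DC to emit the corresponding DP PHY test
 * pattern; see the switch statement below for the accepted ids.
 */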
546static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __user *buf,
547 size_t size, loff_t *pos)
548{
549 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
550 struct dc_link *link = connector->dc_link;
551 char *wr_buf = NULL;
552 uint32_t wr_buf_size = 100;
553 long param[11] = {0x0};
554 int max_param_num = 11;
555 enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
556 bool disable_hpd = false;
557 bool valid_test_pattern = false;
558 uint8_t param_nums = 0;
559
560 uint8_t custom_pattern[10] = {
561 0x1f, 0x7c, 0xf0, 0xc1, 0x07,
562 0x1f, 0x7c, 0xf0, 0xc1, 0x07
563 };
564 struct dc_link_settings prefer_link_settings = {LANE_COUNT_UNKNOWN,
565 LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
566 struct dc_link_settings cur_link_settings = {LANE_COUNT_UNKNOWN,
567 LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
568 struct link_training_settings link_training_settings;
569 int i;
570
571 if (size == 0)
572 return -EINVAL;
573
574 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
575 if (!wr_buf)
576 return -ENOSPC;
577
	/* pass at most wr_buf_size - 1 bytes so wr_buf stays NUL terminated */
	if (parse_write_buffer_into_params(wr_buf, min_t(size_t, size, wr_buf_size - 1),
					   (long *)param, buf,
					   max_param_num,
					   &param_nums)) {
		kfree(wr_buf);
		return -EINVAL;
	}

	if (param_nums <= 0) {
		kfree(wr_buf);
		DRM_DEBUG_DRIVER("user data could not be read\n");
		return -EINVAL;
	}
591
592
593 test_pattern = param[0];
594
595 switch (test_pattern) {
596 case DP_TEST_PATTERN_VIDEO_MODE:
597 case DP_TEST_PATTERN_COLOR_SQUARES:
598 case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
599 case DP_TEST_PATTERN_VERTICAL_BARS:
600 case DP_TEST_PATTERN_HORIZONTAL_BARS:
601 case DP_TEST_PATTERN_COLOR_RAMP:
602 valid_test_pattern = true;
603 break;
604
605 case DP_TEST_PATTERN_D102:
606 case DP_TEST_PATTERN_SYMBOL_ERROR:
607 case DP_TEST_PATTERN_PRBS7:
608 case DP_TEST_PATTERN_80BIT_CUSTOM:
609 case DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE:
610 case DP_TEST_PATTERN_TRAINING_PATTERN4:
611 disable_hpd = true;
612 valid_test_pattern = true;
613 break;
614
615 default:
616 valid_test_pattern = false;
617 test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
618 break;
619 }
620
621 if (!valid_test_pattern) {
622 kfree(wr_buf);
623 DRM_DEBUG_DRIVER("Invalid Test Pattern Parameters\n");
624 return size;
625 }
626
627 if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) {
628 for (i = 0; i < 10; i++) {
629 if ((uint8_t) param[i + 1] != 0x0)
630 break;
631 }
632
633 if (i < 10) {
634
635 for (i = 0; i < 10; i++)
636 custom_pattern[i] = (uint8_t) param[i + 1];
637 }
638 }
648 if (!disable_hpd)
649 dc_link_enable_hpd(link);
650
651 prefer_link_settings.lane_count = link->verified_link_cap.lane_count;
652 prefer_link_settings.link_rate = link->verified_link_cap.link_rate;
653 prefer_link_settings.link_spread = link->verified_link_cap.link_spread;
654
655 cur_link_settings.lane_count = link->cur_link_settings.lane_count;
656 cur_link_settings.link_rate = link->cur_link_settings.link_rate;
657 cur_link_settings.link_spread = link->cur_link_settings.link_spread;
658
659 link_training_settings.link_settings = cur_link_settings;
660
661
662 if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
663 if (prefer_link_settings.lane_count != LANE_COUNT_UNKNOWN &&
664 prefer_link_settings.link_rate != LINK_RATE_UNKNOWN &&
665 (prefer_link_settings.lane_count != cur_link_settings.lane_count ||
666 prefer_link_settings.link_rate != cur_link_settings.link_rate))
667 link_training_settings.link_settings = prefer_link_settings;
668 }
669
670 for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++)
671 link_training_settings.lane_settings[i] = link->cur_lane_setting;
672
673 dc_link_set_test_pattern(
674 link,
675 test_pattern,
676 DP_TEST_PATTERN_COLOR_SPACE_RGB,
677 &link_training_settings,
678 custom_pattern,
679 10);
686 if (valid_test_pattern && disable_hpd)
687 dc_link_disable_hpd(link);
688
689 kfree(wr_buf);
690
691 return size;
692}
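/*
 * Dumps the DMCUB trace buffer entries (trace_code, tick_count, param0,
 * param1) to the seq_file backing amdgpu_dm_dmub_tracebuffer.
 */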
698static int dmub_tracebuffer_show(struct seq_file *m, void *data)
699{
700 struct amdgpu_device *adev = m->private;
701 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
702 struct dmub_debugfs_trace_entry *entries;
703 uint8_t *tbuf_base;
704 uint32_t tbuf_size, max_entries, num_entries, i;
705
706 if (!fb_info)
707 return 0;
708
709 tbuf_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr;
710 if (!tbuf_base)
711 return 0;
712
713 tbuf_size = fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size;
714 max_entries = (tbuf_size - sizeof(struct dmub_debugfs_trace_header)) /
715 sizeof(struct dmub_debugfs_trace_entry);
716
717 num_entries =
718 ((struct dmub_debugfs_trace_header *)tbuf_base)->entry_count;
719
720 num_entries = min(num_entries, max_entries);
721
722 entries = (struct dmub_debugfs_trace_entry
723 *)(tbuf_base +
724 sizeof(struct dmub_debugfs_trace_header));
725
726 for (i = 0; i < num_entries; ++i) {
727 struct dmub_debugfs_trace_entry *entry = &entries[i];
728
729 seq_printf(m,
730 "trace_code=%u tick_count=%u param0=%u param1=%u\n",
731 entry->trace_code, entry->tick_count, entry->param0,
732 entry->param1);
733 }
734
735 return 0;
736}
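/*
 * Dumps the raw DMCUB firmware state window to the seq_file backing
 * amdgpu_dm_dmub_fw_state.
 */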
742static int dmub_fw_state_show(struct seq_file *m, void *data)
743{
744 struct amdgpu_device *adev = m->private;
745 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
746 uint8_t *state_base;
747 uint32_t state_size;
748
749 if (!fb_info)
750 return 0;
751
752 state_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr;
753 if (!state_base)
754 return 0;
755
756 state_size = fb_info->fb[DMUB_WINDOW_6_FW_STATE].size;
757
758 return seq_write(m, state_base, state_size);
759}
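/*
 * Reports the current and maximum bits-per-color of the connector's
 * active stream, e.g. (the path is only an example):
 *   cat /sys/kernel/debug/dri/0/DP-1/output_bpc
 */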
765static int output_bpc_show(struct seq_file *m, void *data)
766{
767 struct drm_connector *connector = m->private;
768 struct drm_device *dev = connector->dev;
769 struct drm_crtc *crtc = NULL;
770 struct dm_crtc_state *dm_crtc_state = NULL;
771 int res = -ENODEV;
772 unsigned int bpc;
773
774 mutex_lock(&dev->mode_config.mutex);
775 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
776
777 if (connector->state == NULL)
778 goto unlock;
779
780 crtc = connector->state->crtc;
781 if (crtc == NULL)
782 goto unlock;
783
784 drm_modeset_lock(&crtc->mutex, NULL);
785 if (crtc->state == NULL)
786 goto unlock;
787
788 dm_crtc_state = to_dm_crtc_state(crtc->state);
789 if (dm_crtc_state->stream == NULL)
790 goto unlock;
791
792 switch (dm_crtc_state->stream->timing.display_color_depth) {
793 case COLOR_DEPTH_666:
794 bpc = 6;
795 break;
796 case COLOR_DEPTH_888:
797 bpc = 8;
798 break;
799 case COLOR_DEPTH_101010:
800 bpc = 10;
801 break;
802 case COLOR_DEPTH_121212:
803 bpc = 12;
804 break;
805 case COLOR_DEPTH_161616:
806 bpc = 16;
807 break;
808 default:
809 goto unlock;
810 }
811
812 seq_printf(m, "Current: %u\n", bpc);
813 seq_printf(m, "Maximum: %u\n", connector->display_info.bpc);
814 res = 0;
815
816unlock:
817 if (crtc)
818 drm_modeset_unlock(&crtc->mutex);
819
820 drm_modeset_unlock(&dev->mode_config.connection_mutex);
821 mutex_unlock(&dev->mode_config.mutex);
822
823 return res;
824}
825
826#ifdef CONFIG_DRM_AMD_DC_HDCP
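/*
 * Reports whether the connected sink advertises HDCP 1.4 and/or
 * HDCP 2.2 capability.
 */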
836static int hdcp_sink_capability_show(struct seq_file *m, void *data)
837{
838 struct drm_connector *connector = m->private;
839 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
840 bool hdcp_cap, hdcp2_cap;
841
842 if (connector->status != connector_status_connected)
843 return -ENODEV;
844
845 seq_printf(m, "%s:%d HDCP version: ", connector->name, connector->base.id);
846
847 hdcp_cap = dc_link_is_hdcp14(aconnector->dc_link, aconnector->dc_sink->sink_signal);
848 hdcp2_cap = dc_link_is_hdcp22(aconnector->dc_link, aconnector->dc_sink->sink_signal);
849
850
851 if (hdcp_cap)
852 seq_printf(m, "%s ", "HDCP1.4");
853 if (hdcp2_cap)
854 seq_printf(m, "%s ", "HDCP2.2");
855
856 if (!hdcp_cap && !hdcp2_cap)
857 seq_printf(m, "%s ", "None");
858
859 seq_puts(m, "\n");
860
861 return 0;
862}
863#endif
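/*
 * debugfs entry "sdp_message" (reconstructed summary): writes up to 36
 * bytes of user data as an SDP packet on the connector's active stream.
 */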
878static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *buf,
879 size_t size, loff_t *pos)
880{
881 int r;
882 uint8_t data[36];
883 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
884 struct dm_crtc_state *acrtc_state;
885 uint32_t write_size = 36;
886
887 if (connector->base.status != connector_status_connected)
888 return -ENODEV;
889
890 if (size == 0)
891 return 0;
892
893 acrtc_state = to_dm_crtc_state(connector->base.state->crtc->state);
894
895 r = copy_from_user(data, buf, write_size);
896
897 write_size -= r;
898
899 dc_stream_send_dp_sdp(acrtc_state->stream, data, write_size);
900
901 return write_size;
902}
903
904static ssize_t dp_dpcd_address_write(struct file *f, const char __user *buf,
905 size_t size, loff_t *pos)
906{
907 int r;
908 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
909
910 if (size < sizeof(connector->debugfs_dpcd_address))
911 return -EINVAL;
912
913 r = copy_from_user(&connector->debugfs_dpcd_address,
914 buf, sizeof(connector->debugfs_dpcd_address));
915
916 return size - r;
917}
918
919static ssize_t dp_dpcd_size_write(struct file *f, const char __user *buf,
920 size_t size, loff_t *pos)
921{
922 int r;
923 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
924
925 if (size < sizeof(connector->debugfs_dpcd_size))
926 return -EINVAL;
927
928 r = copy_from_user(&connector->debugfs_dpcd_size,
929 buf, sizeof(connector->debugfs_dpcd_size));
930
931 if (connector->debugfs_dpcd_size > 256)
932 connector->debugfs_dpcd_size = 0;
933
934 return size - r;
935}
936
937static ssize_t dp_dpcd_data_write(struct file *f, const char __user *buf,
938 size_t size, loff_t *pos)
939{
940 int r;
941 char *data;
942 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
943 struct dc_link *link = connector->dc_link;
944 uint32_t write_size = connector->debugfs_dpcd_size;
945
946 if (!write_size || size < write_size)
947 return -EINVAL;
948
949 data = kzalloc(write_size, GFP_KERNEL);
950 if (!data)
951 return 0;
952
953 r = copy_from_user(data, buf, write_size);
954
955 dm_helpers_dp_write_dpcd(link->ctx, link,
956 connector->debugfs_dpcd_address, data, write_size - r);
957 kfree(data);
958 return write_size - r;
959}
960
961static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf,
962 size_t size, loff_t *pos)
963{
964 int r;
965 char *data;
966 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
967 struct dc_link *link = connector->dc_link;
968 uint32_t read_size = connector->debugfs_dpcd_size;
969
970 if (!read_size || size < read_size)
971 return 0;
972
973 data = kzalloc(read_size, GFP_KERNEL);
974 if (!data)
975 return 0;
976
977 dm_helpers_dp_read_dpcd(link->ctx, link,
978 connector->debugfs_dpcd_address, data, read_size);
979
980 r = copy_to_user(buf, data, read_size);
981
982 kfree(data);
983 return read_size - r;
984}
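/*
 * Reports FEC and DSC sink support ("yes"/"no") for the connector, taken
 * from the cached DPCD caps or, for MST connectors, inferred from dsc_aux.
 */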
995static int dp_dsc_fec_support_show(struct seq_file *m, void *data)
996{
997 struct drm_connector *connector = m->private;
998 struct drm_modeset_acquire_ctx ctx;
999 struct drm_device *dev = connector->dev;
1000 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1001 int ret = 0;
1002 bool try_again = false;
1003 bool is_fec_supported = false;
1004 bool is_dsc_supported = false;
1005 struct dpcd_caps dpcd_caps;
1006
1007 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
1008 do {
1009 try_again = false;
1010 ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
1011 if (ret) {
1012 if (ret == -EDEADLK) {
1013 ret = drm_modeset_backoff(&ctx);
1014 if (!ret) {
1015 try_again = true;
1016 continue;
1017 }
1018 }
1019 break;
1020 }
1021 if (connector->status != connector_status_connected) {
1022 ret = -ENODEV;
1023 break;
1024 }
1025 dpcd_caps = aconnector->dc_link->dpcd_caps;
1026 if (aconnector->port) {
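		/*
		 * For MST connectors, dsc_aux is expected to be set during
		 * MST port/mode enumeration when a DSC-capable device was
		 * found on the path, so its presence is treated here as
		 * FEC + DSC support (assumption inferred from the code).
		 */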
1032 if (aconnector->dsc_aux) {
1033 is_fec_supported = true;
1034 is_dsc_supported = true;
1035 }
1036 } else {
1037 is_fec_supported = dpcd_caps.fec_cap.raw & 0x1;
1038 is_dsc_supported = dpcd_caps.dsc_caps.dsc_basic_caps.raw[0] & 0x1;
1039 }
1040 } while (try_again);
1041
1042 drm_modeset_drop_locks(&ctx);
1043 drm_modeset_acquire_fini(&ctx);
1044
1045 seq_printf(m, "FEC_Sink_Support: %s\n", yesno(is_fec_supported));
1046 seq_printf(m, "DSC_Sink_Support: %s\n", yesno(is_dsc_supported));
1047
1048 return ret;
1049}
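/*
 * debugfs entry "trigger_hotplug" (reconstructed summary):
 *   echo 1 > .../trigger_hotplug   - simulate a plug event (re-detect the link)
 *   echo 0 > .../trigger_hotplug   - simulate an unplug event (drop the sink)
 */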
1066static ssize_t dp_trigger_hotplug(struct file *f, const char __user *buf,
1067 size_t size, loff_t *pos)
1068{
1069 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1070 struct drm_connector *connector = &aconnector->base;
1071 struct dc_link *link = NULL;
1072 struct drm_device *dev = connector->dev;
1073 enum dc_connection_type new_connection_type = dc_connection_none;
1074 char *wr_buf = NULL;
1075 uint32_t wr_buf_size = 42;
1076 int max_param_num = 1;
1077 long param[1] = {0};
1078 uint8_t param_nums = 0;
1079
1080 if (!aconnector || !aconnector->dc_link)
1081 return -EINVAL;
1082
1083 if (size == 0)
1084 return -EINVAL;
1085
1086 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
1087
1088 if (!wr_buf) {
1089 DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
1090 return -ENOSPC;
1091 }
1092
	/* pass at most wr_buf_size - 1 bytes so wr_buf stays NUL terminated */
	if (parse_write_buffer_into_params(wr_buf, min_t(size_t, size, wr_buf_size - 1),
					   (long *)param, buf,
					   max_param_num,
					   &param_nums)) {
		kfree(wr_buf);
		return -EINVAL;
	}

	if (param_nums <= 0) {
		DRM_DEBUG_DRIVER("user data could not be read\n");
		kfree(wr_buf);
		return -EINVAL;
	}
1106
	/* hold hpd_lock across both the plug and unplug paths so the
	 * unlock at the end of the function is always balanced
	 */
	mutex_lock(&aconnector->hpd_lock);

	if (param[0] == 1) {
1110 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type) &&
1111 new_connection_type != dc_connection_none)
1112 goto unlock;
1113
1114 if (!dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD))
1115 goto unlock;
1116
1117 amdgpu_dm_update_connector_after_detect(aconnector);
1118
1119 drm_modeset_lock_all(dev);
1120 dm_restore_drm_connector_state(dev, connector);
1121 drm_modeset_unlock_all(dev);
1122
1123 drm_kms_helper_hotplug_event(dev);
1124 } else if (param[0] == 0) {
1125 if (!aconnector->dc_link)
1126 goto unlock;
1127
1128 link = aconnector->dc_link;
1129
1130 if (link->local_sink) {
1131 dc_sink_release(link->local_sink);
1132 link->local_sink = NULL;
1133 }
1134
1135 link->dpcd_sink_count = 0;
1136 link->type = dc_connection_none;
1137 link->dongle_max_pix_clk = 0;
1138
1139 amdgpu_dm_update_connector_after_detect(aconnector);
1140
1141 drm_modeset_lock_all(dev);
1142 dm_restore_drm_connector_state(dev, connector);
1143 drm_modeset_unlock_all(dev);
1144
1145 drm_kms_helper_hotplug_event(dev);
1146 }
1147
1148unlock:
1149 mutex_unlock(&aconnector->hpd_lock);
1150
1151 kfree(wr_buf);
1152 return size;
1153}
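/*
 * "dsc_clock_en" read path: reports whether DSC is currently enabled (1)
 * or disabled (0) on the pipe driving this connector, as read back from
 * the DSC hardware state.
 */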
1170static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
1171 size_t size, loff_t *pos)
1172{
1173 char *rd_buf = NULL;
1174 char *rd_buf_ptr = NULL;
1175 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1176 struct display_stream_compressor *dsc;
1177 struct dcn_dsc_state dsc_state = {0};
1178 const uint32_t rd_buf_size = 10;
1179 struct pipe_ctx *pipe_ctx;
1180 ssize_t result = 0;
1181 int i, r, str_len = 30;
1182
1183 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
1184
1185 if (!rd_buf)
1186 return -ENOMEM;
1187
1188 rd_buf_ptr = rd_buf;
1189
1190 for (i = 0; i < MAX_PIPES; i++) {
1191 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1192 if (pipe_ctx && pipe_ctx->stream &&
1193 pipe_ctx->stream->link == aconnector->dc_link)
1194 break;
1195 }
1196
1197 if (!pipe_ctx)
1198 return -ENXIO;
1199
1200 dsc = pipe_ctx->stream_res.dsc;
1201 if (dsc)
1202 dsc->funcs->dsc_read_state(dsc, &dsc_state);
1203
	/* bound the write by the allocated buffer, which is smaller than str_len */
	snprintf(rd_buf_ptr, rd_buf_size,
		 "%d\n",
		 dsc_state.dsc_clock_en);
	rd_buf_ptr += str_len;
1208
1209 while (size) {
1210 if (*pos >= rd_buf_size)
1211 break;
1212
1213 r = put_user(*(rd_buf + result), buf);
1214 if (r)
1215 return r;
1216
1217 buf += 1;
1218 size -= 1;
1219 *pos += 1;
1220 result += 1;
1221 }
1222
1223 kfree(rd_buf);
1224 return result;
1225}
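/*
 * "dsc_clock_en" write path: 1 forces DSC on, 2 forces DSC off, any other
 * value returns to automatic behaviour; the change takes effect on the
 * next modeset via dsc_force_changed.
 */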
1252static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
1253 size_t size, loff_t *pos)
1254{
1255 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1256 struct drm_connector *connector = &aconnector->base;
1257 struct drm_device *dev = connector->dev;
1258 struct drm_crtc *crtc = NULL;
1259 struct dm_crtc_state *dm_crtc_state = NULL;
1260 struct pipe_ctx *pipe_ctx;
1261 int i;
1262 char *wr_buf = NULL;
1263 uint32_t wr_buf_size = 42;
1264 int max_param_num = 1;
1265 long param[1] = {0};
1266 uint8_t param_nums = 0;
1267
1268 if (size == 0)
1269 return -EINVAL;
1270
1271 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
1272
1273 if (!wr_buf) {
1274 DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
1275 return -ENOSPC;
1276 }
1277
	/* pass at most wr_buf_size - 1 bytes so wr_buf stays NUL terminated */
	if (parse_write_buffer_into_params(wr_buf, min_t(size_t, size, wr_buf_size - 1),
					   (long *)param, buf,
					   max_param_num,
					   &param_nums)) {
		kfree(wr_buf);
		return -EINVAL;
	}

	if (param_nums <= 0) {
		DRM_DEBUG_DRIVER("user data could not be read\n");
		kfree(wr_buf);
		return -EINVAL;
	}
1291
1292 for (i = 0; i < MAX_PIPES; i++) {
1293 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1294 if (pipe_ctx && pipe_ctx->stream &&
1295 pipe_ctx->stream->link == aconnector->dc_link)
1296 break;
1297 }
1298
1299 if (!pipe_ctx || !pipe_ctx->stream)
1300 goto done;
1301
1302
1303 mutex_lock(&dev->mode_config.mutex);
1304 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1305
1306 if (connector->state == NULL)
1307 goto unlock;
1308
1309 crtc = connector->state->crtc;
1310 if (crtc == NULL)
1311 goto unlock;
1312
1313 drm_modeset_lock(&crtc->mutex, NULL);
1314 if (crtc->state == NULL)
1315 goto unlock;
1316
1317 dm_crtc_state = to_dm_crtc_state(crtc->state);
1318 if (dm_crtc_state->stream == NULL)
1319 goto unlock;
1320
1321 if (param[0] == 1)
1322 aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_ENABLE;
1323 else if (param[0] == 2)
1324 aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_DISABLE;
1325 else
1326 aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_DEFAULT;
1327
1328 dm_crtc_state->dsc_force_changed = true;
1329
1330unlock:
1331 if (crtc)
1332 drm_modeset_unlock(&crtc->mutex);
1333 drm_modeset_unlock(&dev->mode_config.connection_mutex);
1334 mutex_unlock(&dev->mode_config.mutex);
1335
1336done:
1337 kfree(wr_buf);
1338 return size;
1339}
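/*
 * "dsc_slice_width" read path: reports the DSC slice width currently
 * programmed in hardware for the pipe driving this connector.
 */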
1357static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
1358 size_t size, loff_t *pos)
1359{
1360 char *rd_buf = NULL;
1361 char *rd_buf_ptr = NULL;
1362 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1363 struct display_stream_compressor *dsc;
1364 struct dcn_dsc_state dsc_state = {0};
1365 const uint32_t rd_buf_size = 100;
1366 struct pipe_ctx *pipe_ctx;
1367 ssize_t result = 0;
1368 int i, r, str_len = 30;
1369
1370 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
1371
1372 if (!rd_buf)
1373 return -ENOMEM;
1374
1375 rd_buf_ptr = rd_buf;
1376
1377 for (i = 0; i < MAX_PIPES; i++) {
1378 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1379 if (pipe_ctx && pipe_ctx->stream &&
1380 pipe_ctx->stream->link == aconnector->dc_link)
1381 break;
1382 }
1383
1384 if (!pipe_ctx)
1385 return -ENXIO;
1386
1387 dsc = pipe_ctx->stream_res.dsc;
1388 if (dsc)
1389 dsc->funcs->dsc_read_state(dsc, &dsc_state);
1390
1391 snprintf(rd_buf_ptr, str_len,
1392 "%d\n",
1393 dsc_state.dsc_slice_width);
1394 rd_buf_ptr += str_len;
1395
1396 while (size) {
1397 if (*pos >= rd_buf_size)
1398 break;
1399
1400 r = put_user(*(rd_buf + result), buf);
1401 if (r)
1402 return r;
1403
1404 buf += 1;
1405 size -= 1;
1406 *pos += 1;
1407 result += 1;
1408 }
1409
1410 kfree(rd_buf);
1411 return result;
1412}
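/*
 * "dsc_slice_width" write path: a positive value requests that many pixels
 * per slice (converted to a horizontal slice count from the stream's
 * addressable width); 0 returns to the default slice count.
 */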
1437static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
1438 size_t size, loff_t *pos)
1439{
1440 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1441 struct pipe_ctx *pipe_ctx;
1442 struct drm_connector *connector = &aconnector->base;
1443 struct drm_device *dev = connector->dev;
1444 struct drm_crtc *crtc = NULL;
1445 struct dm_crtc_state *dm_crtc_state = NULL;
1446 int i;
1447 char *wr_buf = NULL;
1448 uint32_t wr_buf_size = 42;
1449 int max_param_num = 1;
1450 long param[1] = {0};
1451 uint8_t param_nums = 0;
1452
1453 if (size == 0)
1454 return -EINVAL;
1455
1456 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
1457
1458 if (!wr_buf) {
1459 DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
1460 return -ENOSPC;
1461 }
1462
	/* pass at most wr_buf_size - 1 bytes so wr_buf stays NUL terminated */
	if (parse_write_buffer_into_params(wr_buf, min_t(size_t, size, wr_buf_size - 1),
					   (long *)param, buf,
					   max_param_num,
					   &param_nums)) {
		kfree(wr_buf);
		return -EINVAL;
	}

	if (param_nums <= 0) {
		DRM_DEBUG_DRIVER("user data could not be read\n");
		kfree(wr_buf);
		return -EINVAL;
	}
1476
1477 for (i = 0; i < MAX_PIPES; i++) {
1478 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1479 if (pipe_ctx && pipe_ctx->stream &&
1480 pipe_ctx->stream->link == aconnector->dc_link)
1481 break;
1482 }
1483
1484 if (!pipe_ctx || !pipe_ctx->stream)
1485 goto done;
1486
1487
1488 mutex_lock(&dev->mode_config.mutex);
1489 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1490
1491 if (connector->state == NULL)
1492 goto unlock;
1493
1494 crtc = connector->state->crtc;
1495 if (crtc == NULL)
1496 goto unlock;
1497
1498 drm_modeset_lock(&crtc->mutex, NULL);
1499 if (crtc->state == NULL)
1500 goto unlock;
1501
1502 dm_crtc_state = to_dm_crtc_state(crtc->state);
1503 if (dm_crtc_state->stream == NULL)
1504 goto unlock;
1505
1506 if (param[0] > 0)
1507 aconnector->dsc_settings.dsc_num_slices_h = DIV_ROUND_UP(
1508 pipe_ctx->stream->timing.h_addressable,
1509 param[0]);
1510 else
1511 aconnector->dsc_settings.dsc_num_slices_h = 0;
1512
1513 dm_crtc_state->dsc_force_changed = true;
1514
1515unlock:
1516 if (crtc)
1517 drm_modeset_unlock(&crtc->mutex);
1518 drm_modeset_unlock(&dev->mode_config.connection_mutex);
1519 mutex_unlock(&dev->mode_config.mutex);
1520
1521done:
1522 kfree(wr_buf);
1523 return size;
1524}
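/*
 * "dsc_slice_height" read path: reports the DSC slice height currently
 * programmed in hardware for the pipe driving this connector.
 */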
1542static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
1543 size_t size, loff_t *pos)
1544{
1545 char *rd_buf = NULL;
1546 char *rd_buf_ptr = NULL;
1547 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1548 struct display_stream_compressor *dsc;
1549 struct dcn_dsc_state dsc_state = {0};
1550 const uint32_t rd_buf_size = 100;
1551 struct pipe_ctx *pipe_ctx;
1552 ssize_t result = 0;
1553 int i, r, str_len = 30;
1554
1555 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
1556
1557 if (!rd_buf)
1558 return -ENOMEM;
1559
1560 rd_buf_ptr = rd_buf;
1561
1562 for (i = 0; i < MAX_PIPES; i++) {
1563 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1564 if (pipe_ctx && pipe_ctx->stream &&
1565 pipe_ctx->stream->link == aconnector->dc_link)
1566 break;
1567 }
1568
1569 if (!pipe_ctx)
1570 return -ENXIO;
1571
1572 dsc = pipe_ctx->stream_res.dsc;
1573 if (dsc)
1574 dsc->funcs->dsc_read_state(dsc, &dsc_state);
1575
1576 snprintf(rd_buf_ptr, str_len,
1577 "%d\n",
1578 dsc_state.dsc_slice_height);
1579 rd_buf_ptr += str_len;
1580
1581 while (size) {
1582 if (*pos >= rd_buf_size)
1583 break;
1584
1585 r = put_user(*(rd_buf + result), buf);
1586 if (r)
1587 return r;
1588
1589 buf += 1;
1590 size -= 1;
1591 *pos += 1;
1592 result += 1;
1593 }
1594
1595 kfree(rd_buf);
1596 return result;
1597}
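/*
 * "dsc_slice_height" write path: a positive value requests that many lines
 * per slice (converted to a vertical slice count from the stream's
 * addressable height); 0 returns to the default.
 */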
1622static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
1623 size_t size, loff_t *pos)
1624{
1625 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1626 struct drm_connector *connector = &aconnector->base;
1627 struct drm_device *dev = connector->dev;
1628 struct drm_crtc *crtc = NULL;
1629 struct dm_crtc_state *dm_crtc_state = NULL;
1630 struct pipe_ctx *pipe_ctx;
1631 int i;
1632 char *wr_buf = NULL;
1633 uint32_t wr_buf_size = 42;
1634 int max_param_num = 1;
1635 uint8_t param_nums = 0;
1636 long param[1] = {0};
1637
1638 if (size == 0)
1639 return -EINVAL;
1640
1641 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
1642
1643 if (!wr_buf) {
1644 DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
1645 return -ENOSPC;
1646 }
1647
	/* pass at most wr_buf_size - 1 bytes so wr_buf stays NUL terminated */
	if (parse_write_buffer_into_params(wr_buf, min_t(size_t, size, wr_buf_size - 1),
					   (long *)param, buf,
					   max_param_num,
					   &param_nums)) {
		kfree(wr_buf);
		return -EINVAL;
	}

	if (param_nums <= 0) {
		DRM_DEBUG_DRIVER("user data could not be read\n");
		kfree(wr_buf);
		return -EINVAL;
	}
1661
1662 for (i = 0; i < MAX_PIPES; i++) {
1663 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1664 if (pipe_ctx && pipe_ctx->stream &&
1665 pipe_ctx->stream->link == aconnector->dc_link)
1666 break;
1667 }
1668
1669 if (!pipe_ctx || !pipe_ctx->stream)
1670 goto done;
1671
1672
1673 mutex_lock(&dev->mode_config.mutex);
1674 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1675
1676 if (connector->state == NULL)
1677 goto unlock;
1678
1679 crtc = connector->state->crtc;
1680 if (crtc == NULL)
1681 goto unlock;
1682
1683 drm_modeset_lock(&crtc->mutex, NULL);
1684 if (crtc->state == NULL)
1685 goto unlock;
1686
1687 dm_crtc_state = to_dm_crtc_state(crtc->state);
1688 if (dm_crtc_state->stream == NULL)
1689 goto unlock;
1690
1691 if (param[0] > 0)
1692 aconnector->dsc_settings.dsc_num_slices_v = DIV_ROUND_UP(
1693 pipe_ctx->stream->timing.v_addressable,
1694 param[0]);
1695 else
1696 aconnector->dsc_settings.dsc_num_slices_v = 0;
1697
1698 dm_crtc_state->dsc_force_changed = true;
1699
1700unlock:
1701 if (crtc)
1702 drm_modeset_unlock(&crtc->mutex);
1703 drm_modeset_unlock(&dev->mode_config.connection_mutex);
1704 mutex_unlock(&dev->mode_config.mutex);
1705
1706done:
1707 kfree(wr_buf);
1708 return size;
1709}
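/*
 * "dsc_bits_per_pixel" read path: reports the DSC target bits per pixel
 * currently programmed in hardware.
 */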
1723static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
1724 size_t size, loff_t *pos)
1725{
1726 char *rd_buf = NULL;
1727 char *rd_buf_ptr = NULL;
1728 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1729 struct display_stream_compressor *dsc;
1730 struct dcn_dsc_state dsc_state = {0};
1731 const uint32_t rd_buf_size = 100;
1732 struct pipe_ctx *pipe_ctx;
1733 ssize_t result = 0;
1734 int i, r, str_len = 30;
1735
1736 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
1737
1738 if (!rd_buf)
1739 return -ENOMEM;
1740
1741 rd_buf_ptr = rd_buf;
1742
1743 for (i = 0; i < MAX_PIPES; i++) {
1744 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1745 if (pipe_ctx && pipe_ctx->stream &&
1746 pipe_ctx->stream->link == aconnector->dc_link)
1747 break;
1748 }
1749
1750 if (!pipe_ctx)
1751 return -ENXIO;
1752
1753 dsc = pipe_ctx->stream_res.dsc;
1754 if (dsc)
1755 dsc->funcs->dsc_read_state(dsc, &dsc_state);
1756
1757 snprintf(rd_buf_ptr, str_len,
1758 "%d\n",
1759 dsc_state.dsc_bits_per_pixel);
1760 rd_buf_ptr += str_len;
1761
1762 while (size) {
1763 if (*pos >= rd_buf_size)
1764 break;
1765
1766 r = put_user(*(rd_buf + result), buf);
1767 if (r)
1768 return r;
1769
1770 buf += 1;
1771 size -= 1;
1772 *pos += 1;
1773 result += 1;
1774 }
1775
1776 kfree(rd_buf);
1777 return result;
1778}
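/*
 * "dsc_bits_per_pixel" write path: overrides the DSC target bits per pixel
 * for the next modeset; the value is taken as-is from the user.
 */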
1800static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *buf,
1801 size_t size, loff_t *pos)
1802{
1803 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1804 struct drm_connector *connector = &aconnector->base;
1805 struct drm_device *dev = connector->dev;
1806 struct drm_crtc *crtc = NULL;
1807 struct dm_crtc_state *dm_crtc_state = NULL;
1808 struct pipe_ctx *pipe_ctx;
1809 int i;
1810 char *wr_buf = NULL;
1811 uint32_t wr_buf_size = 42;
1812 int max_param_num = 1;
1813 uint8_t param_nums = 0;
1814 long param[1] = {0};
1815
1816 if (size == 0)
1817 return -EINVAL;
1818
1819 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
1820
1821 if (!wr_buf) {
1822 DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
1823 return -ENOSPC;
1824 }
1825
	/* pass at most wr_buf_size - 1 bytes so wr_buf stays NUL terminated */
	if (parse_write_buffer_into_params(wr_buf, min_t(size_t, size, wr_buf_size - 1),
					   (long *)param, buf,
					   max_param_num,
					   &param_nums)) {
		kfree(wr_buf);
		return -EINVAL;
	}

	if (param_nums <= 0) {
		DRM_DEBUG_DRIVER("user data could not be read\n");
		kfree(wr_buf);
		return -EINVAL;
	}
1839
1840 for (i = 0; i < MAX_PIPES; i++) {
1841 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1842 if (pipe_ctx && pipe_ctx->stream &&
1843 pipe_ctx->stream->link == aconnector->dc_link)
1844 break;
1845 }
1846
1847 if (!pipe_ctx || !pipe_ctx->stream)
1848 goto done;
1849
1850
1851 mutex_lock(&dev->mode_config.mutex);
1852 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1853
1854 if (connector->state == NULL)
1855 goto unlock;
1856
1857 crtc = connector->state->crtc;
1858 if (crtc == NULL)
1859 goto unlock;
1860
1861 drm_modeset_lock(&crtc->mutex, NULL);
1862 if (crtc->state == NULL)
1863 goto unlock;
1864
1865 dm_crtc_state = to_dm_crtc_state(crtc->state);
1866 if (dm_crtc_state->stream == NULL)
1867 goto unlock;
1868
1869 aconnector->dsc_settings.dsc_bits_per_pixel = param[0];
1870
1871 dm_crtc_state->dsc_force_changed = true;
1872
1873unlock:
1874 if (crtc)
1875 drm_modeset_unlock(&crtc->mutex);
1876 drm_modeset_unlock(&dev->mode_config.connection_mutex);
1877 mutex_unlock(&dev->mode_config.mutex);
1878
1879done:
1880 kfree(wr_buf);
1881 return size;
1882}
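/*
 * Read-only reports of the DSC picture width and height currently
 * programmed in hardware (the dsc_pic_width / dsc_pic_height entries).
 */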
1899static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
1900 size_t size, loff_t *pos)
1901{
1902 char *rd_buf = NULL;
1903 char *rd_buf_ptr = NULL;
1904 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1905 struct display_stream_compressor *dsc;
1906 struct dcn_dsc_state dsc_state = {0};
1907 const uint32_t rd_buf_size = 100;
1908 struct pipe_ctx *pipe_ctx;
1909 ssize_t result = 0;
1910 int i, r, str_len = 30;
1911
1912 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
1913
1914 if (!rd_buf)
1915 return -ENOMEM;
1916
1917 rd_buf_ptr = rd_buf;
1918
1919 for (i = 0; i < MAX_PIPES; i++) {
1920 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1921 if (pipe_ctx && pipe_ctx->stream &&
1922 pipe_ctx->stream->link == aconnector->dc_link)
1923 break;
1924 }
1925
1926 if (!pipe_ctx)
1927 return -ENXIO;
1928
1929 dsc = pipe_ctx->stream_res.dsc;
1930 if (dsc)
1931 dsc->funcs->dsc_read_state(dsc, &dsc_state);
1932
1933 snprintf(rd_buf_ptr, str_len,
1934 "%d\n",
1935 dsc_state.dsc_pic_width);
1936 rd_buf_ptr += str_len;
1937
1938 while (size) {
1939 if (*pos >= rd_buf_size)
1940 break;
1941
1942 r = put_user(*(rd_buf + result), buf);
1943 if (r)
1944 return r;
1945
1946 buf += 1;
1947 size -= 1;
1948 *pos += 1;
1949 result += 1;
1950 }
1951
1952 kfree(rd_buf);
1953 return result;
1954}
1955
1956static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
1957 size_t size, loff_t *pos)
1958{
1959 char *rd_buf = NULL;
1960 char *rd_buf_ptr = NULL;
1961 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1962 struct display_stream_compressor *dsc;
1963 struct dcn_dsc_state dsc_state = {0};
1964 const uint32_t rd_buf_size = 100;
1965 struct pipe_ctx *pipe_ctx;
1966 ssize_t result = 0;
1967 int i, r, str_len = 30;
1968
1969 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
1970
1971 if (!rd_buf)
1972 return -ENOMEM;
1973
1974 rd_buf_ptr = rd_buf;
1975
1976 for (i = 0; i < MAX_PIPES; i++) {
1977 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1978 if (pipe_ctx && pipe_ctx->stream &&
1979 pipe_ctx->stream->link == aconnector->dc_link)
1980 break;
1981 }
1982
1983 if (!pipe_ctx)
1984 return -ENXIO;
1985
1986 dsc = pipe_ctx->stream_res.dsc;
1987 if (dsc)
1988 dsc->funcs->dsc_read_state(dsc, &dsc_state);
1989
1990 snprintf(rd_buf_ptr, str_len,
1991 "%d\n",
1992 dsc_state.dsc_pic_height);
1993 rd_buf_ptr += str_len;
1994
1995 while (size) {
1996 if (*pos >= rd_buf_size)
1997 break;
1998
1999 r = put_user(*(rd_buf + result), buf);
2000 if (r)
2001 return r;
2002
2003 buf += 1;
2004 size -= 1;
2005 *pos += 1;
2006 result += 1;
2007 }
2008
2009 kfree(rd_buf);
2010 return result;
2011}
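/*
 * Read-only report of the DSC chunk size currently programmed in hardware
 * (the dsc_chunk_size entry).
 */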
2028static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
2029 size_t size, loff_t *pos)
2030{
2031 char *rd_buf = NULL;
2032 char *rd_buf_ptr = NULL;
2033 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
2034 struct display_stream_compressor *dsc;
2035 struct dcn_dsc_state dsc_state = {0};
2036 const uint32_t rd_buf_size = 100;
2037 struct pipe_ctx *pipe_ctx;
2038 ssize_t result = 0;
2039 int i, r, str_len = 30;
2040
2041 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
2042
2043 if (!rd_buf)
2044 return -ENOMEM;
2045
2046 rd_buf_ptr = rd_buf;
2047
2048 for (i = 0; i < MAX_PIPES; i++) {
2049 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
2050 if (pipe_ctx && pipe_ctx->stream &&
2051 pipe_ctx->stream->link == aconnector->dc_link)
2052 break;
2053 }
2054
2055 if (!pipe_ctx)
2056 return -ENXIO;
2057
2058 dsc = pipe_ctx->stream_res.dsc;
2059 if (dsc)
2060 dsc->funcs->dsc_read_state(dsc, &dsc_state);
2061
2062 snprintf(rd_buf_ptr, str_len,
2063 "%d\n",
2064 dsc_state.dsc_chunk_size);
2065 rd_buf_ptr += str_len;
2066
2067 while (size) {
2068 if (*pos >= rd_buf_size)
2069 break;
2070
2071 r = put_user(*(rd_buf + result), buf);
2072 if (r)
2073 return r;
2074
2075 buf += 1;
2076 size -= 1;
2077 *pos += 1;
2078 result += 1;
2079 }
2080
2081 kfree(rd_buf);
2082 return result;
2083}
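/*
 * Read-only report of the DSC slice BPG offset currently programmed in
 * hardware (the dsc_slice_bpg entry).
 */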
2100static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
2101 size_t size, loff_t *pos)
2102{
2103 char *rd_buf = NULL;
2104 char *rd_buf_ptr = NULL;
2105 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
2106 struct display_stream_compressor *dsc;
2107 struct dcn_dsc_state dsc_state = {0};
2108 const uint32_t rd_buf_size = 100;
2109 struct pipe_ctx *pipe_ctx;
2110 ssize_t result = 0;
2111 int i, r, str_len = 30;
2112
2113 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
2114
2115 if (!rd_buf)
2116 return -ENOMEM;
2117
2118 rd_buf_ptr = rd_buf;
2119
2120 for (i = 0; i < MAX_PIPES; i++) {
2121 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
2122 if (pipe_ctx && pipe_ctx->stream &&
2123 pipe_ctx->stream->link == aconnector->dc_link)
2124 break;
2125 }
2126
2127 if (!pipe_ctx)
2128 return -ENXIO;
2129
2130 dsc = pipe_ctx->stream_res.dsc;
2131 if (dsc)
2132 dsc->funcs->dsc_read_state(dsc, &dsc_state);
2133
2134 snprintf(rd_buf_ptr, str_len,
2135 "%d\n",
2136 dsc_state.dsc_slice_bpg_offset);
2137 rd_buf_ptr += str_len;
2138
2139 while (size) {
2140 if (*pos >= rd_buf_size)
2141 break;
2142
2143 r = put_user(*(rd_buf + result), buf);
2144 if (r)
2145 return r;
2146
2147 buf += 1;
2148 size -= 1;
2149 *pos += 1;
2150 result += 1;
2151 }
2152
2153 kfree(rd_buf);
2154 return result;
2155}
2156
2157DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support);
2158DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
2159DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
2160DEFINE_SHOW_ATTRIBUTE(output_bpc);
2161#ifdef CONFIG_DRM_AMD_DC_HDCP
2162DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
2163#endif
2164
2165static const struct file_operations dp_dsc_clock_en_debugfs_fops = {
2166 .owner = THIS_MODULE,
2167 .read = dp_dsc_clock_en_read,
2168 .write = dp_dsc_clock_en_write,
2169 .llseek = default_llseek
2170};
2171
2172static const struct file_operations dp_dsc_slice_width_debugfs_fops = {
2173 .owner = THIS_MODULE,
2174 .read = dp_dsc_slice_width_read,
2175 .write = dp_dsc_slice_width_write,
2176 .llseek = default_llseek
2177};
2178
2179static const struct file_operations dp_dsc_slice_height_debugfs_fops = {
2180 .owner = THIS_MODULE,
2181 .read = dp_dsc_slice_height_read,
2182 .write = dp_dsc_slice_height_write,
2183 .llseek = default_llseek
2184};
2185
2186static const struct file_operations dp_dsc_bits_per_pixel_debugfs_fops = {
2187 .owner = THIS_MODULE,
2188 .read = dp_dsc_bits_per_pixel_read,
2189 .write = dp_dsc_bits_per_pixel_write,
2190 .llseek = default_llseek
2191};
2192
2193static const struct file_operations dp_dsc_pic_width_debugfs_fops = {
2194 .owner = THIS_MODULE,
2195 .read = dp_dsc_pic_width_read,
2196 .llseek = default_llseek
2197};
2198
2199static const struct file_operations dp_dsc_pic_height_debugfs_fops = {
2200 .owner = THIS_MODULE,
2201 .read = dp_dsc_pic_height_read,
2202 .llseek = default_llseek
2203};
2204
2205static const struct file_operations dp_dsc_chunk_size_debugfs_fops = {
2206 .owner = THIS_MODULE,
2207 .read = dp_dsc_chunk_size_read,
2208 .llseek = default_llseek
2209};
2210
2211static const struct file_operations dp_dsc_slice_bpg_offset_debugfs_fops = {
2212 .owner = THIS_MODULE,
2213 .read = dp_dsc_slice_bpg_offset_read,
2214 .llseek = default_llseek
2215};
2216
2217static const struct file_operations dp_trigger_hotplug_debugfs_fops = {
2218 .owner = THIS_MODULE,
2219 .write = dp_trigger_hotplug,
2220 .llseek = default_llseek
2221};
2222
2223static const struct file_operations dp_link_settings_debugfs_fops = {
2224 .owner = THIS_MODULE,
2225 .read = dp_link_settings_read,
2226 .write = dp_link_settings_write,
2227 .llseek = default_llseek
2228};
2229
2230static const struct file_operations dp_phy_settings_debugfs_fop = {
2231 .owner = THIS_MODULE,
2232 .read = dp_phy_settings_read,
2233 .write = dp_phy_settings_write,
2234 .llseek = default_llseek
2235};
2236
2237static const struct file_operations dp_phy_test_pattern_fops = {
2238 .owner = THIS_MODULE,
2239 .write = dp_phy_test_pattern_debugfs_write,
2240 .llseek = default_llseek
2241};
2242
2243static const struct file_operations sdp_message_fops = {
2244 .owner = THIS_MODULE,
2245 .write = dp_sdp_message_debugfs_write,
2246 .llseek = default_llseek
2247};
2248
2249static const struct file_operations dp_dpcd_address_debugfs_fops = {
2250 .owner = THIS_MODULE,
2251 .write = dp_dpcd_address_write,
2252 .llseek = default_llseek
2253};
2254
2255static const struct file_operations dp_dpcd_size_debugfs_fops = {
2256 .owner = THIS_MODULE,
2257 .write = dp_dpcd_size_write,
2258 .llseek = default_llseek
2259};
2260
2261static const struct file_operations dp_dpcd_data_debugfs_fops = {
2262 .owner = THIS_MODULE,
2263 .read = dp_dpcd_data_read,
2264 .write = dp_dpcd_data_write,
2265 .llseek = default_llseek
2266};
2267
2268static const struct {
2269 char *name;
2270 const struct file_operations *fops;
2271} dp_debugfs_entries[] = {
2272 {"link_settings", &dp_link_settings_debugfs_fops},
2273 {"trigger_hotplug", &dp_trigger_hotplug_debugfs_fops},
2274 {"phy_settings", &dp_phy_settings_debugfs_fop},
2275 {"test_pattern", &dp_phy_test_pattern_fops},
2276#ifdef CONFIG_DRM_AMD_DC_HDCP
2277 {"hdcp_sink_capability", &hdcp_sink_capability_fops},
2278#endif
2279 {"sdp_message", &sdp_message_fops},
2280 {"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
2281 {"aux_dpcd_size", &dp_dpcd_size_debugfs_fops},
2282 {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops},
2283 {"dsc_clock_en", &dp_dsc_clock_en_debugfs_fops},
2284 {"dsc_slice_width", &dp_dsc_slice_width_debugfs_fops},
2285 {"dsc_slice_height", &dp_dsc_slice_height_debugfs_fops},
2286 {"dsc_bits_per_pixel", &dp_dsc_bits_per_pixel_debugfs_fops},
2287 {"dsc_pic_width", &dp_dsc_pic_width_debugfs_fops},
2288 {"dsc_pic_height", &dp_dsc_pic_height_debugfs_fops},
2289 {"dsc_chunk_size", &dp_dsc_chunk_size_debugfs_fops},
2290 {"dsc_slice_bpg", &dp_dsc_slice_bpg_offset_debugfs_fops},
2291 {"dp_dsc_fec_support", &dp_dsc_fec_support_fops}
2292};
2293
2294#ifdef CONFIG_DRM_AMD_DC_HDCP
2295static const struct {
2296 char *name;
2297 const struct file_operations *fops;
2298} hdmi_debugfs_entries[] = {
2299 {"hdcp_sink_capability", &hdcp_sink_capability_fops}
2300};
2301#endif
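/*
 * force_yuv420_output get/set pair: when set, the driver is asked to use
 * YUV 4:2:0 output on this connector where possible (summary inferred from
 * the flag's name and its use elsewhere in amdgpu_dm).
 */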
2305static int force_yuv420_output_set(void *data, u64 val)
2306{
2307 struct amdgpu_dm_connector *connector = data;
2308
2309 connector->force_yuv420_output = (bool)val;
2310
2311 return 0;
2312}
2317static int force_yuv420_output_get(void *data, u64 *val)
2318{
2319 struct amdgpu_dm_connector *connector = data;
2320
2321 *val = connector->force_yuv420_output;
2322
2323 return 0;
2324}
2325
2326DEFINE_DEBUGFS_ATTRIBUTE(force_yuv420_output_fops, force_yuv420_output_get,
2327 force_yuv420_output_set, "%llu\n");
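/*
 * psr_state: reports the current PSR state of the eDP link as a number
 * (see enum dc_psr_state).
 */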
2332static int psr_get(void *data, u64 *val)
2333{
2334 struct amdgpu_dm_connector *connector = data;
2335 struct dc_link *link = connector->dc_link;
2336 enum dc_psr_state state = PSR_STATE0;
2337
2338 dc_link_get_psr_state(link, &state);
2339
2340 *val = state;
2341
2342 return 0;
2343}
2344
2345
2346DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n");
2347
2348void connector_debugfs_init(struct amdgpu_dm_connector *connector)
2349{
2350 int i;
2351 struct dentry *dir = connector->base.debugfs_entry;
2352
2353 if (connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2354 connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
2355 for (i = 0; i < ARRAY_SIZE(dp_debugfs_entries); i++) {
2356 debugfs_create_file(dp_debugfs_entries[i].name,
2357 0644, dir, connector,
2358 dp_debugfs_entries[i].fops);
2359 }
2360 }
2361 if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2362 debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
2363
2364 debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
2365 &force_yuv420_output_fops);
2366
2367 debugfs_create_file("output_bpc", 0644, dir, connector,
2368 &output_bpc_fops);
2369
2370 connector->debugfs_dpcd_address = 0;
2371 connector->debugfs_dpcd_size = 0;
2372
2373#ifdef CONFIG_DRM_AMD_DC_HDCP
2374 if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) {
2375 for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_entries); i++) {
2376 debugfs_create_file(hdmi_debugfs_entries[i].name,
2377 0644, dir, connector,
2378 hdmi_debugfs_entries[i].fops);
2379 }
2380 }
2381#endif
2382}
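/*
 * amdgpu_dm_dtn_log: reading dumps the current DC hardware state into the
 * returned buffer; writing any data triggers the same hardware-state dump
 * through DC's internal logger instead (see dtn_log_write below).
 */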
2388static ssize_t dtn_log_read(
2389 struct file *f,
2390 char __user *buf,
2391 size_t size,
2392 loff_t *pos)
2393{
2394 struct amdgpu_device *adev = file_inode(f)->i_private;
2395 struct dc *dc = adev->dm.dc;
2396 struct dc_log_buffer_ctx log_ctx = { 0 };
2397 ssize_t result = 0;
2398
2399 if (!buf || !size)
2400 return -EINVAL;
2401
2402 if (!dc->hwss.log_hw_state)
2403 return 0;
2404
2405 dc->hwss.log_hw_state(dc, &log_ctx);
2406
2407 if (*pos < log_ctx.pos) {
2408 size_t to_copy = log_ctx.pos - *pos;
2409
2410 to_copy = min(to_copy, size);
2411
2412 if (!copy_to_user(buf, log_ctx.buf + *pos, to_copy)) {
2413 *pos += to_copy;
2414 result = to_copy;
2415 }
2416 }
2417
2418 kfree(log_ctx.buf);
2419
2420 return result;
2421}
2427static ssize_t dtn_log_write(
2428 struct file *f,
2429 const char __user *buf,
2430 size_t size,
2431 loff_t *pos)
2432{
2433 struct amdgpu_device *adev = file_inode(f)->i_private;
2434 struct dc *dc = adev->dm.dc;
2435
2436
2437 if (size == 0)
2438 return 0;
2439
2440 if (dc->hwss.log_hw_state)
2441 dc->hwss.log_hw_state(dc, NULL);
2442
2443 return size;
2444}
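/*
 * amdgpu_current_backlight_pwm and amdgpu_target_backlight_pwm: report the
 * current and target backlight PWM values of the backlight link in hex.
 */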
2451static int current_backlight_read(struct seq_file *m, void *data)
2452{
2453 struct drm_info_node *node = (struct drm_info_node *)m->private;
2454 struct drm_device *dev = node->minor->dev;
2455 struct amdgpu_device *adev = drm_to_adev(dev);
2456 struct amdgpu_display_manager *dm = &adev->dm;
2457
2458 unsigned int backlight = dc_link_get_backlight_level(dm->backlight_link);
2459
2460 seq_printf(m, "0x%x\n", backlight);
2461 return 0;
2462}
2469static int target_backlight_read(struct seq_file *m, void *data)
2470{
2471 struct drm_info_node *node = (struct drm_info_node *)m->private;
2472 struct drm_device *dev = node->minor->dev;
2473 struct amdgpu_device *adev = drm_to_adev(dev);
2474 struct amdgpu_display_manager *dm = &adev->dm;
2475
2476 unsigned int backlight = dc_link_get_target_backlight_pwm(dm->backlight_link);
2477
2478 seq_printf(m, "0x%x\n", backlight);
2479 return 0;
2480}
2481
2482static int mst_topo(struct seq_file *m, void *unused)
2483{
2484 struct drm_info_node *node = (struct drm_info_node *)m->private;
2485 struct drm_device *dev = node->minor->dev;
2486 struct drm_connector *connector;
2487 struct drm_connector_list_iter conn_iter;
2488 struct amdgpu_dm_connector *aconnector;
2489
2490 drm_connector_list_iter_begin(dev, &conn_iter);
2491 drm_for_each_connector_iter(connector, &conn_iter) {
2492 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
2493 continue;
2494
2495 aconnector = to_amdgpu_dm_connector(connector);
2496
2497 seq_printf(m, "\nMST topology for connector %d\n", aconnector->connector_id);
2498 drm_dp_mst_dump_topology(m, &aconnector->mst_mgr);
2499 }
2500 drm_connector_list_iter_end(&conn_iter);
2501
2502 return 0;
2503}
2504
static const struct drm_info_list amdgpu_dm_debugfs_list[] = {
	{"amdgpu_current_backlight_pwm", &current_backlight_read},
	{"amdgpu_target_backlight_pwm", &target_backlight_read},
	{"amdgpu_mst_topology", &mst_topo},
};
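/*
 * amdgpu_dm_force_timing_sync get/set pair: setting the flag forces timing
 * synchronization across displays and triggers a resync immediately.
 */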
2516static int force_timing_sync_set(void *data, u64 val)
2517{
2518 struct amdgpu_device *adev = data;
2519
2520 adev->dm.force_timing_sync = (bool)val;
2521
2522 amdgpu_dm_trigger_timing_sync(adev_to_drm(adev));
2523
2524 return 0;
2525}
2531static int force_timing_sync_get(void *data, u64 *val)
2532{
2533 struct amdgpu_device *adev = data;
2534
2535 *val = adev->dm.force_timing_sync;
2536
2537 return 0;
2538}
2539
2540DEFINE_DEBUGFS_ATTRIBUTE(force_timing_sync_ops, force_timing_sync_get,
2541 force_timing_sync_set, "%llu\n");
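/*
 * amdgpu_dm_visual_confirm get/set pair: selects DC's visual confirmation
 * debug overlay (0 disables it; see enum visual_confirm for other values).
 */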
2547static int visual_confirm_set(void *data, u64 val)
2548{
2549 struct amdgpu_device *adev = data;
2550
2551 adev->dm.dc->debug.visual_confirm = (enum visual_confirm)val;
2552
2553 return 0;
2554}
2560static int visual_confirm_get(void *data, u64 *val)
2561{
2562 struct amdgpu_device *adev = data;
2563
2564 *val = adev->dm.dc->debug.visual_confirm;
2565
2566 return 0;
2567}
2568
2569DEFINE_DEBUGFS_ATTRIBUTE(visual_confirm_fops, visual_confirm_get,
2570 visual_confirm_set, "%llu\n");
2571
2572int dtn_debugfs_init(struct amdgpu_device *adev)
2573{
2574 static const struct file_operations dtn_log_fops = {
2575 .owner = THIS_MODULE,
2576 .read = dtn_log_read,
2577 .write = dtn_log_write,
2578 .llseek = default_llseek
2579 };
2580
2581 struct drm_minor *minor = adev_to_drm(adev)->primary;
2582 struct dentry *root = minor->debugfs_root;
2583 int ret;
2584
2585 ret = amdgpu_debugfs_add_files(adev, amdgpu_dm_debugfs_list,
2586 ARRAY_SIZE(amdgpu_dm_debugfs_list));
2587 if (ret)
2588 return ret;
2589
2590 debugfs_create_file("amdgpu_dm_dtn_log", 0644, root, adev,
2591 &dtn_log_fops);
2592
2593 debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev,
2594 &visual_confirm_fops);
2595
2596 debugfs_create_file_unsafe("amdgpu_dm_dmub_tracebuffer", 0644, root,
2597 adev, &dmub_tracebuffer_fops);
2598
2599 debugfs_create_file_unsafe("amdgpu_dm_dmub_fw_state", 0644, root,
2600 adev, &dmub_fw_state_fops);
2601
2602 debugfs_create_file_unsafe("amdgpu_dm_force_timing_sync", 0644, root,
2603 adev, &force_timing_sync_ops);
2604
2605 return 0;
2606}