26#include <linux/uaccess.h>
27
28#include <drm/drm_debugfs.h>
29
30#include "dc.h"
31#include "amdgpu.h"
32#include "amdgpu_dm.h"
33#include "amdgpu_dm_debugfs.h"
34#include "dm_helpers.h"
35#include "dmub/dmub_srv.h"
36#include "resource.h"
37#include "dsc.h"
38#include "dc_link_dp.h"
39
40struct dmub_debugfs_trace_header {
41 uint32_t entry_count;
42 uint32_t reserved[3];
43};
44
45struct dmub_debugfs_trace_entry {
46 uint32_t trace_code;
47 uint32_t tick_count;
48 uint32_t param0;
49 uint32_t param1;
50};
51
52static inline const char *yesno(bool v)
53{
54 return v ? "yes" : "no";
55}
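
/*
 * parse_write_buffer_into_params() - copy a debugfs write from user space
 * and split it into numeric parameters.
 *
 * Values are separated by whitespace and parsed with kstrtol() in base 16,
 * i.e. they are expected to be hexadecimal. At most max_param_num values
 * are stored in param[]; the number actually found is returned through
 * param_nums. wr_buf must be a zeroed buffer of wr_buf_size bytes (the
 * callers allocate it with kcalloc()).
 */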
65static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
66 long *param, const char __user *buf,
67 int max_param_num,
68 uint8_t *param_nums)
69{
70 char *wr_buf_ptr = NULL;
71 uint32_t wr_buf_count = 0;
72 int r;
73 char *sub_str = NULL;
74 const char delimiter[3] = {' ', '\n', '\0'};
75 uint8_t param_index = 0;
76
77 *param_nums = 0;
78
79 wr_buf_ptr = wr_buf;
80
81 r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
82
83
84 if (r >= wr_buf_size) {
		DRM_DEBUG_DRIVER("user data could not be read\n");
86 return -EINVAL;
87 }
88
89
90 while ((*wr_buf_ptr != 0xa) && (wr_buf_count < wr_buf_size)) {
91
92 while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
93 wr_buf_ptr++;
94 wr_buf_count++;
95 }
96
97 if (wr_buf_count == wr_buf_size)
98 break;
99
100
101 while ((!isspace(*wr_buf_ptr)) && (wr_buf_count < wr_buf_size)) {
102 wr_buf_ptr++;
103 wr_buf_count++;
104 }
105
106 (*param_nums)++;
107
108 if (wr_buf_count == wr_buf_size)
109 break;
110 }
111
112 if (*param_nums > max_param_num)
113 *param_nums = max_param_num;
114
115 wr_buf_ptr = wr_buf;
116 wr_buf_count = 0;
117
118 while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
119 wr_buf_ptr++;
120 wr_buf_count++;
121 }
122
123 while (param_index < *param_nums) {
124
125 sub_str = strsep(&wr_buf_ptr, delimiter);
126
127 r = kstrtol(sub_str, 16, &(param[param_index]));
128
129 if (r)
130 DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
131
132 param_index++;
133 }
134
135 return 0;
136}
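
/*
 * Backs the per-connector "link_settings" debugfs entry (see
 * dp_debugfs_entries[] below).
 *
 * Reading returns the current, verified, reported and preferred link
 * settings as "<lane_count> <link_rate> <link_spread>" triples.
 * Writing takes two hex values, "<lane_count> <link_rate>", and applies
 * them as the preferred training settings; only lane counts of 1/2/4 and
 * the link rates accepted in dp_link_settings_write() are valid.
 *
 * Typical usage (dri minor and connector name depend on the system):
 *   cat /sys/kernel/debug/dri/0/DP-1/link_settings
 *   echo "<lane_count> <link_rate>" > /sys/kernel/debug/dri/0/DP-1/link_settings
 */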
179static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
180 size_t size, loff_t *pos)
181{
182 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
183 struct dc_link *link = connector->dc_link;
184 char *rd_buf = NULL;
185 char *rd_buf_ptr = NULL;
186 const uint32_t rd_buf_size = 100;
187 uint32_t result = 0;
188 uint8_t str_len = 0;
189 int r;
190
191 if (*pos & 3 || size & 3)
192 return -EINVAL;
193
194 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
195 if (!rd_buf)
196 return 0;
197
198 rd_buf_ptr = rd_buf;
199
200 str_len = strlen("Current: %d %d %d ");
201 snprintf(rd_buf_ptr, str_len, "Current: %d %d %d ",
202 link->cur_link_settings.lane_count,
203 link->cur_link_settings.link_rate,
204 link->cur_link_settings.link_spread);
205 rd_buf_ptr += str_len;
206
207 str_len = strlen("Verified: %d %d %d ");
208 snprintf(rd_buf_ptr, str_len, "Verified: %d %d %d ",
209 link->verified_link_cap.lane_count,
210 link->verified_link_cap.link_rate,
211 link->verified_link_cap.link_spread);
212 rd_buf_ptr += str_len;
213
214 str_len = strlen("Reported: %d %d %d ");
215 snprintf(rd_buf_ptr, str_len, "Reported: %d %d %d ",
216 link->reported_link_cap.lane_count,
217 link->reported_link_cap.link_rate,
218 link->reported_link_cap.link_spread);
219 rd_buf_ptr += str_len;
220
221 str_len = strlen("Preferred: %d %d %d ");
222 snprintf(rd_buf_ptr, str_len, "Preferred: %d %d %d\n",
223 link->preferred_link_setting.lane_count,
224 link->preferred_link_setting.link_rate,
225 link->preferred_link_setting.link_spread);
226
227 while (size) {
228 if (*pos >= rd_buf_size)
229 break;
230
231 r = put_user(*(rd_buf + result), buf);
232 if (r)
233 return r;
234
235 buf += 1;
236 size -= 1;
237 *pos += 1;
238 result += 1;
239 }
240
241 kfree(rd_buf);
242 return result;
243}
244
245static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
246 size_t size, loff_t *pos)
247{
248 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
249 struct dc_link *link = connector->dc_link;
250 struct dc *dc = (struct dc *)link->dc;
251 struct dc_link_settings prefer_link_settings;
252 char *wr_buf = NULL;
253 const uint32_t wr_buf_size = 40;
254
255 int max_param_num = 2;
256 uint8_t param_nums = 0;
257 long param[2];
258 bool valid_input = true;
259
260 if (size == 0)
261 return -EINVAL;
262
263 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
264 if (!wr_buf)
265 return -ENOSPC;
266
267 if (parse_write_buffer_into_params(wr_buf, size,
268 (long *)param, buf,
269 max_param_num,
					   &param_nums)) {
271 kfree(wr_buf);
272 return -EINVAL;
273 }
274
275 if (param_nums <= 0) {
276 kfree(wr_buf);
		DRM_DEBUG_DRIVER("user data could not be read\n");
278 return -EINVAL;
279 }
280
281 switch (param[0]) {
282 case LANE_COUNT_ONE:
283 case LANE_COUNT_TWO:
284 case LANE_COUNT_FOUR:
285 break;
286 default:
287 valid_input = false;
288 break;
289 }
290
291 switch (param[1]) {
292 case LINK_RATE_LOW:
293 case LINK_RATE_HIGH:
294 case LINK_RATE_RBR2:
295 case LINK_RATE_HIGH2:
296 case LINK_RATE_HIGH3:
297 break;
298 default:
299 valid_input = false;
300 break;
301 }
302
303 if (!valid_input) {
304 kfree(wr_buf);
		DRM_DEBUG_DRIVER("Invalid input value, no HW will be programmed\n");
306 return size;
307 }
308
309
310
311
312 prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
313 prefer_link_settings.use_link_rate_set = false;
314 prefer_link_settings.lane_count = param[0];
315 prefer_link_settings.link_rate = param[1];
316
317 dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true);
318
319 kfree(wr_buf);
320 return size;
321}
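
/*
 * Backs the per-connector "phy_settings" debugfs entry.
 *
 * Reading returns the current lane settings as
 * "<voltage_swing> <pre_emphasis> <post_cursor2>".
 * Writing takes the same three values in hex, each bounded by its
 * *_MAX_LEVEL; they are applied to every active lane through
 * dc_link_set_drive_settings(), using the preferred link settings when
 * they are set and a test pattern is enabled, and the current link
 * settings otherwise.
 */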
364static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
365 size_t size, loff_t *pos)
366{
367 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
368 struct dc_link *link = connector->dc_link;
369 char *rd_buf = NULL;
370 const uint32_t rd_buf_size = 20;
371 uint32_t result = 0;
372 int r;
373
374 if (*pos & 3 || size & 3)
375 return -EINVAL;
376
377 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
378 if (!rd_buf)
379 return -EINVAL;
380
381 snprintf(rd_buf, rd_buf_size, " %d %d %d ",
382 link->cur_lane_setting.VOLTAGE_SWING,
383 link->cur_lane_setting.PRE_EMPHASIS,
384 link->cur_lane_setting.POST_CURSOR2);
385
386 while (size) {
387 if (*pos >= rd_buf_size)
388 break;
389
390 r = put_user((*(rd_buf + result)), buf);
391 if (r)
392 return r;
393
394 buf += 1;
395 size -= 1;
396 *pos += 1;
397 result += 1;
398 }
399
400 kfree(rd_buf);
401 return result;
402}
403
404static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
405 size_t size, loff_t *pos)
406{
407 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
408 struct dc_link *link = connector->dc_link;
409 struct dc *dc = (struct dc *)link->dc;
410 char *wr_buf = NULL;
411 uint32_t wr_buf_size = 40;
412 long param[3];
413 bool use_prefer_link_setting;
414 struct link_training_settings link_lane_settings;
415 int max_param_num = 3;
416 uint8_t param_nums = 0;
417 int r = 0;
418
419
420 if (size == 0)
421 return -EINVAL;
422
423 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
424 if (!wr_buf)
425 return -ENOSPC;
426
427 if (parse_write_buffer_into_params(wr_buf, size,
428 (long *)param, buf,
429 max_param_num,
					   &param_nums)) {
431 kfree(wr_buf);
432 return -EINVAL;
433 }
434
435 if (param_nums <= 0) {
436 kfree(wr_buf);
		DRM_DEBUG_DRIVER("user data could not be read\n");
438 return -EINVAL;
439 }
440
441 if ((param[0] > VOLTAGE_SWING_MAX_LEVEL) ||
442 (param[1] > PRE_EMPHASIS_MAX_LEVEL) ||
443 (param[2] > POST_CURSOR2_MAX_LEVEL)) {
444 kfree(wr_buf);
		DRM_DEBUG_DRIVER("Invalid input, no HW will be programmed\n");
446 return size;
447 }
448
449
450 use_prefer_link_setting =
451 ((link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) &&
452 (link->test_pattern_enabled));
453
454 memset(&link_lane_settings, 0, sizeof(link_lane_settings));
455
456 if (use_prefer_link_setting) {
457 link_lane_settings.link_settings.lane_count =
458 link->preferred_link_setting.lane_count;
459 link_lane_settings.link_settings.link_rate =
460 link->preferred_link_setting.link_rate;
461 link_lane_settings.link_settings.link_spread =
462 link->preferred_link_setting.link_spread;
463 } else {
464 link_lane_settings.link_settings.lane_count =
465 link->cur_link_settings.lane_count;
466 link_lane_settings.link_settings.link_rate =
467 link->cur_link_settings.link_rate;
468 link_lane_settings.link_settings.link_spread =
469 link->cur_link_settings.link_spread;
470 }
471
472
473 for (r = 0; r < link_lane_settings.link_settings.lane_count; r++) {
474 link_lane_settings.lane_settings[r].VOLTAGE_SWING =
475 (enum dc_voltage_swing) (param[0]);
476 link_lane_settings.lane_settings[r].PRE_EMPHASIS =
477 (enum dc_pre_emphasis) (param[1]);
478 link_lane_settings.lane_settings[r].POST_CURSOR2 =
479 (enum dc_post_cursor2) (param[2]);
480 }
481
482
483 dc_link_set_drive_settings(dc, &link_lane_settings, link);
484
485 kfree(wr_buf);
486 return size;
487}
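
/*
 * Backs the per-connector "test_pattern" debugfs entry (write only).
 *
 * The first hex value selects the dp_test_pattern. Video patterns (color
 * squares, vertical/horizontal bars, color ramp, or back to video mode)
 * keep HPD enabled; PHY patterns (D10.2, symbol error, PRBS7, 80-bit
 * custom, HBR2 compliance eye, training pattern 4) disable HPD while the
 * pattern is active. For DP_TEST_PATTERN_80BIT_CUSTOM, up to ten extra
 * hex bytes may follow to override the default custom pattern data.
 */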
547static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __user *buf,
548 size_t size, loff_t *pos)
549{
550 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
551 struct dc_link *link = connector->dc_link;
552 char *wr_buf = NULL;
553 uint32_t wr_buf_size = 100;
554 long param[11] = {0x0};
555 int max_param_num = 11;
556 enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
557 bool disable_hpd = false;
558 bool valid_test_pattern = false;
559 uint8_t param_nums = 0;
560
561 uint8_t custom_pattern[10] = {
562 0x1f, 0x7c, 0xf0, 0xc1, 0x07,
563 0x1f, 0x7c, 0xf0, 0xc1, 0x07
564 };
565 struct dc_link_settings prefer_link_settings = {LANE_COUNT_UNKNOWN,
566 LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
567 struct dc_link_settings cur_link_settings = {LANE_COUNT_UNKNOWN,
568 LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
569 struct link_training_settings link_training_settings;
570 int i;
571
572 if (size == 0)
573 return -EINVAL;
574
575 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
576 if (!wr_buf)
577 return -ENOSPC;
578
579 if (parse_write_buffer_into_params(wr_buf, size,
580 (long *)param, buf,
581 max_param_num,
					   &param_nums)) {
583 kfree(wr_buf);
584 return -EINVAL;
585 }
586
587 if (param_nums <= 0) {
588 kfree(wr_buf);
		DRM_DEBUG_DRIVER("user data could not be read\n");
590 return -EINVAL;
591 }
592
593
594 test_pattern = param[0];
595
596 switch (test_pattern) {
597 case DP_TEST_PATTERN_VIDEO_MODE:
598 case DP_TEST_PATTERN_COLOR_SQUARES:
599 case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
600 case DP_TEST_PATTERN_VERTICAL_BARS:
601 case DP_TEST_PATTERN_HORIZONTAL_BARS:
602 case DP_TEST_PATTERN_COLOR_RAMP:
603 valid_test_pattern = true;
604 break;
605
606 case DP_TEST_PATTERN_D102:
607 case DP_TEST_PATTERN_SYMBOL_ERROR:
608 case DP_TEST_PATTERN_PRBS7:
609 case DP_TEST_PATTERN_80BIT_CUSTOM:
610 case DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE:
611 case DP_TEST_PATTERN_TRAINING_PATTERN4:
612 disable_hpd = true;
613 valid_test_pattern = true;
614 break;
615
616 default:
617 valid_test_pattern = false;
618 test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
619 break;
620 }
621
622 if (!valid_test_pattern) {
623 kfree(wr_buf);
624 DRM_DEBUG_DRIVER("Invalid Test Pattern Parameters\n");
625 return size;
626 }
627
628 if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) {
629 for (i = 0; i < 10; i++) {
630 if ((uint8_t) param[i + 1] != 0x0)
631 break;
632 }
633
634 if (i < 10) {
635
636 for (i = 0; i < 10; i++)
637 custom_pattern[i] = (uint8_t) param[i + 1];
638 }
639 }
640
641
642
643
644
645
646
647
648
649 if (!disable_hpd)
650 dc_link_enable_hpd(link);
651
652 prefer_link_settings.lane_count = link->verified_link_cap.lane_count;
653 prefer_link_settings.link_rate = link->verified_link_cap.link_rate;
654 prefer_link_settings.link_spread = link->verified_link_cap.link_spread;
655
656 cur_link_settings.lane_count = link->cur_link_settings.lane_count;
657 cur_link_settings.link_rate = link->cur_link_settings.link_rate;
658 cur_link_settings.link_spread = link->cur_link_settings.link_spread;
659
660 link_training_settings.link_settings = cur_link_settings;
661
662
663 if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
664 if (prefer_link_settings.lane_count != LANE_COUNT_UNKNOWN &&
665 prefer_link_settings.link_rate != LINK_RATE_UNKNOWN &&
666 (prefer_link_settings.lane_count != cur_link_settings.lane_count ||
667 prefer_link_settings.link_rate != cur_link_settings.link_rate))
668 link_training_settings.link_settings = prefer_link_settings;
669 }
670
671 for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++)
672 link_training_settings.lane_settings[i] = link->cur_lane_setting;
673
674 dc_link_set_test_pattern(
675 link,
676 test_pattern,
677 DP_TEST_PATTERN_COLOR_SPACE_RGB,
678 &link_training_settings,
679 custom_pattern,
680 10);
681
682
683
684
685
686
687 if (valid_test_pattern && disable_hpd)
688 dc_link_disable_hpd(link);
689
690 kfree(wr_buf);
691
692 return size;
693}
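
/*
 * Dumps the DMUB trace buffer, one trace_code/tick_count/param0/param1
 * line per entry. Exposed as amdgpu_dm_dmub_tracebuffer, created in
 * dtn_debugfs_init() below, e.g. (dri minor may differ):
 *   cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_tracebuffer
 */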
699static int dmub_tracebuffer_show(struct seq_file *m, void *data)
700{
701 struct amdgpu_device *adev = m->private;
702 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
703 struct dmub_debugfs_trace_entry *entries;
704 uint8_t *tbuf_base;
705 uint32_t tbuf_size, max_entries, num_entries, i;
706
707 if (!fb_info)
708 return 0;
709
710 tbuf_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr;
711 if (!tbuf_base)
712 return 0;
713
714 tbuf_size = fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size;
715 max_entries = (tbuf_size - sizeof(struct dmub_debugfs_trace_header)) /
716 sizeof(struct dmub_debugfs_trace_entry);
717
718 num_entries =
719 ((struct dmub_debugfs_trace_header *)tbuf_base)->entry_count;
720
721 num_entries = min(num_entries, max_entries);
722
723 entries = (struct dmub_debugfs_trace_entry
724 *)(tbuf_base +
725 sizeof(struct dmub_debugfs_trace_header));
726
727 for (i = 0; i < num_entries; ++i) {
728 struct dmub_debugfs_trace_entry *entry = &entries[i];
729
730 seq_printf(m,
731 "trace_code=%u tick_count=%u param0=%u param1=%u\n",
732 entry->trace_code, entry->tick_count, entry->param0,
733 entry->param1);
734 }
735
736 return 0;
737}
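
/*
 * Dumps the raw DMUB firmware state region. Exposed as
 * amdgpu_dm_dmub_fw_state, created in dtn_debugfs_init() below.
 */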
743static int dmub_fw_state_show(struct seq_file *m, void *data)
744{
745 struct amdgpu_device *adev = m->private;
746 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
747 uint8_t *state_base;
748 uint32_t state_size;
749
750 if (!fb_info)
751 return 0;
752
753 state_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr;
754 if (!state_base)
755 return 0;
756
757 state_size = fb_info->fb[DMUB_WINDOW_6_FW_STATE].size;
758
759 return seq_write(m, state_base, state_size);
760}
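
/*
 * Reports the connector's current and maximum bits per color component,
 * derived from the color depth of the stream on the attached CRTC.
 * Exposed as the per-connector "output_bpc" debugfs file.
 */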
766static int output_bpc_show(struct seq_file *m, void *data)
767{
768 struct drm_connector *connector = m->private;
769 struct drm_device *dev = connector->dev;
770 struct drm_crtc *crtc = NULL;
771 struct dm_crtc_state *dm_crtc_state = NULL;
772 int res = -ENODEV;
773 unsigned int bpc;
774
775 mutex_lock(&dev->mode_config.mutex);
776 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
777
778 if (connector->state == NULL)
779 goto unlock;
780
781 crtc = connector->state->crtc;
782 if (crtc == NULL)
783 goto unlock;
784
785 drm_modeset_lock(&crtc->mutex, NULL);
786 if (crtc->state == NULL)
787 goto unlock;
788
789 dm_crtc_state = to_dm_crtc_state(crtc->state);
790 if (dm_crtc_state->stream == NULL)
791 goto unlock;
792
793 switch (dm_crtc_state->stream->timing.display_color_depth) {
794 case COLOR_DEPTH_666:
795 bpc = 6;
796 break;
797 case COLOR_DEPTH_888:
798 bpc = 8;
799 break;
800 case COLOR_DEPTH_101010:
801 bpc = 10;
802 break;
803 case COLOR_DEPTH_121212:
804 bpc = 12;
805 break;
806 case COLOR_DEPTH_161616:
807 bpc = 16;
808 break;
809 default:
810 goto unlock;
811 }
812
813 seq_printf(m, "Current: %u\n", bpc);
814 seq_printf(m, "Maximum: %u\n", connector->display_info.bpc);
815 res = 0;
816
817unlock:
818 if (crtc)
819 drm_modeset_unlock(&crtc->mutex);
820
821 drm_modeset_unlock(&dev->mode_config.connection_mutex);
822 mutex_unlock(&dev->mode_config.mutex);
823
824 return res;
825}
826
827#ifdef CONFIG_DRM_AMD_DC_HDCP
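
/*
 * Reports which HDCP versions (1.4 and/or 2.2) the attached sink
 * supports, or "None". Exposed as the per-connector
 * "hdcp_sink_capability" debugfs file.
 */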
837static int hdcp_sink_capability_show(struct seq_file *m, void *data)
838{
839 struct drm_connector *connector = m->private;
840 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
841 bool hdcp_cap, hdcp2_cap;
842
843 if (connector->status != connector_status_connected)
844 return -ENODEV;
845
846 seq_printf(m, "%s:%d HDCP version: ", connector->name, connector->base.id);
847
848 hdcp_cap = dc_link_is_hdcp14(aconnector->dc_link, aconnector->dc_sink->sink_signal);
849 hdcp2_cap = dc_link_is_hdcp22(aconnector->dc_link, aconnector->dc_sink->sink_signal);
850
851
852 if (hdcp_cap)
853 seq_printf(m, "%s ", "HDCP1.4");
854 if (hdcp2_cap)
855 seq_printf(m, "%s ", "HDCP2.2");
856
857 if (!hdcp_cap && !hdcp2_cap)
858 seq_printf(m, "%s ", "None");
859
860 seq_puts(m, "\n");
861
862 return 0;
863}
864#endif
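
/*
 * Backs the per-connector "sdp_message" debugfs entry (write only): up to
 * 36 bytes written here are sent as an SDP packet on the stream currently
 * driving this connector.
 */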
879static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *buf,
880 size_t size, loff_t *pos)
881{
882 int r;
883 uint8_t data[36];
884 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
885 struct dm_crtc_state *acrtc_state;
886 uint32_t write_size = 36;
887
888 if (connector->base.status != connector_status_connected)
889 return -ENODEV;
890
891 if (size == 0)
892 return 0;
893
894 acrtc_state = to_dm_crtc_state(connector->base.state->crtc->state);
895
896 r = copy_from_user(data, buf, write_size);
897
898 write_size -= r;
899
900 dc_stream_send_dp_sdp(acrtc_state->stream, data, write_size);
901
902 return write_size;
903}
904
905static ssize_t dp_dpcd_address_write(struct file *f, const char __user *buf,
906 size_t size, loff_t *pos)
907{
908 int r;
909 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
910
911 if (size < sizeof(connector->debugfs_dpcd_address))
912 return -EINVAL;
913
914 r = copy_from_user(&connector->debugfs_dpcd_address,
915 buf, sizeof(connector->debugfs_dpcd_address));
916
917 return size - r;
918}
919
920static ssize_t dp_dpcd_size_write(struct file *f, const char __user *buf,
921 size_t size, loff_t *pos)
922{
923 int r;
924 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
925
926 if (size < sizeof(connector->debugfs_dpcd_size))
927 return -EINVAL;
928
929 r = copy_from_user(&connector->debugfs_dpcd_size,
930 buf, sizeof(connector->debugfs_dpcd_size));
931
932 if (connector->debugfs_dpcd_size > 256)
933 connector->debugfs_dpcd_size = 0;
934
935 return size - r;
936}
937
938static ssize_t dp_dpcd_data_write(struct file *f, const char __user *buf,
939 size_t size, loff_t *pos)
940{
941 int r;
942 char *data;
943 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
944 struct dc_link *link = connector->dc_link;
945 uint32_t write_size = connector->debugfs_dpcd_size;
946
947 if (!write_size || size < write_size)
948 return -EINVAL;
949
950 data = kzalloc(write_size, GFP_KERNEL);
951 if (!data)
952 return 0;
953
954 r = copy_from_user(data, buf, write_size);
955
956 dm_helpers_dp_write_dpcd(link->ctx, link,
957 connector->debugfs_dpcd_address, data, write_size - r);
958 kfree(data);
959 return write_size - r;
960}
961
962static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf,
963 size_t size, loff_t *pos)
964{
965 int r;
966 char *data;
967 struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
968 struct dc_link *link = connector->dc_link;
969 uint32_t read_size = connector->debugfs_dpcd_size;
970
971 if (!read_size || size < read_size)
972 return 0;
973
974 data = kzalloc(read_size, GFP_KERNEL);
975 if (!data)
976 return 0;
977
978 dm_helpers_dp_read_dpcd(link->ctx, link,
979 connector->debugfs_dpcd_address, data, read_size);
980
981 r = copy_to_user(buf, data, read_size);
982
983 kfree(data);
984 return read_size - r;
985}
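
/*
 * Reports whether the sink supports FEC and DSC. For MST connectors the
 * presence of a DSC-capable AUX target (aconnector->dsc_aux) is used as
 * the indication; otherwise the DPCD FEC and DSC capability bits are
 * checked. Exposed as the per-connector "dp_dsc_fec_support" debugfs file.
 */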
996static int dp_dsc_fec_support_show(struct seq_file *m, void *data)
997{
998 struct drm_connector *connector = m->private;
999 struct drm_modeset_acquire_ctx ctx;
1000 struct drm_device *dev = connector->dev;
1001 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1002 int ret = 0;
1003 bool try_again = false;
1004 bool is_fec_supported = false;
1005 bool is_dsc_supported = false;
1006 struct dpcd_caps dpcd_caps;
1007
1008 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
1009 do {
1010 try_again = false;
1011 ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
1012 if (ret) {
1013 if (ret == -EDEADLK) {
1014 ret = drm_modeset_backoff(&ctx);
1015 if (!ret) {
1016 try_again = true;
1017 continue;
1018 }
1019 }
1020 break;
1021 }
1022 if (connector->status != connector_status_connected) {
1023 ret = -ENODEV;
1024 break;
1025 }
1026 dpcd_caps = aconnector->dc_link->dpcd_caps;
1027 if (aconnector->port) {
1028
1029
1030
1031
1032
1033 if (aconnector->dsc_aux) {
1034 is_fec_supported = true;
1035 is_dsc_supported = true;
1036 }
1037 } else {
1038 is_fec_supported = dpcd_caps.fec_cap.raw & 0x1;
1039 is_dsc_supported = dpcd_caps.dsc_caps.dsc_basic_caps.raw[0] & 0x1;
1040 }
1041 } while (try_again);
1042
1043 drm_modeset_drop_locks(&ctx);
1044 drm_modeset_acquire_fini(&ctx);
1045
1046 seq_printf(m, "FEC_Sink_Support: %s\n", yesno(is_fec_supported));
1047 seq_printf(m, "DSC_Sink_Support: %s\n", yesno(is_dsc_supported));
1048
1049 return ret;
1050}
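
/*
 * Backs the per-connector "trigger_hotplug" debugfs entry (write only).
 *
 * Writing 1 forces a plug: the link is re-detected and a hotplug event is
 * sent to user space. Writing 0 emulates an unplug: the local sink is
 * released, the link is marked disconnected and user space is notified.
 * Example (connector name depends on the system):
 *   echo 1 > /sys/kernel/debug/dri/0/DP-1/trigger_hotplug
 */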
1067static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
1068 size_t size, loff_t *pos)
1069{
1070 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1071 struct drm_connector *connector = &aconnector->base;
1072 struct dc_link *link = NULL;
1073 struct drm_device *dev = connector->dev;
1074 enum dc_connection_type new_connection_type = dc_connection_none;
1075 char *wr_buf = NULL;
1076 uint32_t wr_buf_size = 42;
1077 int max_param_num = 1;
1078 long param[1] = {0};
1079 uint8_t param_nums = 0;
1080
1081 if (!aconnector || !aconnector->dc_link)
1082 return -EINVAL;
1083
1084 if (size == 0)
1085 return -EINVAL;
1086
1087 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
1088
1089 if (!wr_buf) {
1090 DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
1091 return -ENOSPC;
1092 }
1093
1094 if (parse_write_buffer_into_params(wr_buf, size,
1095 (long *)param, buf,
1096 max_param_num,
					   &param_nums)) {
1098 kfree(wr_buf);
1099 return -EINVAL;
1100 }
1101
1102 if (param_nums <= 0) {
		DRM_DEBUG_DRIVER("user data could not be read\n");
1104 kfree(wr_buf);
1105 return -EINVAL;
1106 }
1107
	mutex_lock(&aconnector->hpd_lock);

	if (param[0] == 1) {
1110
1111 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type) &&
1112 new_connection_type != dc_connection_none)
1113 goto unlock;
1114
1115 if (!dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD))
1116 goto unlock;
1117
1118 amdgpu_dm_update_connector_after_detect(aconnector);
1119
1120 drm_modeset_lock_all(dev);
1121 dm_restore_drm_connector_state(dev, connector);
1122 drm_modeset_unlock_all(dev);
1123
1124 drm_kms_helper_hotplug_event(dev);
1125 } else if (param[0] == 0) {
1126 if (!aconnector->dc_link)
1127 goto unlock;
1128
1129 link = aconnector->dc_link;
1130
1131 if (link->local_sink) {
1132 dc_sink_release(link->local_sink);
1133 link->local_sink = NULL;
1134 }
1135
1136 link->dpcd_sink_count = 0;
1137 link->type = dc_connection_none;
1138 link->dongle_max_pix_clk = 0;
1139
1140 amdgpu_dm_update_connector_after_detect(aconnector);
1141
1142 drm_modeset_lock_all(dev);
1143 dm_restore_drm_connector_state(dev, connector);
1144 drm_modeset_unlock_all(dev);
1145
1146 drm_kms_helper_hotplug_event(dev);
1147 }
1148
1149unlock:
1150 mutex_unlock(&aconnector->hpd_lock);
1151
1152 kfree(wr_buf);
1153 return size;
1154}
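
/*
 * Returns whether the DSC block feeding this connector currently has its
 * clock enabled (0 or 1), read back from the hardware state of the pipe
 * attached to the link. Exposed as the per-connector "dsc_clock_en"
 * debugfs file.
 */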
1171static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
1172 size_t size, loff_t *pos)
1173{
1174 char *rd_buf = NULL;
1175 char *rd_buf_ptr = NULL;
1176 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1177 struct display_stream_compressor *dsc;
1178 struct dcn_dsc_state dsc_state = {0};
1179 const uint32_t rd_buf_size = 10;
1180 struct pipe_ctx *pipe_ctx;
1181 ssize_t result = 0;
1182 int i, r, str_len = 30;
1183
1184 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
1185
1186 if (!rd_buf)
1187 return -ENOMEM;
1188
1189 rd_buf_ptr = rd_buf;
1190
1191 for (i = 0; i < MAX_PIPES; i++) {
1192 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1193 if (pipe_ctx && pipe_ctx->stream &&
1194 pipe_ctx->stream->link == aconnector->dc_link)
1195 break;
1196 }
1197
1198 if (!pipe_ctx)
1199 return -ENXIO;
1200
1201 dsc = pipe_ctx->stream_res.dsc;
1202 if (dsc)
1203 dsc->funcs->dsc_read_state(dsc, &dsc_state);
1204
1205 snprintf(rd_buf_ptr, str_len,
1206 "%d\n",
1207 dsc_state.dsc_clock_en);
1208 rd_buf_ptr += str_len;
1209
1210 while (size) {
1211 if (*pos >= rd_buf_size)
1212 break;
1213
1214 r = put_user(*(rd_buf + result), buf);
1215 if (r)
1216 return r;
1217
1218 buf += 1;
1219 size -= 1;
1220 *pos += 1;
1221 result += 1;
1222 }
1223
1224 kfree(rd_buf);
1225 return result;
1226}
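
/*
 * Write handler for "dsc_clock_en": 1 forces DSC on, 2 forces DSC off,
 * any other value returns to the driver default. The choice is latched in
 * the connector's dsc_settings and takes effect on the next modeset
 * (dsc_force_changed is set on the CRTC state).
 */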
1253static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
1254 size_t size, loff_t *pos)
1255{
1256 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1257 struct drm_connector *connector = &aconnector->base;
1258 struct drm_device *dev = connector->dev;
1259 struct drm_crtc *crtc = NULL;
1260 struct dm_crtc_state *dm_crtc_state = NULL;
1261 struct pipe_ctx *pipe_ctx;
1262 int i;
1263 char *wr_buf = NULL;
1264 uint32_t wr_buf_size = 42;
1265 int max_param_num = 1;
1266 long param[1] = {0};
1267 uint8_t param_nums = 0;
1268
1269 if (size == 0)
1270 return -EINVAL;
1271
1272 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
1273
1274 if (!wr_buf) {
1275 DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
1276 return -ENOSPC;
1277 }
1278
1279 if (parse_write_buffer_into_params(wr_buf, size,
1280 (long *)param, buf,
1281 max_param_num,
					   &param_nums)) {
1283 kfree(wr_buf);
1284 return -EINVAL;
1285 }
1286
1287 if (param_nums <= 0) {
		DRM_DEBUG_DRIVER("user data could not be read\n");
1289 kfree(wr_buf);
1290 return -EINVAL;
1291 }
1292
1293 for (i = 0; i < MAX_PIPES; i++) {
1294 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1295 if (pipe_ctx && pipe_ctx->stream &&
1296 pipe_ctx->stream->link == aconnector->dc_link)
1297 break;
1298 }
1299
1300 if (!pipe_ctx || !pipe_ctx->stream)
1301 goto done;
1302
1303
1304 mutex_lock(&dev->mode_config.mutex);
1305 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1306
1307 if (connector->state == NULL)
1308 goto unlock;
1309
1310 crtc = connector->state->crtc;
1311 if (crtc == NULL)
1312 goto unlock;
1313
1314 drm_modeset_lock(&crtc->mutex, NULL);
1315 if (crtc->state == NULL)
1316 goto unlock;
1317
1318 dm_crtc_state = to_dm_crtc_state(crtc->state);
1319 if (dm_crtc_state->stream == NULL)
1320 goto unlock;
1321
1322 if (param[0] == 1)
1323 aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_ENABLE;
1324 else if (param[0] == 2)
1325 aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_DISABLE;
1326 else
1327 aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_DEFAULT;
1328
1329 dm_crtc_state->dsc_force_changed = true;
1330
1331unlock:
1332 if (crtc)
1333 drm_modeset_unlock(&crtc->mutex);
1334 drm_modeset_unlock(&dev->mode_config.connection_mutex);
1335 mutex_unlock(&dev->mode_config.mutex);
1336
1337done:
1338 kfree(wr_buf);
1339 return size;
1340}
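
/*
 * Returns the DSC slice width currently programmed for the pipe driving
 * this connector. Exposed as the per-connector "dsc_slice_width" debugfs
 * file.
 */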
1358static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
1359 size_t size, loff_t *pos)
1360{
1361 char *rd_buf = NULL;
1362 char *rd_buf_ptr = NULL;
1363 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1364 struct display_stream_compressor *dsc;
1365 struct dcn_dsc_state dsc_state = {0};
1366 const uint32_t rd_buf_size = 100;
1367 struct pipe_ctx *pipe_ctx;
1368 ssize_t result = 0;
1369 int i, r, str_len = 30;
1370
1371 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
1372
1373 if (!rd_buf)
1374 return -ENOMEM;
1375
1376 rd_buf_ptr = rd_buf;
1377
1378 for (i = 0; i < MAX_PIPES; i++) {
1379 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1380 if (pipe_ctx && pipe_ctx->stream &&
1381 pipe_ctx->stream->link == aconnector->dc_link)
1382 break;
1383 }
1384
1385 if (!pipe_ctx)
1386 return -ENXIO;
1387
1388 dsc = pipe_ctx->stream_res.dsc;
1389 if (dsc)
1390 dsc->funcs->dsc_read_state(dsc, &dsc_state);
1391
1392 snprintf(rd_buf_ptr, str_len,
1393 "%d\n",
1394 dsc_state.dsc_slice_width);
1395 rd_buf_ptr += str_len;
1396
1397 while (size) {
1398 if (*pos >= rd_buf_size)
1399 break;
1400
1401 r = put_user(*(rd_buf + result), buf);
1402 if (r)
1403 return r;
1404
1405 buf += 1;
1406 size -= 1;
1407 *pos += 1;
1408 result += 1;
1409 }
1410
1411 kfree(rd_buf);
1412 return result;
1413}
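
/*
 * Write handler for "dsc_slice_width": a positive value is converted into
 * a horizontal slice count with DIV_ROUND_UP(h_addressable, value), while
 * 0 restores automatic slice selection. Takes effect on the next modeset.
 */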
1438static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
1439 size_t size, loff_t *pos)
1440{
1441 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1442 struct pipe_ctx *pipe_ctx;
1443 struct drm_connector *connector = &aconnector->base;
1444 struct drm_device *dev = connector->dev;
1445 struct drm_crtc *crtc = NULL;
1446 struct dm_crtc_state *dm_crtc_state = NULL;
1447 int i;
1448 char *wr_buf = NULL;
1449 uint32_t wr_buf_size = 42;
1450 int max_param_num = 1;
1451 long param[1] = {0};
1452 uint8_t param_nums = 0;
1453
1454 if (size == 0)
1455 return -EINVAL;
1456
1457 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
1458
1459 if (!wr_buf) {
1460 DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
1461 return -ENOSPC;
1462 }
1463
1464 if (parse_write_buffer_into_params(wr_buf, size,
1465 (long *)param, buf,
1466 max_param_num,
					   &param_nums)) {
1468 kfree(wr_buf);
1469 return -EINVAL;
1470 }
1471
1472 if (param_nums <= 0) {
		DRM_DEBUG_DRIVER("user data could not be read\n");
1474 kfree(wr_buf);
1475 return -EINVAL;
1476 }
1477
1478 for (i = 0; i < MAX_PIPES; i++) {
1479 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1480 if (pipe_ctx && pipe_ctx->stream &&
1481 pipe_ctx->stream->link == aconnector->dc_link)
1482 break;
1483 }
1484
1485 if (!pipe_ctx || !pipe_ctx->stream)
1486 goto done;
1487
1488
1489 mutex_lock(&dev->mode_config.mutex);
1490 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1491
1492 if (connector->state == NULL)
1493 goto unlock;
1494
1495 crtc = connector->state->crtc;
1496 if (crtc == NULL)
1497 goto unlock;
1498
1499 drm_modeset_lock(&crtc->mutex, NULL);
1500 if (crtc->state == NULL)
1501 goto unlock;
1502
1503 dm_crtc_state = to_dm_crtc_state(crtc->state);
1504 if (dm_crtc_state->stream == NULL)
1505 goto unlock;
1506
1507 if (param[0] > 0)
1508 aconnector->dsc_settings.dsc_num_slices_h = DIV_ROUND_UP(
1509 pipe_ctx->stream->timing.h_addressable,
1510 param[0]);
1511 else
1512 aconnector->dsc_settings.dsc_num_slices_h = 0;
1513
1514 dm_crtc_state->dsc_force_changed = true;
1515
1516unlock:
1517 if (crtc)
1518 drm_modeset_unlock(&crtc->mutex);
1519 drm_modeset_unlock(&dev->mode_config.connection_mutex);
1520 mutex_unlock(&dev->mode_config.mutex);
1521
1522done:
1523 kfree(wr_buf);
1524 return size;
1525}
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
1544 size_t size, loff_t *pos)
1545{
1546 char *rd_buf = NULL;
1547 char *rd_buf_ptr = NULL;
1548 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1549 struct display_stream_compressor *dsc;
1550 struct dcn_dsc_state dsc_state = {0};
1551 const uint32_t rd_buf_size = 100;
1552 struct pipe_ctx *pipe_ctx;
1553 ssize_t result = 0;
1554 int i, r, str_len = 30;
1555
1556 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
1557
1558 if (!rd_buf)
1559 return -ENOMEM;
1560
1561 rd_buf_ptr = rd_buf;
1562
1563 for (i = 0; i < MAX_PIPES; i++) {
1564 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1565 if (pipe_ctx && pipe_ctx->stream &&
1566 pipe_ctx->stream->link == aconnector->dc_link)
1567 break;
1568 }
1569
1570 if (!pipe_ctx)
1571 return -ENXIO;
1572
1573 dsc = pipe_ctx->stream_res.dsc;
1574 if (dsc)
1575 dsc->funcs->dsc_read_state(dsc, &dsc_state);
1576
1577 snprintf(rd_buf_ptr, str_len,
1578 "%d\n",
1579 dsc_state.dsc_slice_height);
1580 rd_buf_ptr += str_len;
1581
1582 while (size) {
1583 if (*pos >= rd_buf_size)
1584 break;
1585
1586 r = put_user(*(rd_buf + result), buf);
1587 if (r)
1588 return r;
1589
1590 buf += 1;
1591 size -= 1;
1592 *pos += 1;
1593 result += 1;
1594 }
1595
1596 kfree(rd_buf);
1597 return result;
1598}
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
1624 size_t size, loff_t *pos)
1625{
1626 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1627 struct drm_connector *connector = &aconnector->base;
1628 struct drm_device *dev = connector->dev;
1629 struct drm_crtc *crtc = NULL;
1630 struct dm_crtc_state *dm_crtc_state = NULL;
1631 struct pipe_ctx *pipe_ctx;
1632 int i;
1633 char *wr_buf = NULL;
1634 uint32_t wr_buf_size = 42;
1635 int max_param_num = 1;
1636 uint8_t param_nums = 0;
1637 long param[1] = {0};
1638
1639 if (size == 0)
1640 return -EINVAL;
1641
1642 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
1643
1644 if (!wr_buf) {
1645 DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
1646 return -ENOSPC;
1647 }
1648
1649 if (parse_write_buffer_into_params(wr_buf, size,
1650 (long *)param, buf,
1651 max_param_num,
					   &param_nums)) {
1653 kfree(wr_buf);
1654 return -EINVAL;
1655 }
1656
1657 if (param_nums <= 0) {
		DRM_DEBUG_DRIVER("user data could not be read\n");
1659 kfree(wr_buf);
1660 return -EINVAL;
1661 }
1662
1663 for (i = 0; i < MAX_PIPES; i++) {
1664 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1665 if (pipe_ctx && pipe_ctx->stream &&
1666 pipe_ctx->stream->link == aconnector->dc_link)
1667 break;
1668 }
1669
1670 if (!pipe_ctx || !pipe_ctx->stream)
1671 goto done;
1672
1673
1674 mutex_lock(&dev->mode_config.mutex);
1675 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1676
1677 if (connector->state == NULL)
1678 goto unlock;
1679
1680 crtc = connector->state->crtc;
1681 if (crtc == NULL)
1682 goto unlock;
1683
1684 drm_modeset_lock(&crtc->mutex, NULL);
1685 if (crtc->state == NULL)
1686 goto unlock;
1687
1688 dm_crtc_state = to_dm_crtc_state(crtc->state);
1689 if (dm_crtc_state->stream == NULL)
1690 goto unlock;
1691
1692 if (param[0] > 0)
1693 aconnector->dsc_settings.dsc_num_slices_v = DIV_ROUND_UP(
1694 pipe_ctx->stream->timing.v_addressable,
1695 param[0]);
1696 else
1697 aconnector->dsc_settings.dsc_num_slices_v = 0;
1698
1699 dm_crtc_state->dsc_force_changed = true;
1700
1701unlock:
1702 if (crtc)
1703 drm_modeset_unlock(&crtc->mutex);
1704 drm_modeset_unlock(&dev->mode_config.connection_mutex);
1705 mutex_unlock(&dev->mode_config.mutex);
1706
1707done:
1708 kfree(wr_buf);
1709 return size;
1710}
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
1725 size_t size, loff_t *pos)
1726{
1727 char *rd_buf = NULL;
1728 char *rd_buf_ptr = NULL;
1729 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1730 struct display_stream_compressor *dsc;
1731 struct dcn_dsc_state dsc_state = {0};
1732 const uint32_t rd_buf_size = 100;
1733 struct pipe_ctx *pipe_ctx;
1734 ssize_t result = 0;
1735 int i, r, str_len = 30;
1736
1737 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
1738
1739 if (!rd_buf)
1740 return -ENOMEM;
1741
1742 rd_buf_ptr = rd_buf;
1743
1744 for (i = 0; i < MAX_PIPES; i++) {
1745 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1746 if (pipe_ctx && pipe_ctx->stream &&
1747 pipe_ctx->stream->link == aconnector->dc_link)
1748 break;
1749 }
1750
1751 if (!pipe_ctx)
1752 return -ENXIO;
1753
1754 dsc = pipe_ctx->stream_res.dsc;
1755 if (dsc)
1756 dsc->funcs->dsc_read_state(dsc, &dsc_state);
1757
1758 snprintf(rd_buf_ptr, str_len,
1759 "%d\n",
1760 dsc_state.dsc_bits_per_pixel);
1761 rd_buf_ptr += str_len;
1762
1763 while (size) {
1764 if (*pos >= rd_buf_size)
1765 break;
1766
1767 r = put_user(*(rd_buf + result), buf);
1768 if (r)
1769 return r;
1770
1771 buf += 1;
1772 size -= 1;
1773 *pos += 1;
1774 result += 1;
1775 }
1776
1777 kfree(rd_buf);
1778 return result;
1779}
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *buf,
1802 size_t size, loff_t *pos)
1803{
1804 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1805 struct drm_connector *connector = &aconnector->base;
1806 struct drm_device *dev = connector->dev;
1807 struct drm_crtc *crtc = NULL;
1808 struct dm_crtc_state *dm_crtc_state = NULL;
1809 struct pipe_ctx *pipe_ctx;
1810 int i;
1811 char *wr_buf = NULL;
1812 uint32_t wr_buf_size = 42;
1813 int max_param_num = 1;
1814 uint8_t param_nums = 0;
1815 long param[1] = {0};
1816
1817 if (size == 0)
1818 return -EINVAL;
1819
1820 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
1821
1822 if (!wr_buf) {
1823 DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
1824 return -ENOSPC;
1825 }
1826
1827 if (parse_write_buffer_into_params(wr_buf, size,
1828 (long *)param, buf,
1829 max_param_num,
					   &param_nums)) {
1831 kfree(wr_buf);
1832 return -EINVAL;
1833 }
1834
1835 if (param_nums <= 0) {
		DRM_DEBUG_DRIVER("user data could not be read\n");
1837 kfree(wr_buf);
1838 return -EINVAL;
1839 }
1840
1841 for (i = 0; i < MAX_PIPES; i++) {
1842 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1843 if (pipe_ctx && pipe_ctx->stream &&
1844 pipe_ctx->stream->link == aconnector->dc_link)
1845 break;
1846 }
1847
1848 if (!pipe_ctx || !pipe_ctx->stream)
1849 goto done;
1850
1851
1852 mutex_lock(&dev->mode_config.mutex);
1853 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1854
1855 if (connector->state == NULL)
1856 goto unlock;
1857
1858 crtc = connector->state->crtc;
1859 if (crtc == NULL)
1860 goto unlock;
1861
1862 drm_modeset_lock(&crtc->mutex, NULL);
1863 if (crtc->state == NULL)
1864 goto unlock;
1865
1866 dm_crtc_state = to_dm_crtc_state(crtc->state);
1867 if (dm_crtc_state->stream == NULL)
1868 goto unlock;
1869
1870 aconnector->dsc_settings.dsc_bits_per_pixel = param[0];
1871
1872 dm_crtc_state->dsc_force_changed = true;
1873
1874unlock:
1875 if (crtc)
1876 drm_modeset_unlock(&crtc->mutex);
1877 drm_modeset_unlock(&dev->mode_config.connection_mutex);
1878 mutex_unlock(&dev->mode_config.mutex);
1879
1880done:
1881 kfree(wr_buf);
1882 return size;
1883}
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
1901 size_t size, loff_t *pos)
1902{
1903 char *rd_buf = NULL;
1904 char *rd_buf_ptr = NULL;
1905 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1906 struct display_stream_compressor *dsc;
1907 struct dcn_dsc_state dsc_state = {0};
1908 const uint32_t rd_buf_size = 100;
1909 struct pipe_ctx *pipe_ctx;
1910 ssize_t result = 0;
1911 int i, r, str_len = 30;
1912
1913 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
1914
1915 if (!rd_buf)
1916 return -ENOMEM;
1917
1918 rd_buf_ptr = rd_buf;
1919
1920 for (i = 0; i < MAX_PIPES; i++) {
1921 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1922 if (pipe_ctx && pipe_ctx->stream &&
1923 pipe_ctx->stream->link == aconnector->dc_link)
1924 break;
1925 }
1926
1927 if (!pipe_ctx)
1928 return -ENXIO;
1929
1930 dsc = pipe_ctx->stream_res.dsc;
1931 if (dsc)
1932 dsc->funcs->dsc_read_state(dsc, &dsc_state);
1933
1934 snprintf(rd_buf_ptr, str_len,
1935 "%d\n",
1936 dsc_state.dsc_pic_width);
1937 rd_buf_ptr += str_len;
1938
1939 while (size) {
1940 if (*pos >= rd_buf_size)
1941 break;
1942
1943 r = put_user(*(rd_buf + result), buf);
1944 if (r)
1945 return r;
1946
1947 buf += 1;
1948 size -= 1;
1949 *pos += 1;
1950 result += 1;
1951 }
1952
1953 kfree(rd_buf);
1954 return result;
1955}
1956
1957static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
1958 size_t size, loff_t *pos)
1959{
1960 char *rd_buf = NULL;
1961 char *rd_buf_ptr = NULL;
1962 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
1963 struct display_stream_compressor *dsc;
1964 struct dcn_dsc_state dsc_state = {0};
1965 const uint32_t rd_buf_size = 100;
1966 struct pipe_ctx *pipe_ctx;
1967 ssize_t result = 0;
1968 int i, r, str_len = 30;
1969
1970 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
1971
1972 if (!rd_buf)
1973 return -ENOMEM;
1974
1975 rd_buf_ptr = rd_buf;
1976
1977 for (i = 0; i < MAX_PIPES; i++) {
1978 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
1979 if (pipe_ctx && pipe_ctx->stream &&
1980 pipe_ctx->stream->link == aconnector->dc_link)
1981 break;
1982 }
1983
1984 if (!pipe_ctx)
1985 return -ENXIO;
1986
1987 dsc = pipe_ctx->stream_res.dsc;
1988 if (dsc)
1989 dsc->funcs->dsc_read_state(dsc, &dsc_state);
1990
1991 snprintf(rd_buf_ptr, str_len,
1992 "%d\n",
1993 dsc_state.dsc_pic_height);
1994 rd_buf_ptr += str_len;
1995
1996 while (size) {
1997 if (*pos >= rd_buf_size)
1998 break;
1999
2000 r = put_user(*(rd_buf + result), buf);
2001 if (r)
2002 return r;
2003
2004 buf += 1;
2005 size -= 1;
2006 *pos += 1;
2007 result += 1;
2008 }
2009
2010 kfree(rd_buf);
2011 return result;
2012}
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
2030 size_t size, loff_t *pos)
2031{
2032 char *rd_buf = NULL;
2033 char *rd_buf_ptr = NULL;
2034 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
2035 struct display_stream_compressor *dsc;
2036 struct dcn_dsc_state dsc_state = {0};
2037 const uint32_t rd_buf_size = 100;
2038 struct pipe_ctx *pipe_ctx;
2039 ssize_t result = 0;
2040 int i, r, str_len = 30;
2041
2042 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
2043
2044 if (!rd_buf)
2045 return -ENOMEM;
2046
2047 rd_buf_ptr = rd_buf;
2048
2049 for (i = 0; i < MAX_PIPES; i++) {
2050 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
2051 if (pipe_ctx && pipe_ctx->stream &&
2052 pipe_ctx->stream->link == aconnector->dc_link)
2053 break;
2054 }
2055
2056 if (!pipe_ctx)
2057 return -ENXIO;
2058
2059 dsc = pipe_ctx->stream_res.dsc;
2060 if (dsc)
2061 dsc->funcs->dsc_read_state(dsc, &dsc_state);
2062
2063 snprintf(rd_buf_ptr, str_len,
2064 "%d\n",
2065 dsc_state.dsc_chunk_size);
2066 rd_buf_ptr += str_len;
2067
2068 while (size) {
2069 if (*pos >= rd_buf_size)
2070 break;
2071
2072 r = put_user(*(rd_buf + result), buf);
2073 if (r)
2074 return r;
2075
2076 buf += 1;
2077 size -= 1;
2078 *pos += 1;
2079 result += 1;
2080 }
2081
2082 kfree(rd_buf);
2083 return result;
2084}
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
2102 size_t size, loff_t *pos)
2103{
2104 char *rd_buf = NULL;
2105 char *rd_buf_ptr = NULL;
2106 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
2107 struct display_stream_compressor *dsc;
2108 struct dcn_dsc_state dsc_state = {0};
2109 const uint32_t rd_buf_size = 100;
2110 struct pipe_ctx *pipe_ctx;
2111 ssize_t result = 0;
2112 int i, r, str_len = 30;
2113
2114 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
2115
2116 if (!rd_buf)
2117 return -ENOMEM;
2118
2119 rd_buf_ptr = rd_buf;
2120
2121 for (i = 0; i < MAX_PIPES; i++) {
2122 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
2123 if (pipe_ctx && pipe_ctx->stream &&
2124 pipe_ctx->stream->link == aconnector->dc_link)
2125 break;
2126 }
2127
2128 if (!pipe_ctx)
2129 return -ENXIO;
2130
2131 dsc = pipe_ctx->stream_res.dsc;
2132 if (dsc)
2133 dsc->funcs->dsc_read_state(dsc, &dsc_state);
2134
2135 snprintf(rd_buf_ptr, str_len,
2136 "%d\n",
2137 dsc_state.dsc_slice_bpg_offset);
2138 rd_buf_ptr += str_len;
2139
2140 while (size) {
2141 if (*pos >= rd_buf_size)
2142 break;
2143
2144 r = put_user(*(rd_buf + result), buf);
2145 if (r)
2146 return r;
2147
2148 buf += 1;
2149 size -= 1;
2150 *pos += 1;
2151 result += 1;
2152 }
2153
2154 kfree(rd_buf);
2155 return result;
2156}
2157
2158DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support);
2159DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
2160DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
2161DEFINE_SHOW_ATTRIBUTE(output_bpc);
2162#ifdef CONFIG_DRM_AMD_DC_HDCP
2163DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
2164#endif
2165
2166static const struct file_operations dp_dsc_clock_en_debugfs_fops = {
2167 .owner = THIS_MODULE,
2168 .read = dp_dsc_clock_en_read,
2169 .write = dp_dsc_clock_en_write,
2170 .llseek = default_llseek
2171};
2172
2173static const struct file_operations dp_dsc_slice_width_debugfs_fops = {
2174 .owner = THIS_MODULE,
2175 .read = dp_dsc_slice_width_read,
2176 .write = dp_dsc_slice_width_write,
2177 .llseek = default_llseek
2178};
2179
2180static const struct file_operations dp_dsc_slice_height_debugfs_fops = {
2181 .owner = THIS_MODULE,
2182 .read = dp_dsc_slice_height_read,
2183 .write = dp_dsc_slice_height_write,
2184 .llseek = default_llseek
2185};
2186
2187static const struct file_operations dp_dsc_bits_per_pixel_debugfs_fops = {
2188 .owner = THIS_MODULE,
2189 .read = dp_dsc_bits_per_pixel_read,
2190 .write = dp_dsc_bits_per_pixel_write,
2191 .llseek = default_llseek
2192};
2193
2194static const struct file_operations dp_dsc_pic_width_debugfs_fops = {
2195 .owner = THIS_MODULE,
2196 .read = dp_dsc_pic_width_read,
2197 .llseek = default_llseek
2198};
2199
2200static const struct file_operations dp_dsc_pic_height_debugfs_fops = {
2201 .owner = THIS_MODULE,
2202 .read = dp_dsc_pic_height_read,
2203 .llseek = default_llseek
2204};
2205
2206static const struct file_operations dp_dsc_chunk_size_debugfs_fops = {
2207 .owner = THIS_MODULE,
2208 .read = dp_dsc_chunk_size_read,
2209 .llseek = default_llseek
2210};
2211
2212static const struct file_operations dp_dsc_slice_bpg_offset_debugfs_fops = {
2213 .owner = THIS_MODULE,
2214 .read = dp_dsc_slice_bpg_offset_read,
2215 .llseek = default_llseek
2216};
2217
2218static const struct file_operations trigger_hotplug_debugfs_fops = {
2219 .owner = THIS_MODULE,
2220 .write = trigger_hotplug,
2221 .llseek = default_llseek
2222};
2223
2224static const struct file_operations dp_link_settings_debugfs_fops = {
2225 .owner = THIS_MODULE,
2226 .read = dp_link_settings_read,
2227 .write = dp_link_settings_write,
2228 .llseek = default_llseek
2229};
2230
2231static const struct file_operations dp_phy_settings_debugfs_fop = {
2232 .owner = THIS_MODULE,
2233 .read = dp_phy_settings_read,
2234 .write = dp_phy_settings_write,
2235 .llseek = default_llseek
2236};
2237
2238static const struct file_operations dp_phy_test_pattern_fops = {
2239 .owner = THIS_MODULE,
2240 .write = dp_phy_test_pattern_debugfs_write,
2241 .llseek = default_llseek
2242};
2243
2244static const struct file_operations sdp_message_fops = {
2245 .owner = THIS_MODULE,
2246 .write = dp_sdp_message_debugfs_write,
2247 .llseek = default_llseek
2248};
2249
2250static const struct file_operations dp_dpcd_address_debugfs_fops = {
2251 .owner = THIS_MODULE,
2252 .write = dp_dpcd_address_write,
2253 .llseek = default_llseek
2254};
2255
2256static const struct file_operations dp_dpcd_size_debugfs_fops = {
2257 .owner = THIS_MODULE,
2258 .write = dp_dpcd_size_write,
2259 .llseek = default_llseek
2260};
2261
2262static const struct file_operations dp_dpcd_data_debugfs_fops = {
2263 .owner = THIS_MODULE,
2264 .read = dp_dpcd_data_read,
2265 .write = dp_dpcd_data_write,
2266 .llseek = default_llseek
2267};
2268
2269static const struct {
2270 char *name;
2271 const struct file_operations *fops;
2272} dp_debugfs_entries[] = {
2273 {"link_settings", &dp_link_settings_debugfs_fops},
2274 {"phy_settings", &dp_phy_settings_debugfs_fop},
2275 {"test_pattern", &dp_phy_test_pattern_fops},
2276#ifdef CONFIG_DRM_AMD_DC_HDCP
2277 {"hdcp_sink_capability", &hdcp_sink_capability_fops},
2278#endif
2279 {"sdp_message", &sdp_message_fops},
2280 {"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
2281 {"aux_dpcd_size", &dp_dpcd_size_debugfs_fops},
2282 {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops},
2283 {"dsc_clock_en", &dp_dsc_clock_en_debugfs_fops},
2284 {"dsc_slice_width", &dp_dsc_slice_width_debugfs_fops},
2285 {"dsc_slice_height", &dp_dsc_slice_height_debugfs_fops},
2286 {"dsc_bits_per_pixel", &dp_dsc_bits_per_pixel_debugfs_fops},
2287 {"dsc_pic_width", &dp_dsc_pic_width_debugfs_fops},
2288 {"dsc_pic_height", &dp_dsc_pic_height_debugfs_fops},
2289 {"dsc_chunk_size", &dp_dsc_chunk_size_debugfs_fops},
2290 {"dsc_slice_bpg", &dp_dsc_slice_bpg_offset_debugfs_fops},
2291 {"dp_dsc_fec_support", &dp_dsc_fec_support_fops}
2292};
2293
2294#ifdef CONFIG_DRM_AMD_DC_HDCP
2295static const struct {
2296 char *name;
2297 const struct file_operations *fops;
2298} hdmi_debugfs_entries[] = {
2299 {"hdcp_sink_capability", &hdcp_sink_capability_fops}
2300};
2301#endif
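
/*
 * Setter for the per-connector "force_yuv420_output" debugfs attribute:
 * any non-zero value requests that the driver force YUV 4:2:0 output for
 * this connector.
 */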
2305static int force_yuv420_output_set(void *data, u64 val)
2306{
2307 struct amdgpu_dm_connector *connector = data;
2308
2309 connector->force_yuv420_output = (bool)val;
2310
2311 return 0;
2312}
2313
2314
2315
2316
2317static int force_yuv420_output_get(void *data, u64 *val)
2318{
2319 struct amdgpu_dm_connector *connector = data;
2320
2321 *val = connector->force_yuv420_output;
2322
2323 return 0;
2324}
2325
2326DEFINE_DEBUGFS_ATTRIBUTE(force_yuv420_output_fops, force_yuv420_output_get,
2327 force_yuv420_output_set, "%llu\n");
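
/*
 * Returns the current PSR state of the link, exposed read-only as the
 * "psr_state" debugfs file on eDP connectors.
 */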
2332static int psr_get(void *data, u64 *val)
2333{
2334 struct amdgpu_dm_connector *connector = data;
2335 struct dc_link *link = connector->dc_link;
2336 enum dc_psr_state state = PSR_STATE0;
2337
2338 dc_link_get_psr_state(link, &state);
2339
2340 *val = state;
2341
2342 return 0;
2343}
2344
2345
2346DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n");
2347
2348void connector_debugfs_init(struct amdgpu_dm_connector *connector)
2349{
2350 int i;
2351 struct dentry *dir = connector->base.debugfs_entry;
2352
2353 if (connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2354 connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
2355 for (i = 0; i < ARRAY_SIZE(dp_debugfs_entries); i++) {
2356 debugfs_create_file(dp_debugfs_entries[i].name,
2357 0644, dir, connector,
2358 dp_debugfs_entries[i].fops);
2359 }
2360 }
2361 if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2362 debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
2363
2364 debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
2365 &force_yuv420_output_fops);
2366
2367 debugfs_create_file("output_bpc", 0644, dir, connector,
2368 &output_bpc_fops);
2369
2370 debugfs_create_file("trigger_hotplug", 0644, dir, connector,
2371 &trigger_hotplug_debugfs_fops);
2372
2373 connector->debugfs_dpcd_address = 0;
2374 connector->debugfs_dpcd_size = 0;
2375
2376#ifdef CONFIG_DRM_AMD_DC_HDCP
2377 if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) {
2378 for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_entries); i++) {
2379 debugfs_create_file(hdmi_debugfs_entries[i].name,
2380 0644, dir, connector,
2381 hdmi_debugfs_entries[i].fops);
2382 }
2383 }
2384#endif
2385}
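
/*
 * Read handler for amdgpu_dm_dtn_log: captures the current display
 * hardware state through dc->hwss.log_hw_state() and returns it to the
 * caller, e.g.:
 *   cat /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log
 */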
2391static ssize_t dtn_log_read(
2392 struct file *f,
2393 char __user *buf,
2394 size_t size,
2395 loff_t *pos)
2396{
2397 struct amdgpu_device *adev = file_inode(f)->i_private;
2398 struct dc *dc = adev->dm.dc;
2399 struct dc_log_buffer_ctx log_ctx = { 0 };
2400 ssize_t result = 0;
2401
2402 if (!buf || !size)
2403 return -EINVAL;
2404
2405 if (!dc->hwss.log_hw_state)
2406 return 0;
2407
2408 dc->hwss.log_hw_state(dc, &log_ctx);
2409
2410 if (*pos < log_ctx.pos) {
2411 size_t to_copy = log_ctx.pos - *pos;
2412
2413 to_copy = min(to_copy, size);
2414
2415 if (!copy_to_user(buf, log_ctx.buf + *pos, to_copy)) {
2416 *pos += to_copy;
2417 result = to_copy;
2418 }
2419 }
2420
2421 kfree(log_ctx.buf);
2422
2423 return result;
2424}
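
/*
 * Write handler for amdgpu_dm_dtn_log: any non-empty write triggers
 * log_hw_state() without a capture buffer, so the state is emitted to the
 * kernel log instead of being returned to the caller.
 */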
2430static ssize_t dtn_log_write(
2431 struct file *f,
2432 const char __user *buf,
2433 size_t size,
2434 loff_t *pos)
2435{
2436 struct amdgpu_device *adev = file_inode(f)->i_private;
2437 struct dc *dc = adev->dm.dc;
2438
2439
2440 if (size == 0)
2441 return 0;
2442
2443 if (dc->hwss.log_hw_state)
2444 dc->hwss.log_hw_state(dc, NULL);
2445
2446 return size;
2447}
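
/*
 * Reports the backlight PWM value currently applied to the backlight
 * link (amdgpu_current_backlight_pwm).
 */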
2454static int current_backlight_read(struct seq_file *m, void *data)
2455{
2456 struct drm_info_node *node = (struct drm_info_node *)m->private;
2457 struct drm_device *dev = node->minor->dev;
2458 struct amdgpu_device *adev = drm_to_adev(dev);
2459 struct amdgpu_display_manager *dm = &adev->dm;
2460
2461 unsigned int backlight = dc_link_get_backlight_level(dm->backlight_link);
2462
2463 seq_printf(m, "0x%x\n", backlight);
2464 return 0;
2465}
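
/*
 * Reports the backlight PWM value the driver is targeting
 * (amdgpu_target_backlight_pwm).
 */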
2472static int target_backlight_read(struct seq_file *m, void *data)
2473{
2474 struct drm_info_node *node = (struct drm_info_node *)m->private;
2475 struct drm_device *dev = node->minor->dev;
2476 struct amdgpu_device *adev = drm_to_adev(dev);
2477 struct amdgpu_display_manager *dm = &adev->dm;
2478
2479 unsigned int backlight = dc_link_get_target_backlight_pwm(dm->backlight_link);
2480
2481 seq_printf(m, "0x%x\n", backlight);
2482 return 0;
2483}
2484
2485static int mst_topo(struct seq_file *m, void *unused)
2486{
2487 struct drm_info_node *node = (struct drm_info_node *)m->private;
2488 struct drm_device *dev = node->minor->dev;
2489 struct drm_connector *connector;
2490 struct drm_connector_list_iter conn_iter;
2491 struct amdgpu_dm_connector *aconnector;
2492
2493 drm_connector_list_iter_begin(dev, &conn_iter);
2494 drm_for_each_connector_iter(connector, &conn_iter) {
2495 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
2496 continue;
2497
2498 aconnector = to_amdgpu_dm_connector(connector);
2499
2500 seq_printf(m, "\nMST topology for connector %d\n", aconnector->connector_id);
2501 drm_dp_mst_dump_topology(m, &aconnector->mst_mgr);
2502 }
2503 drm_connector_list_iter_end(&conn_iter);
2504
2505 return 0;
2506}
2507
2508static const struct drm_info_list amdgpu_dm_debugfs_list[] = {
	{"amdgpu_current_backlight_pwm", &current_backlight_read},
2510 {"amdgpu_target_backlight_pwm", &target_backlight_read},
2511 {"amdgpu_mst_topology", &mst_topo},
2512};
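
/*
 * Setter for amdgpu_dm_force_timing_sync: the flag is stored in the
 * display manager and amdgpu_dm_trigger_timing_sync() is called so the
 * change takes effect immediately.
 */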
2519static int force_timing_sync_set(void *data, u64 val)
2520{
2521 struct amdgpu_device *adev = data;
2522
2523 adev->dm.force_timing_sync = (bool)val;
2524
2525 amdgpu_dm_trigger_timing_sync(adev_to_drm(adev));
2526
2527 return 0;
2528}
2529
2530
2531
2532
2533
2534static int force_timing_sync_get(void *data, u64 *val)
2535{
2536 struct amdgpu_device *adev = data;
2537
2538 *val = adev->dm.force_timing_sync;
2539
2540 return 0;
2541}
2542
2543DEFINE_DEBUGFS_ATTRIBUTE(force_timing_sync_ops, force_timing_sync_get,
2544 force_timing_sync_set, "%llu\n");
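
/*
 * Setter for amdgpu_dm_visual_confirm: the value is written straight to
 * DC's debug.visual_confirm option.
 */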
2550static int visual_confirm_set(void *data, u64 val)
2551{
2552 struct amdgpu_device *adev = data;
2553
2554 adev->dm.dc->debug.visual_confirm = (enum visual_confirm)val;
2555
2556 return 0;
2557}
2558
2559
2560
2561
2562
2563static int visual_confirm_get(void *data, u64 *val)
2564{
2565 struct amdgpu_device *adev = data;
2566
2567 *val = adev->dm.dc->debug.visual_confirm;
2568
2569 return 0;
2570}
2571
2572DEFINE_DEBUGFS_ATTRIBUTE(visual_confirm_fops, visual_confirm_get,
2573 visual_confirm_set, "%llu\n");
2574
2575int dtn_debugfs_init(struct amdgpu_device *adev)
2576{
2577 static const struct file_operations dtn_log_fops = {
2578 .owner = THIS_MODULE,
2579 .read = dtn_log_read,
2580 .write = dtn_log_write,
2581 .llseek = default_llseek
2582 };
2583
2584 struct drm_minor *minor = adev_to_drm(adev)->primary;
2585 struct dentry *root = minor->debugfs_root;
2586 int ret;
2587
2588 ret = amdgpu_debugfs_add_files(adev, amdgpu_dm_debugfs_list,
2589 ARRAY_SIZE(amdgpu_dm_debugfs_list));
2590 if (ret)
2591 return ret;
2592
2593 debugfs_create_file("amdgpu_dm_dtn_log", 0644, root, adev,
2594 &dtn_log_fops);
2595
2596 debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev,
2597 &visual_confirm_fops);
2598
2599 debugfs_create_file_unsafe("amdgpu_dm_dmub_tracebuffer", 0644, root,
2600 adev, &dmub_tracebuffer_fops);
2601
2602 debugfs_create_file_unsafe("amdgpu_dm_dmub_fw_state", 0644, root,
2603 adev, &dmub_fw_state_fops);
2604
2605 debugfs_create_file_unsafe("amdgpu_dm_force_timing_sync", 0644, root,
2606 adev, &force_timing_sync_ops);
2607
2608 return 0;
2609}
2610