#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/*
 * amdgpu_dm sits between DRM and AMD's Display Core (DC): it translates
 * DRM atomic state into DC stream/plane programming and routes display
 * interrupts back into the DRM vblank and page-flip machinery.
 */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * Forward declarations of the helpers that build the DRM side of the
 * display topology (planes, CRTCs, connectors, encoders) and the atomic
 * check/commit entry points registered with DRM below.
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);

static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
                                   struct drm_atomic_state *state,
                                   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which crtc to get the counter
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
                struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
                                acrtc->base.state);

                if (acrtc_state->stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc_state->stream);
        }
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
                struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
                                acrtc->base.state);

                if (acrtc_state->stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc_state->stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev->ddev;
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (otg_inst == -1) {
                WARN_ON(1);
                return adev->mode_info.crtcs[0];
        }

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/*
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        struct dm_crtc_state *acrtc_state;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev->ddev->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                                 amdgpu_crtc->pflip_status,
                                 AMDGPU_FLIP_SUBMITTED,
                                 amdgpu_crtc->crtc_id,
                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        if (!e)
                WARN_ON(1);

        acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
        vrr_active = amdgpu_dm_vrr_active(acrtc_state);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from the late
                 * vblank irq handler in dm_vupdate_high_irq().
                 *
                 * We need to defer sending the event until then, so queue it
                 * onto the vblank event list; drm_crtc_handle_vblank() will
                 * deliver it with an up-to-date count and timestamp.
                 */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one incremented at start of this vblank
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

        DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                         amdgpu_crtc->crtc_id, amdgpu_crtc,
                         vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct dm_crtc_state *acrtc_state;
        unsigned long flags;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                acrtc_state = to_dm_crtc_state(acrtc->base.state);

                DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              amdgpu_dm_vrr_active(acrtc_state));

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping will give valid results
                 * while now done after front-porch. This will also deliver
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside front-porch.
                 */
                if (amdgpu_dm_vrr_active(acrtc_state)) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc_state->stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev->ddev->event_lock, flags);
                                mod_freesync_handle_v_update(
                                        adev->dm.freesync_module,
                                        acrtc_state->stream,
                                        &acrtc_state->vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                        adev->dm.dc,
                                        acrtc_state->stream,
                                        &acrtc_state->vrr_params.adjust);
                                spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
                        }
                }
        }
}

/*
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct dm_crtc_state *acrtc_state;
        unsigned long flags;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        acrtc_state = to_dm_crtc_state(acrtc->base.state);

        DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      amdgpu_dm_vrr_active(acrtc_state),
                      acrtc_state->active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!amdgpu_dm_vrr_active(acrtc_state))
                drm_crtc_handle_vblank(&acrtc->base);

        /*
         * Following stuff must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev->ddev->event_lock, flags);

        if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
            acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc_state->stream,
                                             &acrtc_state->vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
                                           &acrtc_state->vrr_params.adjust);
        }

        /*
         * If there is a pending pageflip on this CRTC but no active planes,
         * no pflip interrupt will ever fire to complete it. Send the
         * completion event from the vblank interrupt instead, so that
         * cursor-only and plane-disable commits still signal flip done.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc_state->active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
                                    enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                                    enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data  */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct dm_comressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        /* Size the buffer for the largest mode on the connector. */
        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r)
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
                }
        }
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}
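/*
 * dm_dmub_hw_init() - Point the DMUB service at its firmware and
 * framebuffer regions and bring the DMCUB microcontroller up. Returns 0
 * when DMUB is unsupported or successfully initialized, negative errno
 * otherwise.
 */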
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /*
         * When the firmware load type is PSP, the PSP bootloader places the
         * inst/const section for us. Otherwise back-door load it into the
         * inst/const window here.
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                       fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}
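/*
 * amdgpu_dm_init() - Create the DC core, hook up DM IRQ handling, bring
 * up DMUB, and build the DRM device (planes, CRTCs, connectors) on top
 * of the detected DC links.
 */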
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dc_callback_init init_params;
#endif
        int r;

        adev->dm.ddev = adev->ddev;
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&init_params, 0, sizeof(init_params));
#endif

        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);

        if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->pdev->revision;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_RAVEN:
        case CHIP_RENOIR:
                init_data.flags.gpu_vm_support = true;
                break;
        default:
                break;
        }

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
                init_data.flags.multi_mon_pp_mclk_switch = true;

        if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
                init_data.flags.disable_fractional_pwm = true;

        init_data.flags.power_down_display_on_boot = true;

        init_data.soc_bounding_box = adev->dm.soc_bounding_box;

        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
                adev->dm.dc->debug.force_single_disp_pipe_split = false;
                adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
        }

        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
                adev->dm.dc->debug.disable_stutter = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
                adev->dm.dc->debug.disable_dsc = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
                adev->dm.dc->debug.disable_clock_gate = true;

        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
                goto error;
        }

        dc_hardware_init(adev->dm.dc);

        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR(
                "amdgpu: failed to initialize freesync_module.\n");
        } else
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                adev->dm.freesync_module);

        amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->asic_type >= CHIP_RAVEN) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

                if (!adev->dm.hdcp_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

                dc_init_callbacks(adev->dm.dc, &init_params);
        }
#endif
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* Update the actual used number of crtc */
        adev->mode_info.num_crtc = adev->dm.display_indexes_num;

        /* create fake encoders for MST */
        dm_dp_create_fake_mst_encoders(adev);

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->dm.display_indexes_num; i++) {
                drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
        }

        amdgpu_dm_audio_fini(adev);

        amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_destroy(adev->dm.hdcp_workqueue);
                adev->dm.hdcp_workqueue = NULL;
        }

        if (adev->dm.dc)
                dc_deinit_callbacks(adev->dm.dc);
#endif
        if (adev->dm.dc->ctx->dmub_srv) {
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
                adev->dm.dc->ctx->dmub_srv = NULL;
        }

        if (adev->dm.dmub_bo)
                amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
                                      &adev->dm.dmub_bo_gpu_addr,
                                      &adev->dm.dmub_bo_cpu_addr);

        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);
        /*
         * TODO: pageflip, vlank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */

        if (adev->dm.cgs_device) {
                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
                adev->dm.cgs_device = NULL;
        }
        if (adev->dm.freesync_module) {
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }

        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);

        return;
}
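/*
 * load_dmcu_fw() - Request the DMCU firmware for ASICs that need it and
 * register it with the PSP loader. ASICs without a separate DMCU image
 * simply return 0.
 */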
static int load_dmcu_fw(struct amdgpu_device *adev)
{
        const char *fw_name_dmcu = NULL;
        int r;
        const struct dmcu_firmware_header_v1_0 *hdr;

        switch (adev->asic_type) {
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_RENOIR:
                return 0;
        case CHIP_NAVI12:
                fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
                break;
        case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else
                        return 0;
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
                return 0;
        }

        r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
        if (r == -ENOENT) {
                /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
                DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
                adev->dm.fw_dmcu = NULL;
                return 0;
        }
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
                        fw_name_dmcu);
                return r;
        }

        r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
                        fw_name_dmcu);
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
                return r;
        }

        hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

        DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

        return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
        struct amdgpu_device *adev = ctx;

        return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
                                     uint32_t value)
{
        struct amdgpu_device *adev = ctx;

        return dm_write_reg(adev->dm.dc->ctx, address, value);
}
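/*
 * dm_dmub_sw_init() - Fetch and validate the DMCUB firmware, create the
 * DMUB service, size its memory regions, and allocate the backing
 * framebuffer that dm_dmub_hw_init() later hands to the hardware.
 */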
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
        struct dmub_srv_create_params create_params;
        struct dmub_srv_region_params region_params;
        struct dmub_srv_region_info region_info;
        struct dmub_srv_fb_params fb_params;
        struct dmub_srv_fb_info *fb_info;
        struct dmub_srv *dmub_srv;
        const struct dmcub_firmware_header_v1_0 *hdr;
        const char *fw_name_dmub;
        enum dmub_asic dmub_asic;
        enum dmub_status status;
        int r;

        switch (adev->asic_type) {
        case CHIP_RENOIR:
                dmub_asic = DMUB_ASIC_DCN21;
                fw_name_dmub = FIRMWARE_RENOIR_DMUB;
                break;

        default:
                /* ASIC doesn't support DMUB. */
                return 0;
        }

        r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
        if (r) {
                DRM_ERROR("DMUB firmware loading failed: %d\n", r);
                return 0;
        }

        r = amdgpu_ucode_validate(adev->dm.dmub_fw);
        if (r) {
                DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

        /* Record the firmware version before it is logged below. */
        adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
                        AMDGPU_UCODE_ID_DMCUB;
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
                        adev->dm.dmub_fw;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

                DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
                         adev->dm.dmcub_fw_version);
        }

        adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
        dmub_srv = adev->dm.dmub_srv;

        if (!dmub_srv) {
                DRM_ERROR("Failed to allocate DMUB service!\n");
                return -ENOMEM;
        }

        memset(&create_params, 0, sizeof(create_params));
        create_params.user_ctx = adev;
        create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
        create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
        create_params.asic = dmub_asic;

        /* Create the DMUB service. */
        status = dmub_srv_create(dmub_srv, &create_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error creating DMUB service: %d\n", status);
                return -EINVAL;
        }

        /* Calculate the size of all the regions for the DMUB service. */
        memset(&region_params, 0, sizeof(region_params));

        region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                                        PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
        region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
        region_params.vbios_size = adev->bios_size;
        region_params.fw_bss_data =
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                le32_to_cpu(hdr->inst_const_bytes);
        region_params.fw_inst_const =
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                PSP_HEADER_BYTES;

        status = dmub_srv_calc_region_info(dmub_srv, &region_params,
                                           &region_info);

        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB region info: %d\n", status);
                return -EINVAL;
        }

        /*
         * Allocate a framebuffer based on the total size of all the regions.
         * TODO: Move this into GART.
         */
        r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
                                    &adev->dm.dmub_bo_gpu_addr,
                                    &adev->dm.dmub_bo_cpu_addr);
        if (r)
                return r;

        /* Rebase the regions on the framebuffer address. */
        memset(&fb_params, 0, sizeof(fb_params));
        fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
        fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
        fb_params.region_info = &region_info;

        adev->dm.dmub_fb_info =
                kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
        fb_info = adev->dm.dmub_fb_info;

        if (!fb_info) {
                DRM_ERROR(
                        "Failed to allocate framebuffer info for DMUB service!\n");
                return -ENOMEM;
        }

        status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
                return -EINVAL;
        }

        return 0;
}

static int dm_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        r = dm_dmub_sw_init(adev);
        if (r)
                return r;

        return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        kfree(adev->dm.dmub_fb_info);
        adev->dm.dmub_fb_info = NULL;

        if (adev->dm.dmub_srv) {
                dmub_srv_destroy(adev->dm.dmub_srv);
                adev->dm.dmub_srv = NULL;
        }

        if (adev->dm.dmub_fw) {
                release_firmware(adev->dm.dmub_fw);
                adev->dm.dmub_fw = NULL;
        }

        if (adev->dm.fw_dmcu) {
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
        }

        return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        int ret = 0;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->dc_link->type == dc_connection_mst_branch &&
                    aconnector->mst_mgr.aux) {
                        DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
                                         aconnector,
                                         aconnector->base.base.id);

                        ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
                        if (ret < 0) {
                                DRM_ERROR("DM_MST: Failed to start MST\n");
                                aconnector->dc_link->type =
                                        dc_connection_single;
                                break;
                        }
                }
        }
        drm_connector_list_iter_end(&iter);

        return ret;
}

static int dm_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        struct dmcu_iram_parameters params;
        unsigned int linear_lut[16];
        int i;
        struct dmcu *dmcu = NULL;
        bool ret;

        if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
                return detect_mst_link_for_all_connectors(adev->ddev);

        dmcu = adev->dm.dc->res_pool->dmcu;

        for (i = 0; i < 16; i++)
                linear_lut[i] = 0xFFFF * i / 15;

        params.set = 0;
        params.backlight_ramping_start = 0xCCCC;
        params.backlight_ramping_reduction = 0xCCCCCCCC;
        params.backlight_lut_array_size = 16;
        params.backlight_lut_array = linear_lut;

        /* Min backlight level after ABM reduction; don't allow below 1%.
         * 0x28F = 655, which is 1% of 0xFFFF.
         */
        params.min_abm_backlight = 0x28F;

        ret = dmcu_load_iram(dmcu, params);

        if (!ret)
                return -EINVAL;

        return detect_mst_link_for_all_connectors(adev->ddev);
}

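/*
 * s3_handle_mst() - Suspend or resume the MST topology managers for all
 * MST root connectors. On resume failure the topology is torn down and a
 * hotplug event is sent so userspace can re-probe.
 */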
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct drm_dp_mst_topology_mgr *mgr;
        int ret;
        bool need_hotplug = false;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->dc_link->type != dc_connection_mst_branch ||
                    aconnector->mst_port)
                        continue;

                mgr = &aconnector->mst_mgr;

                if (suspend) {
                        drm_dp_mst_topology_mgr_suspend(mgr);
                } else {
                        ret = drm_dp_mst_topology_mgr_resume(mgr, true);
                        if (ret < 0) {
                                drm_dp_mst_topology_mgr_set_mst(mgr, false);
                                need_hotplug = true;
                        }
                }
        }
        drm_connector_list_iter_end(&iter);

        if (need_hotplug)
                drm_kms_helper_hotplug_event(dev);
}

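/*
 * amdgpu_dm_smu_write_watermarks_table() - Re-commit the cached display
 * watermarks table to the SMU. Needed after resume because the SMU loses
 * the table across S3; only sw-SMU Navi1x ASICs take this path.
 */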
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
        struct smu_context *smu = &adev->smu;
        int ret = 0;

        if (!is_support_sw_smu(adev))
                return 0;

        /*
         * For Navi1x the DCN watermark clock settings are fixed values
         * computed by DC at dc_create() time; they must be handed to the
         * SMU at boot and again on resume from S3 via
         * smu_write_watermarks_table() below.
         *
         * Renoir also uses fixed watermark values, but DC programs them
         * through a different path (dcn10_init_hw -> notify_wm_ranges ->
         * smu_set_watermarks_for_clock_ranges), which is triggered from
         * dc_hardware_init / dc_set_power_state during amdgpu_dm_init and
         * dm_resume. This function therefore applies to Navi10/12/14 only,
         * not Renoir.
         */
        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
                break;
        default:
                return 0;
        }

        mutex_lock(&smu->mutex);

        /* pass data to smu controller */
        if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
            !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
                ret = smu_write_watermarks_table(smu);

                if (ret) {
                        mutex_unlock(&smu->mutex);
                        DRM_ERROR("Failed to update WMTABLE!\n");
                        return ret;
                }
                smu->watermarks_bitmap |= WATERMARKS_LOADED;
        }

        mutex_unlock(&smu->mutex);

        return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        /* Create DAL display manager */
        amdgpu_dm_init(adev);
        amdgpu_dm_hpd_init(adev);

        return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules
 * that were loaded.
 */
static int dm_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_dm_hpd_fini(adev);

        amdgpu_dm_irq_fini(adev);
        amdgpu_dm_fini(adev);
        return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

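/*
 * dm_gpureset_toggle_interrupts() - Enable or disable pflip and vblank
 * interrupts for every stream that has active planes; used to quiesce and
 * later restore display IRQs around GPU reset.
 */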
static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
                                          struct dc_state *state, bool enable)
{
        enum dc_irq_source irq_source;
        struct amdgpu_crtc *acrtc;
        int rc = -EBUSY;
        int i = 0;

        for (i = 0; i < state->stream_count; i++) {
                acrtc = get_crtc_by_otg_inst(
                                adev, state->stream_status[i].primary_otg_inst);

                if (acrtc && state->stream_status[i].plane_count != 0) {
                        irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
                        rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
                        DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
                                  acrtc->crtc_id, enable ? "en" : "dis", rc);
                        if (rc)
                                DRM_WARN("Failed to %s pflip interrupts\n",
                                         enable ? "enable" : "disable");

                        if (enable) {
                                rc = dm_enable_vblank(&acrtc->base);
                                if (rc)
                                        DRM_WARN("Failed to enable vblank interrupts\n");
                        } else {
                                dm_disable_vblank(&acrtc->base);
                        }
                }
        }
}

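/*
 * amdgpu_dm_commit_zero_streams() - Commit a copy of the current DC state
 * with every stream (and its planes) removed, effectively blanking all
 * displays. Used on suspend and before GPU reset.
 */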
enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
        struct dc_state *context = NULL;
        enum dc_status res = DC_ERROR_UNEXPECTED;
        int i;
        struct dc_stream_state *del_streams[MAX_PIPES];
        int del_streams_count = 0;

        memset(del_streams, 0, sizeof(del_streams));

        context = dc_create_state(dc);
        if (context == NULL)
                goto context_alloc_fail;

        dc_resource_state_copy_construct_current(dc, context);

        /* First remove from context all streams */
        for (i = 0; i < context->stream_count; i++) {
                struct dc_stream_state *stream = context->streams[i];

                del_streams[del_streams_count++] = stream;
        }

        /* Remove all planes for removed streams and then remove the streams */
        for (i = 0; i < del_streams_count; i++) {
                if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
                        res = DC_FAIL_DETACH_SURFACES;
                        goto fail;
                }

                res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
                if (res != DC_OK)
                        goto fail;
        }


        res = dc_validate_global_state(dc, context, false);

        if (res != DC_OK) {
                DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
                goto fail;
        }

        res = dc_commit_state(dc, context);

fail:
        dc_release_state(context);

context_alloc_fail:
        return res;
}

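/*
 * dm_suspend() - Suspend the display stack. During GPU reset the current
 * DC state is cached and dc_lock stays held until dm_resume() restores
 * it; otherwise the full DRM atomic state is saved and DC enters D3.
 */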
static int dm_suspend(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct amdgpu_display_manager *dm = &adev->dm;
        int ret = 0;

        if (adev->in_gpu_reset) {
                mutex_lock(&dm->dc_lock);
                dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

                dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

                amdgpu_dm_commit_zero_streams(dm->dc);

                amdgpu_dm_irq_suspend(adev);

                /* dc_lock is intentionally left held; dm_resume() releases it. */
                return ret;
        }

        WARN_ON(adev->dm.cached_state);
        adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

        s3_handle_mst(adev->ddev, true);

        amdgpu_dm_irq_suspend(adev);


        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

        return 0;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
                                             struct drm_crtc *crtc)
{
        uint32_t i;
        struct drm_connector_state *new_con_state;
        struct drm_connector *connector;
        struct drm_crtc *crtc_from_state;

        for_each_new_connector_in_state(state, connector, new_con_state, i) {
                crtc_from_state = new_con_state->crtc;

                if (crtc_from_state == crtc)
                        return to_amdgpu_dm_connector(connector);
        }

        return NULL;
}

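/*
 * emulated_link_detect() - Fake a sink on a forced connector when no
 * physical sink was detected, so a display state can still be created
 * for it.
 */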
static void emulated_link_detect(struct dc_link *link)
{
        struct dc_sink_init_data sink_init_data = { 0 };
        struct display_sink_capability sink_caps = { 0 };
        enum dc_edid_status edid_status;
        struct dc_context *dc_ctx = link->ctx;
        struct dc_sink *sink = NULL;
        struct dc_sink *prev_sink = NULL;

        link->type = dc_connection_none;
        prev_sink = link->local_sink;

        if (prev_sink != NULL)
                dc_sink_retain(prev_sink);

        switch (link->connector_signal) {
        case SIGNAL_TYPE_HDMI_TYPE_A: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
                break;
        }

        case SIGNAL_TYPE_DVI_SINGLE_LINK: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
                break;
        }

        case SIGNAL_TYPE_DVI_DUAL_LINK: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
                break;
        }

        case SIGNAL_TYPE_LVDS: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_LVDS;
                break;
        }

        case SIGNAL_TYPE_EDP: {
                sink_caps.transaction_type =
                        DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
                sink_caps.signal = SIGNAL_TYPE_EDP;
                break;
        }

        case SIGNAL_TYPE_DISPLAY_PORT: {
                sink_caps.transaction_type =
                        DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
                sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
                break;
        }

        default:
                DC_ERROR("Invalid connector type! signal:%d\n",
                         link->connector_signal);
                return;
        }

        sink_init_data.link = link;
        sink_init_data.sink_signal = sink_caps.signal;

        sink = dc_sink_create(&sink_init_data);
        if (!sink) {
                DC_ERROR("Failed to create sink!\n");
                return;
        }

        /* dc_sink_create returns a new reference */
        link->local_sink = sink;

        edid_status = dm_helpers_read_local_edid(
                        link->ctx,
                        link,
                        sink);

        if (edid_status != EDID_OK)
                DC_ERROR("Failed to read EDID");

}

static void dm_gpureset_commit_state(struct dc_state *dc_state,
                                     struct amdgpu_display_manager *dm)
{
        struct {
                struct dc_surface_update surface_updates[MAX_SURFACES];
                struct dc_plane_info plane_infos[MAX_SURFACES];
                struct dc_scaling_info scaling_infos[MAX_SURFACES];
                struct dc_flip_addrs flip_addrs[MAX_SURFACES];
                struct dc_stream_update stream_update;
        } *bundle;
        int k, m;

        bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

        if (!bundle) {
                dm_error("Failed to allocate update bundle\n");
                goto cleanup;
        }

        for (k = 0; k < dc_state->stream_count; k++) {
                bundle->stream_update.stream = dc_state->streams[k];

                for (m = 0; m < dc_state->stream_status->plane_count; m++) {
                        bundle->surface_updates[m].surface =
                                dc_state->stream_status->plane_states[m];
                        bundle->surface_updates[m].surface->force_full_update =
                                true;
                }
                dc_commit_updates_for_stream(
                        dm->dc, bundle->surface_updates,
                        dc_state->stream_status->plane_count,
                        dc_state->streams[k], &bundle->stream_update, dc_state);
        }

cleanup:
        kfree(bundle);

        return;
}

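/*
 * dm_resume() - Bring the display stack back up. The GPU-reset path
 * re-commits the DC state cached by dm_suspend() (and releases dc_lock);
 * the S3 path re-detects all links and restores the cached atomic state.
 */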
static int dm_resume(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct drm_device *ddev = adev->ddev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct drm_crtc *crtc;
        struct drm_crtc_state *new_crtc_state;
        struct dm_crtc_state *dm_new_crtc_state;
        struct drm_plane *plane;
        struct drm_plane_state *new_plane_state;
        struct dm_plane_state *dm_new_plane_state;
        struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
        enum dc_connection_type new_connection_type = dc_connection_none;
        struct dc_state *dc_state;
        int i, r, j;

        if (adev->in_gpu_reset) {
                dc_state = dm->cached_dc_state;

                r = dm_dmub_hw_init(adev);
                if (r)
                        DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

                dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
                dc_resume(dm->dc);

                amdgpu_dm_irq_resume_early(adev);

                for (i = 0; i < dc_state->stream_count; i++) {
                        dc_state->streams[i]->mode_changed = true;
                        for (j = 0; j < dc_state->stream_status->plane_count; j++) {
                                dc_state->stream_status->plane_states[j]->update_flags.raw
                                        = 0xffffffff;
                        }
                }

                WARN_ON(!dc_commit_state(dm->dc, dc_state));

                dm_gpureset_commit_state(dm->cached_dc_state, dm);

                dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

                dc_release_state(dm->cached_dc_state);
                dm->cached_dc_state = NULL;

                amdgpu_dm_irq_resume_late(adev);

                mutex_unlock(&dm->dc_lock);

                return 0;
        }
        /* Recreate dc_state - DC invalidates it when setting power state to S3. */
        dc_release_state(dm_state->context);
        dm_state->context = dc_create_state(dm->dc);
        /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
        dc_resource_state_construct(dm->dc, dm_state->context);

        /* Before powering on DC we need to re-initialize DMUB. */
        r = dm_dmub_hw_init(adev);
        if (r)
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

        /* power on hardware */
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

        /* program HPD filter */
        dc_resume(dm->dc);

        /*
         * early enable HPD Rx IRQ, should be done before set mode as short
         * pulse interrupts are used for MST
         */
        amdgpu_dm_irq_resume_early(adev);

        /* On resume we need to rewrite the MSTM control bits to enable MST */
        s3_handle_mst(ddev, false);

        /* Do detection */
        drm_connector_list_iter_begin(ddev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);

                /*
                 * this is the case when traversing through already created
                 * MST connectors, should be skipped
                 */
                if (aconnector->mst_port)
                        continue;

                mutex_lock(&aconnector->hpd_lock);
                if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
                        DRM_ERROR("KMS: Failed to detect connector\n");

                if (aconnector->base.force && new_connection_type == dc_connection_none)
                        emulated_link_detect(aconnector->dc_link);
                else
                        dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

                if (aconnector->fake_enable && aconnector->dc_link->local_sink)
                        aconnector->fake_enable = false;

                if (aconnector->dc_sink)
                        dc_sink_release(aconnector->dc_sink);
                aconnector->dc_sink = NULL;
                amdgpu_dm_update_connector_after_detect(aconnector);
                mutex_unlock(&aconnector->hpd_lock);
        }
        drm_connector_list_iter_end(&iter);

        /* Force mode set in atomic commit */
        for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
                new_crtc_state->active_changed = true;

        /*
         * atomic_check is expected to create the dc states. We need to release
         * them here, since they were duplicated as part of the suspend
         * procedure.
         */
        for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                if (dm_new_crtc_state->stream) {
                        WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
                        dc_stream_release(dm_new_crtc_state->stream);
                        dm_new_crtc_state->stream = NULL;
                }
        }

        for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
                dm_new_plane_state = to_dm_plane_state(new_plane_state);
                if (dm_new_plane_state->dc_state) {
                        WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
                        dc_plane_state_release(dm_new_plane_state->dc_state);
                        dm_new_plane_state->dc_state = NULL;
                }
        }

        drm_atomic_helper_resume(ddev, dm->cached_state);

        dm->cached_state = NULL;

        amdgpu_dm_irq_resume_late(adev);

        amdgpu_dm_smu_write_watermarks_table(adev);

        return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added
 * to the base driver's device list to be initialized and torn down
 * accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
        .name = "dm",
        .early_init = dm_early_init,
        .late_init = dm_late_init,
        .sw_init = dm_sw_init,
        .sw_fini = dm_sw_fini,
        .hw_init = dm_hw_init,
        .hw_fini = dm_hw_fini,
        .suspend = dm_suspend,
        .resume = dm_resume,
        .is_idle = dm_is_idle,
        .wait_for_idle = dm_wait_for_idle,
        .check_soft_reset = dm_check_soft_reset,
        .soft_reset = dm_soft_reset,
        .set_clockgating_state = dm_set_clockgating_state,
        .set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_DCE,
        .major = 1,
        .minor = 0,
        .rev = 0,
        .funcs = &amdgpu_dm_funcs,
};

/*
 * DRM mode configuration hooks: framebuffer creation plus the atomic
 * check/commit entry points implemented later in this file.
 */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
        .fb_create = amdgpu_display_user_framebuffer_create,
        .output_poll_changed = drm_fb_helper_output_poll_changed,
        .atomic_check = amdgpu_dm_atomic_check,
        .atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
        .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

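/*
 * update_connector_ext_caps() - Derive aux backlight capabilities and
 * min/max luminance for an eDP connector from its DPCD extended caps and
 * HDR sink metadata.
 */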
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
        u32 max_cll, min_cll, max, min, q, r;
        struct amdgpu_dm_backlight_caps *caps;
        struct amdgpu_display_manager *dm;
        struct drm_connector *conn_base;
        struct amdgpu_device *adev;
        struct dc_link *link = NULL;
        static const u8 pre_computed_values[] = {
                50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
                71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

        if (!aconnector || !aconnector->dc_link)
                return;

        link = aconnector->dc_link;
        if (link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        conn_base = &aconnector->base;
        adev = conn_base->dev->dev_private;
        dm = &adev->dm;
        caps = &dm->backlight_caps;
        caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
        caps->aux_support = false;
        max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
        min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

        if (caps->ext_caps->bits.oled == 1 ||
            caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
            caps->ext_caps->bits.hdr_aux_backlight_control == 1)
                caps->aux_support = true;

        /* From the specification (CTA-861-G), for calculating the maximum
         * luminance we need to use:
         *      Luminance = 50*2**(CV/32)
         * where CV is a one-byte value.
         * Evaluating this expression directly would need floating point
         * precision; to avoid that, write CV = 32*q + r (Euclidean division)
         * so that Luminance = 50*(2**q)*(2**(r/32)), and pre-compute the
         * 32 possible values of 50*2**(r/32), rounded, in
         * pre_computed_values above.
         */
        q = max_cll >> 5;
        r = max_cll % 32;
        max = (1 << q) * pre_computed_values[r];

        /* min luminance: maxLum * (CV/255)^2 / 100 */
        q = DIV_ROUND_CLOSEST(min_cll, 255);
        min = max * DIV_ROUND_CLOSEST((q * q), 100);

        caps->aux_max_input_signal = max;
        caps->aux_min_input_signal = min;
}

void amdgpu_dm_update_connector_after_detect(
                struct amdgpu_dm_connector *aconnector)
{
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;
        struct dc_sink *sink;

        /* MST handled by drm_mst framework */
        if (aconnector->mst_mgr.mst_state == true)
                return;


        sink = aconnector->dc_link->local_sink;
        if (sink)
                dc_sink_retain(sink);

        /*
         * Edid mgmt connector gets first update only in mode_valid hook and then
         * the connector sink is set to either fake or physical sink depends on link status.
         * Skip if already done during boot.
         */
        if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
                        && aconnector->dc_em_sink) {

                /*
                 * For S3 resume with headless use eml_sink to fake stream
                 * because on resume connector->sink is set to NULL
                 */
                mutex_lock(&dev->mode_config.mutex);

                if (sink) {
                        if (aconnector->dc_sink) {
                                amdgpu_dm_update_freesync_caps(connector, NULL);
                                /*
                                 * retain and release below are used to
                                 * bump up refcount for sink because the link doesn't point
                                 * to it anymore after disconnect, so on emulated remove,
                                 * dc_sink won't be freed prematurely.
                                 */
                                dc_sink_release(aconnector->dc_sink);
                        }
                        aconnector->dc_sink = sink;
                        dc_sink_retain(aconnector->dc_sink);
                        amdgpu_dm_update_freesync_caps(connector,
                                        aconnector->edid);
                } else {
                        amdgpu_dm_update_freesync_caps(connector, NULL);
                        if (!aconnector->dc_sink) {
                                aconnector->dc_sink = aconnector->dc_em_sink;
                                dc_sink_retain(aconnector->dc_sink);
                        }
                }

                mutex_unlock(&dev->mode_config.mutex);

                if (sink)
                        dc_sink_release(sink);
                return;
        }

        /*
         * TODO: temporary guard to look for proper fix
         * if this sink is MST sink, we should not do anything
         */
        if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
                dc_sink_release(sink);
                return;
        }

        if (aconnector->dc_sink == sink) {
                /*
                 * We got a DP short pulse (Link Loss, DP CTS, etc...).
                 * Do nothing!!
                 */
                DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
                                 aconnector->connector_id);
                if (sink)
                        dc_sink_release(sink);
                return;
        }

        DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
                         aconnector->connector_id, aconnector->dc_sink, sink);

        mutex_lock(&dev->mode_config.mutex);

        /*
         * 1. Update status of the drm connector
         * 2. Send an event and let userspace tell us what to do
         */
        if (sink) {
                /*
                 * TODO: check if we still need the S3 mode update workaround.
                 * If yes, put it here.
                 */
                if (aconnector->dc_sink)
                        amdgpu_dm_update_freesync_caps(connector, NULL);

                aconnector->dc_sink = sink;
                dc_sink_retain(aconnector->dc_sink);
                if (sink->dc_edid.length == 0) {
                        aconnector->edid = NULL;
                        if (aconnector->dc_link->aux_mode) {
                                drm_dp_cec_unset_edid(
                                        &aconnector->dm_dp_aux.aux);
                        }
                } else {
                        aconnector->edid =
                                (struct edid *)sink->dc_edid.raw_edid;

                        drm_connector_update_edid_property(connector,
                                                           aconnector->edid);

                        if (aconnector->dc_link->aux_mode)
                                drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
                                                    aconnector->edid);
                }

                amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
                update_connector_ext_caps(aconnector);
        } else {
                drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
                amdgpu_dm_update_freesync_caps(connector, NULL);
                drm_connector_update_edid_property(connector, NULL);
                aconnector->num_modes = 0;
                dc_sink_release(aconnector->dc_sink);
                aconnector->dc_sink = NULL;
                aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
                /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
                if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
                        connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
        }

        mutex_unlock(&dev->mode_config.mutex);

        if (sink)
                dc_sink_release(sink);
}

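/*
 * handle_hpd_irq() - Work handler for hot-plug detect interrupts: re-run
 * link detection, update the connector state, and notify userspace.
 */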
2216static void handle_hpd_irq(void *param)
2217{
2218 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2219 struct drm_connector *connector = &aconnector->base;
2220 struct drm_device *dev = connector->dev;
2221 enum dc_connection_type new_connection_type = dc_connection_none;
2222#ifdef CONFIG_DRM_AMD_DC_HDCP
2223 struct amdgpu_device *adev = dev->dev_private;
2224#endif
2225
2226
2227
2228
2229
2230 mutex_lock(&aconnector->hpd_lock);
2231
2232#ifdef CONFIG_DRM_AMD_DC_HDCP
2233 if (adev->dm.hdcp_workqueue)
2234 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2235#endif
2236 if (aconnector->fake_enable)
2237 aconnector->fake_enable = false;
2238
2239 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2240 DRM_ERROR("KMS: Failed to detect connector\n");
2241
2242 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2243 emulated_link_detect(aconnector->dc_link);
2244
2245
2246 drm_modeset_lock_all(dev);
2247 dm_restore_drm_connector_state(dev, connector);
2248 drm_modeset_unlock_all(dev);
2249
2250 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2251 drm_kms_helper_hotplug_event(dev);
2252
2253 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2254 amdgpu_dm_update_connector_after_detect(aconnector);
2255
2256
2257 drm_modeset_lock_all(dev);
2258 dm_restore_drm_connector_state(dev, connector);
2259 drm_modeset_unlock_all(dev);
2260
2261 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2262 drm_kms_helper_hotplug_event(dev);
2263 }
2264 mutex_unlock(&aconnector->hpd_lock);
2265
2266}
2267
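/*
 * dm_handle_hpd_rx_irq - service MST "downstream" interrupts.
 *
 * Reads the ESI bytes from DPCD, lets the MST manager process them and
 * ACKs each handled vector, looping (bounded by max_process_count) until
 * the sink stops raising new IRQs.
 */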
2268static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2269{
2270 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2271 uint8_t dret;
2272 bool new_irq_handled = false;
2273 int dpcd_addr;
2274 int dpcd_bytes_to_read;
2275
2276 const int max_process_count = 30;
2277 int process_count = 0;
2278
2279 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2280
2281 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2282 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2283 /* DPCD 0x200 - 0x201 for downstream IRQ */
2284 dpcd_addr = DP_SINK_COUNT;
2285 } else {
2286 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2287 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2288 dpcd_addr = DP_SINK_COUNT_ESI;
2289 }
2290
2291 dret = drm_dp_dpcd_read(
2292 &aconnector->dm_dp_aux.aux,
2293 dpcd_addr,
2294 esi,
2295 dpcd_bytes_to_read);
2296
2297 while (dret == dpcd_bytes_to_read &&
2298 process_count < max_process_count) {
2299 uint8_t retry;
2300 dret = 0;
2301
2302 process_count++;
2303
2304 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2305
2306 if (aconnector->mst_mgr.mst_state)
2307 drm_dp_mst_hpd_irq(
2308 &aconnector->mst_mgr,
2309 esi,
2310 &new_irq_handled);
2311
2312 if (new_irq_handled) {
2313 /* ACK the handled IRQ at DPCD to notify the sink (skip byte 0, the sink count). */
2314 const int ack_dpcd_bytes_to_write =
2315 dpcd_bytes_to_read - 1;
2316
2317 for (retry = 0; retry < 3; retry++) {
2318 uint8_t wret;
2319
2320 wret = drm_dp_dpcd_write(
2321 &aconnector->dm_dp_aux.aux,
2322 dpcd_addr + 1,
2323 &esi[1],
2324 ack_dpcd_bytes_to_write);
2325 if (wret == ack_dpcd_bytes_to_write)
2326 break;
2327 }
2328
2329 /* Check if there is a new IRQ to handle. */
2330 dret = drm_dp_dpcd_read(
2331 &aconnector->dm_dp_aux.aux,
2332 dpcd_addr,
2333 esi,
2334 dpcd_bytes_to_read);
2335
2336 new_irq_handled = false;
2337 } else {
2338 break;
2339 }
2340 }
2341
2342 if (process_count == max_process_count)
2343 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2344}
2345
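/*
 * handle_hpd_rx_irq - deferred work for an HPD short pulse (hpd_rx),
 * e.g. a DP link-status change or an MST sideband message.
 */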
2346static void handle_hpd_rx_irq(void *param)
2347{
2348 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2349 struct drm_connector *connector = &aconnector->base;
2350 struct drm_device *dev = connector->dev;
2351 struct dc_link *dc_link = aconnector->dc_link;
2352 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2353 enum dc_connection_type new_connection_type = dc_connection_none;
2354#ifdef CONFIG_DRM_AMD_DC_HDCP
2355 union hpd_irq_data hpd_irq_data;
2356 struct amdgpu_device *adev = dev->dev_private;
2357
2358 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2359#endif
2360
2361 /*
2362  * For non-MST links, take hpd_lock to serialize with
2363  * handle_hpd_irq(); MST branch devices are serviced by the MST
2364  * manager instead.
2365  */
2366 if (dc_link->type != dc_connection_mst_branch)
2367 mutex_lock(&aconnector->hpd_lock);
2368
2369
2370#ifdef CONFIG_DRM_AMD_DC_HDCP
2371 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2372#else
2373 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2374#endif
2375 !is_mst_root_connector) {
2376 /* Downstream port status changed. */
2377 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2378 DRM_ERROR("KMS: Failed to detect connector\n");
2379
2380 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2381 emulated_link_detect(dc_link);
2382
2383 if (aconnector->fake_enable)
2384 aconnector->fake_enable = false;
2385
2386 amdgpu_dm_update_connector_after_detect(aconnector);
2387
2388
2389 drm_modeset_lock_all(dev);
2390 dm_restore_drm_connector_state(dev, connector);
2391 drm_modeset_unlock_all(dev);
2392
2393 drm_kms_helper_hotplug_event(dev);
2394 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2395
2396 if (aconnector->fake_enable)
2397 aconnector->fake_enable = false;
2398
2399 amdgpu_dm_update_connector_after_detect(aconnector);
2400
2401
2402 drm_modeset_lock_all(dev);
2403 dm_restore_drm_connector_state(dev, connector);
2404 drm_modeset_unlock_all(dev);
2405
2406 drm_kms_helper_hotplug_event(dev);
2407 }
2408 }
2409#ifdef CONFIG_DRM_AMD_DC_HDCP
2410 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2411 if (adev->dm.hdcp_workqueue)
2412 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2413 }
2414#endif
2415 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2416 (dc_link->type == dc_connection_mst_branch))
2417 dm_handle_hpd_rx_irq(aconnector);
2418
2419 if (dc_link->type != dc_connection_mst_branch) {
2420 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2421 mutex_unlock(&aconnector->hpd_lock);
2422 }
2423}
2424
2425static void register_hpd_handlers(struct amdgpu_device *adev)
2426{
2427 struct drm_device *dev = adev->ddev;
2428 struct drm_connector *connector;
2429 struct amdgpu_dm_connector *aconnector;
2430 const struct dc_link *dc_link;
2431 struct dc_interrupt_params int_params = {0};
2432
2433 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2434 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2435
2436 list_for_each_entry(connector,
2437 &dev->mode_config.connector_list, head) {
2438
2439 aconnector = to_amdgpu_dm_connector(connector);
2440 dc_link = aconnector->dc_link;
2441
2442 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2443 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2444 int_params.irq_source = dc_link->irq_source_hpd;
2445
2446 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2447 handle_hpd_irq,
2448 (void *) aconnector);
2449 }
2450
2451 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2452
2453 /* Also register for the DP short-pulse (hpd_rx) interrupt. */
2454 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2455 int_params.irq_source = dc_link->irq_source_hpd_rx;
2456
2457 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2458 handle_hpd_rx_irq,
2459 (void *) aconnector);
2460 }
2461 }
2462}
2463
2464 /* Register IRQ sources and initialize IRQ callback functions. */
2465static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2466{
2467 struct dc *dc = adev->dm.dc;
2468 struct common_irq_params *c_irq_params;
2469 struct dc_interrupt_params int_params = {0};
2470 int r;
2471 int i;
2472 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2473
2474 if (adev->asic_type >= CHIP_VEGA10)
2475 client_id = SOC15_IH_CLIENTID_DCE;
2476
2477 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2478 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2479
2480 /*
2481  * Actions of amdgpu_irq_add_id():
2482  * 1. Register a set() function with the base driver.
2483  *    The base driver will call set() to enable or disable an
2484  *    interrupt in DC hardware.
2485  * 2. Register amdgpu_dm_irq_handler().
2486  *    The base driver will call amdgpu_dm_irq_handler() for ALL
2487  *    interrupts coming from DC hardware; the handler re-directs
2488  *    the interrupt to DC for acknowledging and handling.
2489  */
2490
2491 /* Use VBLANK interrupt */
2492 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2493 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2494 if (r) {
2495 DRM_ERROR("Failed to add crtc irq id!\n");
2496 return r;
2497 }
2498
2499 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2500 int_params.irq_source =
2501 dc_interrupt_to_irq_source(dc, i, 0);
2502
2503 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2504
2505 c_irq_params->adev = adev;
2506 c_irq_params->irq_src = int_params.irq_source;
2507
2508 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2509 dm_crtc_high_irq, c_irq_params);
2510 }
2511
2512 /* Use VUPDATE interrupt */
2513 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2514 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2515 if (r) {
2516 DRM_ERROR("Failed to add vupdate irq id!\n");
2517 return r;
2518 }
2519
2520 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2521 int_params.irq_source =
2522 dc_interrupt_to_irq_source(dc, i, 0);
2523
2524 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2525
2526 c_irq_params->adev = adev;
2527 c_irq_params->irq_src = int_params.irq_source;
2528
2529 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2530 dm_vupdate_high_irq, c_irq_params);
2531 }
2532
2533 /* Use GRPH_PFLIP interrupt */
2534 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2535 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2536 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2537 if (r) {
2538 DRM_ERROR("Failed to add page flip irq id!\n");
2539 return r;
2540 }
2541
2542 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2543 int_params.irq_source =
2544 dc_interrupt_to_irq_source(dc, i, 0);
2545
2546 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2547
2548 c_irq_params->adev = adev;
2549 c_irq_params->irq_src = int_params.irq_source;
2550
2551 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2552 dm_pflip_high_irq, c_irq_params);
2553
2554 }
2555
2556 /* HPD interrupt */
2557 r = amdgpu_irq_add_id(adev, client_id,
2558 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2559 if (r) {
2560 DRM_ERROR("Failed to add hpd irq id!\n");
2561 return r;
2562 }
2563
2564 register_hpd_handlers(adev);
2565
2566 return 0;
2567}
2568
2569#if defined(CONFIG_DRM_AMD_DC_DCN)
2570 /* Register IRQ sources and initialize IRQ callback functions. */
2571static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2572{
2573 struct dc *dc = adev->dm.dc;
2574 struct common_irq_params *c_irq_params;
2575 struct dc_interrupt_params int_params = {0};
2576 int r;
2577 int i;
2578
2579 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2580 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2581
2582 /*
2583  * Actions of amdgpu_irq_add_id():
2584  * 1. Register a set() function with the base driver.
2585  *    The base driver will call set() to enable or disable an
2586  *    interrupt in DC hardware.
2587  * 2. Register amdgpu_dm_irq_handler().
2588  *    The base driver will call amdgpu_dm_irq_handler() for ALL
2589  *    interrupts coming from DC hardware; the handler re-directs
2590  *    the interrupt to DC for acknowledging and handling.
2591  */
2592
2593
2594 /* Use VSTARTUP interrupt */
2595 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2596 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2597 i++) {
2598 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2599
2600 if (r) {
2601 DRM_ERROR("Failed to add crtc irq id!\n");
2602 return r;
2603 }
2604
2605 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2606 int_params.irq_source =
2607 dc_interrupt_to_irq_source(dc, i, 0);
2608
2609 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2610
2611 c_irq_params->adev = adev;
2612 c_irq_params->irq_src = int_params.irq_source;
2613
2614 amdgpu_dm_irq_register_interrupt(
2615 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2616 }
2617
2618 /*
2619  * Use the VUPDATE_NO_LOCK interrupt on DCN, which seems to
2620  * correspond to the regular VUPDATE interrupt.
2621  */
2622
2623 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2624 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2625 i++) {
2626 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2627
2628 if (r) {
2629 DRM_ERROR("Failed to add vupdate irq id!\n");
2630 return r;
2631 }
2632
2633 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2634 int_params.irq_source =
2635 dc_interrupt_to_irq_source(dc, i, 0);
2636
2637 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2638
2639 c_irq_params->adev = adev;
2640 c_irq_params->irq_src = int_params.irq_source;
2641
2642 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2643 dm_vupdate_high_irq, c_irq_params);
2644 }
2645
2646 /* Use GRPH_PFLIP interrupt */
2647 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2648 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2649 i++) {
2650 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2651 if (r) {
2652 DRM_ERROR("Failed to add page flip irq id!\n");
2653 return r;
2654 }
2655
2656 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2657 int_params.irq_source =
2658 dc_interrupt_to_irq_source(dc, i, 0);
2659
2660 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2661
2662 c_irq_params->adev = adev;
2663 c_irq_params->irq_src = int_params.irq_source;
2664
2665 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2666 dm_pflip_high_irq, c_irq_params);
2667
2668 }
2669
2670 /* HPD interrupt */
2671 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2672 &adev->hpd_irq);
2673 if (r) {
2674 DRM_ERROR("Failed to add hpd irq id!\n");
2675 return r;
2676 }
2677
2678 register_hpd_handlers(adev);
2679
2680 return 0;
2681}
2682#endif
2683
2684 /*
2685  * Acquires the lock for the atomic state object and returns
2686  * the new atomic state.
2687  *
2688  * This should only be called during atomic check.
2689  */
2690static int dm_atomic_get_state(struct drm_atomic_state *state,
2691 struct dm_atomic_state **dm_state)
2692{
2693 struct drm_device *dev = state->dev;
2694 struct amdgpu_device *adev = dev->dev_private;
2695 struct amdgpu_display_manager *dm = &adev->dm;
2696 struct drm_private_state *priv_state;
2697
2698 if (*dm_state)
2699 return 0;
2700
2701 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2702 if (IS_ERR(priv_state))
2703 return PTR_ERR(priv_state);
2704
2705 *dm_state = to_dm_atomic_state(priv_state);
2706
2707 return 0;
2708}
2709
2710struct dm_atomic_state *
2711dm_atomic_get_new_state(struct drm_atomic_state *state)
2712{
2713 struct drm_device *dev = state->dev;
2714 struct amdgpu_device *adev = dev->dev_private;
2715 struct amdgpu_display_manager *dm = &adev->dm;
2716 struct drm_private_obj *obj;
2717 struct drm_private_state *new_obj_state;
2718 int i;
2719
2720 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2721 if (obj->funcs == dm->atomic_obj.funcs)
2722 return to_dm_atomic_state(new_obj_state);
2723 }
2724
2725 return NULL;
2726}
2727
2728struct dm_atomic_state *
2729dm_atomic_get_old_state(struct drm_atomic_state *state)
2730{
2731 struct drm_device *dev = state->dev;
2732 struct amdgpu_device *adev = dev->dev_private;
2733 struct amdgpu_display_manager *dm = &adev->dm;
2734 struct drm_private_obj *obj;
2735 struct drm_private_state *old_obj_state;
2736 int i;
2737
2738 for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2739 if (obj->funcs == dm->atomic_obj.funcs)
2740 return to_dm_atomic_state(old_obj_state);
2741 }
2742
2743 return NULL;
2744}
2745
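/*
 * Duplicating the DM private state also deep-copies the dc_state context
 * (dc_copy_state), so the duplicate can be validated and committed
 * independently of the current hardware state.
 */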
2746static struct drm_private_state *
2747dm_atomic_duplicate_state(struct drm_private_obj *obj)
2748{
2749 struct dm_atomic_state *old_state, *new_state;
2750
2751 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2752 if (!new_state)
2753 return NULL;
2754
2755 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2756
2757 old_state = to_dm_atomic_state(obj->state);
2758
2759 if (old_state && old_state->context)
2760 new_state->context = dc_copy_state(old_state->context);
2761
2762 if (!new_state->context) {
2763 kfree(new_state);
2764 return NULL;
2765 }
2766
2767 return &new_state->base;
2768}
2769
2770static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2771 struct drm_private_state *state)
2772{
2773 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2774
2775 if (dm_state && dm_state->context)
2776 dc_release_state(dm_state->context);
2777
2778 kfree(dm_state);
2779}
2780
2781static struct drm_private_state_funcs dm_atomic_state_funcs = {
2782 .atomic_duplicate_state = dm_atomic_duplicate_state,
2783 .atomic_destroy_state = dm_atomic_destroy_state,
2784};
2785
2786static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2787{
2788 struct dm_atomic_state *state;
2789 int r;
2790
2791 adev->mode_info.mode_config_initialized = true;
2792
2793 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2794 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2795
2796 adev->ddev->mode_config.max_width = 16384;
2797 adev->ddev->mode_config.max_height = 16384;
2798
2799 adev->ddev->mode_config.preferred_depth = 24;
2800 adev->ddev->mode_config.prefer_shadow = 1;
2801
2802 adev->ddev->mode_config.async_page_flip = true;
2803
2804 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2805
2806 state = kzalloc(sizeof(*state), GFP_KERNEL);
2807 if (!state)
2808 return -ENOMEM;
2809
2810 state->context = dc_create_state(adev->dm.dc);
2811 if (!state->context) {
2812 kfree(state);
2813 return -ENOMEM;
2814 }
2815
2816 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2817
2818 drm_atomic_private_obj_init(adev->ddev,
2819 &adev->dm.atomic_obj,
2820 &state->base,
2821 &dm_atomic_state_funcs);
2822
2823 r = amdgpu_display_modeset_create_props(adev);
2824 if (r)
2825 return r;
2826
2827 r = amdgpu_dm_audio_init(adev);
2828 if (r)
2829 return r;
2830
2831 return 0;
2832}
2833
2834#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2835#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2836#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2837
2838#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2839 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2840
2841static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2842{
2843#if defined(CONFIG_ACPI)
2844 struct amdgpu_dm_backlight_caps caps;
2845
2846 if (dm->backlight_caps.caps_valid)
2847 return;
2848
2849 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2850 if (caps.caps_valid) {
2851 dm->backlight_caps.caps_valid = true;
2852 if (caps.aux_support)
2853 return;
2854 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2855 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2856 } else {
2857 dm->backlight_caps.min_input_signal =
2858 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2859 dm->backlight_caps.max_input_signal =
2860 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2861 }
2862#else
2863 if (dm->backlight_caps.aux_support)
2864 return;
2865
2866 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2867 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2868#endif
2869}
2870
2871static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2872{
2873 bool rc;
2874
2875 if (!link)
2876 return 1;
2877
2878 rc = dc_link_set_backlight_level_nits(link, true, brightness,
2879 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2880
2881 return rc ? 0 : 1;
2882}
2883
2884static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2885 const uint32_t user_brightness)
2886{
2887 u32 min, max, conversion_pace;
2888 u32 brightness = user_brightness;
2889
2890 if (!caps)
2891 goto out;
2892
2893 if (!caps->aux_support) {
2894 max = caps->max_input_signal;
2895 min = caps->min_input_signal;
2896
2897
2898 /*
2899  * The brightness input is in the range 0-255. It needs to be
2900  * rescaled to sit between the requested min and max input
2901  * signal, and scaled up by 0x101 to match the DC interface,
2902  * which has a range of 0 to 0xffff.
2903  */
2904 conversion_pace = 0x101;
2905 brightness =
2906 user_brightness
2907 * conversion_pace
2908 * (max - min)
2909 / AMDGPU_MAX_BL_LEVEL
2910 + min * conversion_pace;
2911 } else {
2912 /*
2913  * TODO: This is a linear interpolation, which is OK but not
2914  * optimal; something close to the Perceptual Quantizer (PQ)
2915  * curve would likely be better.
2916  */
2917 max = caps->aux_max_input_signal;
2918 min = caps->aux_min_input_signal;
2919
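/*
 * Linearly interpolate between the aux min and max input signal
 * (apparently in nits, per the ACPI backlight caps), then scale by 1000
 * since dc_link_set_backlight_level_nits() takes millinits.
 */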
2920 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2921 + user_brightness * max;
2922
2923 brightness *= 1000;
2924 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
2925 }
2926
2927out:
2928 return brightness;
2929}
2930
2931static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2932{
2933 struct amdgpu_display_manager *dm = bl_get_data(bd);
2934 struct amdgpu_dm_backlight_caps caps;
2935 struct dc_link *link = NULL;
2936 u32 brightness;
2937 bool rc;
2938
2939 amdgpu_dm_update_backlight_caps(dm);
2940 caps = dm->backlight_caps;
2941
2942 link = (struct dc_link *)dm->backlight_link;
2943
2944 brightness = convert_brightness(&caps, bd->props.brightness);
2945
2946 if (caps.aux_support)
2947 return set_backlight_via_aux(link, brightness);
2948
2949 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2950
2951 return rc ? 0 : 1;
2952}
2953
2954static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2955{
2956 struct amdgpu_display_manager *dm = bl_get_data(bd);
2957 int ret = dc_link_get_backlight_level(dm->backlight_link);
2958
2959 if (ret == DC_ERROR_UNEXPECTED)
2960 return bd->props.brightness;
2961 return ret;
2962}
2963
2964static const struct backlight_ops amdgpu_dm_backlight_ops = {
2965 .options = BL_CORE_SUSPENDRESUME,
2966 .get_brightness = amdgpu_dm_backlight_get_brightness,
2967 .update_status = amdgpu_dm_backlight_update_status,
2968};
2969
2970static void
2971amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2972{
2973 char bl_name[16];
2974 struct backlight_properties props = { 0 };
2975
2976 amdgpu_dm_update_backlight_caps(dm);
2977
2978 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2979 props.brightness = AMDGPU_MAX_BL_LEVEL;
2980 props.type = BACKLIGHT_RAW;
2981
2982 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2983 dm->adev->ddev->primary->index);
2984
2985 dm->backlight_dev = backlight_device_register(bl_name,
2986 dm->adev->ddev->dev,
2987 dm,
2988 &amdgpu_dm_backlight_ops,
2989 &props);
2990
2991 if (IS_ERR(dm->backlight_dev))
2992 DRM_ERROR("DM: Backlight registration failed!\n");
2993 else
2994 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2995}
2996
2997#endif
2998
2999static int initialize_plane(struct amdgpu_display_manager *dm,
3000 struct amdgpu_mode_info *mode_info, int plane_id,
3001 enum drm_plane_type plane_type,
3002 const struct dc_plane_cap *plane_cap)
3003{
3004 struct drm_plane *plane;
3005 unsigned long possible_crtcs;
3006 int ret = 0;
3007
3008 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3009 if (!plane) {
3010 DRM_ERROR("KMS: Failed to allocate plane\n");
3011 return -ENOMEM;
3012 }
3013 plane->type = plane_type;
3014
3015 /*
3016  * HACK: IGT tests expect that the primary plane for a CRTC can
3017  * only have one possible CRTC. Only expose support for any CRTC
3018  * if the plane is not going to be used as a primary plane for a
3019  * CRTC - i.e. for overlay or underlay planes.
3020  */
3021 possible_crtcs = 1 << plane_id;
3022 if (plane_id >= dm->dc->caps.max_streams)
3023 possible_crtcs = 0xff;
3024
3025 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3026
3027 if (ret) {
3028 DRM_ERROR("KMS: Failed to initialize plane\n");
3029 kfree(plane);
3030 return ret;
3031 }
3032
3033 if (mode_info)
3034 mode_info->planes[plane_id] = plane;
3035
3036 return ret;
3037}
3038
3039
3040static void register_backlight_device(struct amdgpu_display_manager *dm,
3041 struct dc_link *link)
3042{
3043#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3044 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3045
3046 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3047 link->type != dc_connection_none) {
3048 /*
3049  * Even if registration fails, DM initialization should
3050  * continue: not having backlight control is better than a
3051  * black screen.
3052  */
3053 amdgpu_dm_register_backlight_device(dm);
3054
3055 if (dm->backlight_dev)
3056 dm->backlight_link = link;
3057 }
3058#endif
3059}
3060
3061 /*
3062  * In this architecture, the association
3063  * connector -> encoder -> crtc
3064  * is not really required. The crtc and connector will hold the
3065  * display_index as an abstraction to use with the DAL component.
3066  *
3067  * Returns 0 on success.
3068  */
3069
3070static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3071{
3072 struct amdgpu_display_manager *dm = &adev->dm;
3073 int32_t i;
3074 struct amdgpu_dm_connector *aconnector = NULL;
3075 struct amdgpu_encoder *aencoder = NULL;
3076 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3077 uint32_t link_cnt;
3078 int32_t primary_planes;
3079 enum dc_connection_type new_connection_type = dc_connection_none;
3080 const struct dc_plane_cap *plane;
3081
3082 link_cnt = dm->dc->caps.max_links;
3083 if (amdgpu_dm_mode_config_init(dm->adev)) {
3084 DRM_ERROR("DM: Failed to initialize mode config\n");
3085 return -EINVAL;
3086 }
3087
3088 /* There is one primary plane per CRTC. */
3089 primary_planes = dm->dc->caps.max_streams;
3090 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3091
3092 /*
3093  * Initialize primary planes, implicit planes for legacy IOCTLs.
3094  * Order is reversed to match the iteration order in atomic check.
3095  */
3096 for (i = (primary_planes - 1); i >= 0; i--) {
3097 plane = &dm->dc->caps.planes[i];
3098
3099 if (initialize_plane(dm, mode_info, i,
3100 DRM_PLANE_TYPE_PRIMARY, plane)) {
3101 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3102 goto fail;
3103 }
3104 }
3105
3106 /*
3107  * Initialize overlay planes, index starting after primary planes.
3108  * These planes have a higher DRM index than the primary planes
3109  * since they should be considered as having a higher z-order.
3110  * Order is reversed to match the iteration order in atomic check.
3111  *
3112  * Only DCN is supported for now, and only one plane is exposed so
3113  * userspace is not encouraged to use up all the pipes.
3114  */
3115 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3116 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3117
3118 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3119 continue;
3120
3121 if (!plane->blends_with_above || !plane->blends_with_below)
3122 continue;
3123
3124 if (!plane->pixel_format_support.argb8888)
3125 continue;
3126
3127 if (initialize_plane(dm, NULL, primary_planes + i,
3128 DRM_PLANE_TYPE_OVERLAY, plane)) {
3129 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3130 goto fail;
3131 }
3132
3133 /* Only create one overlay plane for now. */
3134 break;
3135 }
3136
3137 for (i = 0; i < dm->dc->caps.max_streams; i++)
3138 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3139 DRM_ERROR("KMS: Failed to initialize crtc\n");
3140 goto fail;
3141 }
3142
3143 dm->display_indexes_num = dm->dc->caps.max_streams;
3144
3145 /* Loop over all the links/connectors on the board. */
3146 for (i = 0; i < link_cnt; i++) {
3147 struct dc_link *link = NULL;
3148
3149 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3150 DRM_ERROR(
3151 "KMS: Cannot support more than %d display indexes\n",
3152 AMDGPU_DM_MAX_DISPLAY_INDEX);
3153 continue;
3154 }
3155
3156 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3157 if (!aconnector)
3158 goto fail;
3159
3160 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3161 if (!aencoder)
3162 goto fail;
3163
3164 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3165 DRM_ERROR("KMS: Failed to initialize encoder\n");
3166 goto fail;
3167 }
3168
3169 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3170 DRM_ERROR("KMS: Failed to initialize connector\n");
3171 goto fail;
3172 }
3173
3174 link = dc_get_link_at_index(dm->dc, i);
3175
3176 if (!dc_link_detect_sink(link, &new_connection_type))
3177 DRM_ERROR("KMS: Failed to detect connector\n");
3178
3179 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3180 emulated_link_detect(link);
3181 amdgpu_dm_update_connector_after_detect(aconnector);
3182
3183 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3184 amdgpu_dm_update_connector_after_detect(aconnector);
3185 register_backlight_device(dm, link);
3186 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3187 amdgpu_dm_set_psr_caps(link);
3188 }
3189
3190
3191 }
3192
3193 /* Software is initialized; register the interrupt handlers. */
3194 switch (adev->asic_type) {
3195 case CHIP_BONAIRE:
3196 case CHIP_HAWAII:
3197 case CHIP_KAVERI:
3198 case CHIP_KABINI:
3199 case CHIP_MULLINS:
3200 case CHIP_TONGA:
3201 case CHIP_FIJI:
3202 case CHIP_CARRIZO:
3203 case CHIP_STONEY:
3204 case CHIP_POLARIS11:
3205 case CHIP_POLARIS10:
3206 case CHIP_POLARIS12:
3207 case CHIP_VEGAM:
3208 case CHIP_VEGA10:
3209 case CHIP_VEGA12:
3210 case CHIP_VEGA20:
3211 if (dce110_register_irq_handlers(dm->adev)) {
3212 DRM_ERROR("DM: Failed to initialize IRQ\n");
3213 goto fail;
3214 }
3215 break;
3216#if defined(CONFIG_DRM_AMD_DC_DCN)
3217 case CHIP_RAVEN:
3218 case CHIP_NAVI12:
3219 case CHIP_NAVI10:
3220 case CHIP_NAVI14:
3221 case CHIP_RENOIR:
3222 if (dcn10_register_irq_handlers(dm->adev)) {
3223 DRM_ERROR("DM: Failed to initialize IRQ\n");
3224 goto fail;
3225 }
3226 break;
3227#endif
3228 default:
3229 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3230 goto fail;
3231 }
3232
3233 /* No userspace support yet. */
3234 dm->dc->debug.disable_tri_buf = true;
3235
3236 return 0;
3237fail:
3238 kfree(aencoder);
3239 kfree(aconnector);
3240
3241 return -EINVAL;
3242}
3243
3244static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3245{
3246 drm_mode_config_cleanup(dm->ddev);
3247 drm_atomic_private_obj_fini(&dm->atomic_obj);
3249}
3250
3251 /******************************************************************************
3252  * amdgpu_display_funcs functions
3253  *****************************************************************************/
3254
3255 /*
3256  * dm_bandwidth_update - program display watermarks
3257  *
3258  * @adev: amdgpu_device pointer
3259  *
3260  * Calculate and program the display watermarks and link settings.
3261  */
3262static void dm_bandwidth_update(struct amdgpu_device *adev)
3263{
3264 /* TODO: implement later */
3265}
3266
3267static const struct amdgpu_display_funcs dm_display_funcs = {
3268 .bandwidth_update = dm_bandwidth_update,
3269 .vblank_get_counter = dm_vblank_get_counter,
3270 .backlight_set_level = NULL,
3271 .backlight_get_level = NULL,
3272 .hpd_sense = NULL,
3273 .hpd_set_polarity = NULL,
3274 .hpd_get_gpio_reg = NULL,
3275 .page_flip_get_scanoutpos =
3276 dm_crtc_get_scanoutpos,
3277 .add_encoder = NULL,
3278 .add_connector = NULL,
3279};
3280
3281#if defined(CONFIG_DEBUG_KERNEL_DC)
3282
3283static ssize_t s3_debug_store(struct device *device,
3284 struct device_attribute *attr,
3285 const char *buf,
3286 size_t count)
3287{
3288 int ret;
3289 int s3_state;
3290 struct drm_device *drm_dev = dev_get_drvdata(device);
3291 struct amdgpu_device *adev = drm_dev->dev_private;
3292
3293 ret = kstrtoint(buf, 0, &s3_state);
3294
3295 if (ret == 0) {
3296 if (s3_state) {
3297 dm_resume(adev);
3298 drm_kms_helper_hotplug_event(adev->ddev);
3299 } else
3300 dm_suspend(adev);
3301 }
3302
3303 return ret == 0 ? count : 0;
3304}
3305
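/*
 * Debug-only sysfs hook: writing a non-zero value runs dm_resume() plus a
 * hotplug event, writing 0 runs dm_suspend(). Useful for exercising the
 * S3 paths without a full system suspend.
 */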
3306DEVICE_ATTR_WO(s3_debug);
3307
3308#endif
3309
3310static int dm_early_init(void *handle)
3311{
3312 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3313
3314 switch (adev->asic_type) {
3315 case CHIP_BONAIRE:
3316 case CHIP_HAWAII:
3317 adev->mode_info.num_crtc = 6;
3318 adev->mode_info.num_hpd = 6;
3319 adev->mode_info.num_dig = 6;
3320 break;
3321 case CHIP_KAVERI:
3322 adev->mode_info.num_crtc = 4;
3323 adev->mode_info.num_hpd = 6;
3324 adev->mode_info.num_dig = 7;
3325 break;
3326 case CHIP_KABINI:
3327 case CHIP_MULLINS:
3328 adev->mode_info.num_crtc = 2;
3329 adev->mode_info.num_hpd = 6;
3330 adev->mode_info.num_dig = 6;
3331 break;
3332 case CHIP_FIJI:
3333 case CHIP_TONGA:
3334 adev->mode_info.num_crtc = 6;
3335 adev->mode_info.num_hpd = 6;
3336 adev->mode_info.num_dig = 7;
3337 break;
3338 case CHIP_CARRIZO:
3339 adev->mode_info.num_crtc = 3;
3340 adev->mode_info.num_hpd = 6;
3341 adev->mode_info.num_dig = 9;
3342 break;
3343 case CHIP_STONEY:
3344 adev->mode_info.num_crtc = 2;
3345 adev->mode_info.num_hpd = 6;
3346 adev->mode_info.num_dig = 9;
3347 break;
3348 case CHIP_POLARIS11:
3349 case CHIP_POLARIS12:
3350 adev->mode_info.num_crtc = 5;
3351 adev->mode_info.num_hpd = 5;
3352 adev->mode_info.num_dig = 5;
3353 break;
3354 case CHIP_POLARIS10:
3355 case CHIP_VEGAM:
3356 adev->mode_info.num_crtc = 6;
3357 adev->mode_info.num_hpd = 6;
3358 adev->mode_info.num_dig = 6;
3359 break;
3360 case CHIP_VEGA10:
3361 case CHIP_VEGA12:
3362 case CHIP_VEGA20:
3363 adev->mode_info.num_crtc = 6;
3364 adev->mode_info.num_hpd = 6;
3365 adev->mode_info.num_dig = 6;
3366 break;
3367#if defined(CONFIG_DRM_AMD_DC_DCN)
3368 case CHIP_RAVEN:
3369 adev->mode_info.num_crtc = 4;
3370 adev->mode_info.num_hpd = 4;
3371 adev->mode_info.num_dig = 4;
3372 break;
3373#endif
3374 case CHIP_NAVI10:
3375 case CHIP_NAVI12:
3376 adev->mode_info.num_crtc = 6;
3377 adev->mode_info.num_hpd = 6;
3378 adev->mode_info.num_dig = 6;
3379 break;
3380 case CHIP_NAVI14:
3381 adev->mode_info.num_crtc = 5;
3382 adev->mode_info.num_hpd = 5;
3383 adev->mode_info.num_dig = 5;
3384 break;
3385 case CHIP_RENOIR:
3386 adev->mode_info.num_crtc = 4;
3387 adev->mode_info.num_hpd = 4;
3388 adev->mode_info.num_dig = 4;
3389 break;
3390 default:
3391 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3392 return -EINVAL;
3393 }
3394
3395 amdgpu_dm_set_irq_funcs(adev);
3396
3397 if (adev->mode_info.funcs == NULL)
3398 adev->mode_info.funcs = &dm_display_funcs;
3399
3400 /*
3401  * Note: Do NOT change adev->audio_endpt_rreg and
3402  * adev->audio_endpt_wreg because they are initialised in
3403  * amdgpu_device_init().
3404  */
3405#if defined(CONFIG_DEBUG_KERNEL_DC)
3406 device_create_file(
3407 adev->ddev->dev,
3408 &dev_attr_s3_debug);
3409#endif
3410
3411 return 0;
3412}
3413
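/*
 * Note: new_stream and old_stream are currently unused here; a modeset is
 * required only when the DRM core flags one and the CRTC is both enabled
 * and active.
 */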
3414static bool modeset_required(struct drm_crtc_state *crtc_state,
3415 struct dc_stream_state *new_stream,
3416 struct dc_stream_state *old_stream)
3417{
3418 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3419 return false;
3420
3421 if (!crtc_state->enable)
3422 return false;
3423
3424 return crtc_state->active;
3425}
3426
3427static bool modereset_required(struct drm_crtc_state *crtc_state)
3428{
3429 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3430 return false;
3431
3432 return !crtc_state->enable || !crtc_state->active;
3433}
3434
3435static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3436{
3437 drm_encoder_cleanup(encoder);
3438 kfree(encoder);
3439}
3440
3441static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3442 .destroy = amdgpu_dm_encoder_destroy,
3443};
3444
3445
3446static int fill_dc_scaling_info(const struct drm_plane_state *state,
3447 struct dc_scaling_info *scaling_info)
3448{
3449 int scale_w, scale_h;
3450
3451 memset(scaling_info, 0, sizeof(*scaling_info));
3452
3453 /* Source is in fixed-point 16.16; the fractional part is ignored for now. */
3454 scaling_info->src_rect.x = state->src_x >> 16;
3455 scaling_info->src_rect.y = state->src_y >> 16;
3456
3457 scaling_info->src_rect.width = state->src_w >> 16;
3458 if (scaling_info->src_rect.width == 0)
3459 return -EINVAL;
3460
3461 scaling_info->src_rect.height = state->src_h >> 16;
3462 if (scaling_info->src_rect.height == 0)
3463 return -EINVAL;
3464
3465 scaling_info->dst_rect.x = state->crtc_x;
3466 scaling_info->dst_rect.y = state->crtc_y;
3467
3468 if (state->crtc_w == 0)
3469 return -EINVAL;
3470
3471 scaling_info->dst_rect.width = state->crtc_w;
3472
3473 if (state->crtc_h == 0)
3474 return -EINVAL;
3475
3476 scaling_info->dst_rect.height = state->crtc_h;
3477
3478 /* DRM does not specify clipping on the destination output. */
3479 scaling_info->clip_rect = scaling_info->dst_rect;
3480
3481 /* TODO: validate scaling per-format with DC plane caps. */
3482 scale_w = scaling_info->dst_rect.width * 1000 /
3483 scaling_info->src_rect.width;
3484
3485 if (scale_w < 250 || scale_w > 16000)
3486 return -EINVAL;
3487
3488 scale_h = scaling_info->dst_rect.height * 1000 /
3489 scaling_info->src_rect.height;
3490
3491 if (scale_h < 250 || scale_h > 16000)
3492 return -EINVAL;
3493
3494 /*
3495  * The "scaling_quality" can be ignored for now; quality = 0 has DC
3496  * assume reasonable defaults based on the format.
3497  */
3498
3499 return 0;
3500}
3501
3502static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3503 uint64_t *tiling_flags, bool *tmz_surface)
3504{
3505 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3506 int r = amdgpu_bo_reserve(rbo, false);
3507
3508 if (unlikely(r)) {
3509 /* Don't print an error message for -ERESTARTSYS. */
3510 if (r != -ERESTARTSYS)
3511 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3512 return r;
3513 }
3514
3515 if (tiling_flags)
3516 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3517
3518 if (tmz_surface)
3519 *tmz_surface = amdgpu_bo_encrypted(rbo);
3520
3521 amdgpu_bo_unreserve(rbo);
3522
3523 return r;
3524}
3525
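/*
 * DCC_OFFSET_256B stores the DCC metadata offset in units of 256 bytes;
 * an offset of 0 means the buffer has no DCC surface.
 */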
3526static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3527{
3528 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3529
3530 return offset ? (address + offset * 256) : 0;
3531}
3532
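/*
 * Queries DC for DCC compression support of the given surface and, when
 * usable, fills the DCC parameters and metadata address. Returning 0
 * with dcc->enable left at 0 simply means DCC stays off for this plane.
 */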
3533static int
3534fill_plane_dcc_attributes(struct amdgpu_device *adev,
3535 const struct amdgpu_framebuffer *afb,
3536 const enum surface_pixel_format format,
3537 const enum dc_rotation_angle rotation,
3538 const struct plane_size *plane_size,
3539 const union dc_tiling_info *tiling_info,
3540 const uint64_t info,
3541 struct dc_plane_dcc_param *dcc,
3542 struct dc_plane_address *address,
3543 bool force_disable_dcc)
3544{
3545 struct dc *dc = adev->dm.dc;
3546 struct dc_dcc_surface_param input;
3547 struct dc_surface_dcc_cap output;
3548 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3549 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3550 uint64_t dcc_address;
3551
3552 memset(&input, 0, sizeof(input));
3553 memset(&output, 0, sizeof(output));
3554
3555 if (force_disable_dcc)
3556 return 0;
3557
3558 if (!offset)
3559 return 0;
3560
3561 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3562 return 0;
3563
3564 if (!dc->cap_funcs.get_dcc_compression_cap)
3565 return -EINVAL;
3566
3567 input.format = format;
3568 input.surface_size.width = plane_size->surface_size.width;
3569 input.surface_size.height = plane_size->surface_size.height;
3570 input.swizzle_mode = tiling_info->gfx9.swizzle;
3571
3572 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3573 input.scan = SCAN_DIRECTION_HORIZONTAL;
3574 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3575 input.scan = SCAN_DIRECTION_VERTICAL;
3576
3577 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3578 return -EINVAL;
3579
3580 if (!output.capable)
3581 return -EINVAL;
3582
3583 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3584 return -EINVAL;
3585
3586 dcc->enable = 1;
3587 dcc->meta_pitch =
3588 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3589 dcc->independent_64b_blks = i64b;
3590
3591 dcc_address = get_dcc_address(afb->address, info);
3592 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3593 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3594
3595 return 0;
3596}
3597
3598static int
3599fill_plane_buffer_attributes(struct amdgpu_device *adev,
3600 const struct amdgpu_framebuffer *afb,
3601 const enum surface_pixel_format format,
3602 const enum dc_rotation_angle rotation,
3603 const uint64_t tiling_flags,
3604 union dc_tiling_info *tiling_info,
3605 struct plane_size *plane_size,
3606 struct dc_plane_dcc_param *dcc,
3607 struct dc_plane_address *address,
3608 bool tmz_surface,
3609 bool force_disable_dcc)
3610{
3611 const struct drm_framebuffer *fb = &afb->base;
3612 int ret;
3613
3614 memset(tiling_info, 0, sizeof(*tiling_info));
3615 memset(plane_size, 0, sizeof(*plane_size));
3616 memset(dcc, 0, sizeof(*dcc));
3617 memset(address, 0, sizeof(*address));
3618
3619 address->tmz_surface = tmz_surface;
3620
3621 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3622 plane_size->surface_size.x = 0;
3623 plane_size->surface_size.y = 0;
3624 plane_size->surface_size.width = fb->width;
3625 plane_size->surface_size.height = fb->height;
3626 plane_size->surface_pitch =
3627 fb->pitches[0] / fb->format->cpp[0];
3628
3629 address->type = PLN_ADDR_TYPE_GRAPHICS;
3630 address->grph.addr.low_part = lower_32_bits(afb->address);
3631 address->grph.addr.high_part = upper_32_bits(afb->address);
3632 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3633 uint64_t chroma_addr = afb->address + fb->offsets[1];
3634
3635 plane_size->surface_size.x = 0;
3636 plane_size->surface_size.y = 0;
3637 plane_size->surface_size.width = fb->width;
3638 plane_size->surface_size.height = fb->height;
3639 plane_size->surface_pitch =
3640 fb->pitches[0] / fb->format->cpp[0];
3641
3642 plane_size->chroma_size.x = 0;
3643 plane_size->chroma_size.y = 0;
3644
3645 plane_size->chroma_size.width = fb->width / 2;
3646 plane_size->chroma_size.height = fb->height / 2;
3647
3648 plane_size->chroma_pitch =
3649 fb->pitches[1] / fb->format->cpp[1];
3650
3651 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3652 address->video_progressive.luma_addr.low_part =
3653 lower_32_bits(afb->address);
3654 address->video_progressive.luma_addr.high_part =
3655 upper_32_bits(afb->address);
3656 address->video_progressive.chroma_addr.low_part =
3657 lower_32_bits(chroma_addr);
3658 address->video_progressive.chroma_addr.high_part =
3659 upper_32_bits(chroma_addr);
3660 }
3661
3662 /* Fill GFX8 tiling params. */
3663 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3664 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3665
3666 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3667 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3668 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3669 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3670 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3671
3672 /* Program the GFX8 macro-tile parameters from the tiling flags. */
3673 tiling_info->gfx8.num_banks = num_banks;
3674 tiling_info->gfx8.array_mode =
3675 DC_ARRAY_2D_TILED_THIN1;
3676 tiling_info->gfx8.tile_split = tile_split;
3677 tiling_info->gfx8.bank_width = bankw;
3678 tiling_info->gfx8.bank_height = bankh;
3679 tiling_info->gfx8.tile_aspect = mtaspect;
3680 tiling_info->gfx8.tile_mode =
3681 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3682 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3683 == DC_ARRAY_1D_TILED_THIN1) {
3684 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3685 }
3686
3687 tiling_info->gfx8.pipe_config =
3688 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3689
3690 if (adev->asic_type == CHIP_VEGA10 ||
3691 adev->asic_type == CHIP_VEGA12 ||
3692 adev->asic_type == CHIP_VEGA20 ||
3693 adev->asic_type == CHIP_NAVI10 ||
3694 adev->asic_type == CHIP_NAVI14 ||
3695 adev->asic_type == CHIP_NAVI12 ||
3696 adev->asic_type == CHIP_RENOIR ||
3697 adev->asic_type == CHIP_RAVEN) {
3698 /* Fill GFX9 tiling params. */
3699 tiling_info->gfx9.num_pipes =
3700 adev->gfx.config.gb_addr_config_fields.num_pipes;
3701 tiling_info->gfx9.num_banks =
3702 adev->gfx.config.gb_addr_config_fields.num_banks;
3703 tiling_info->gfx9.pipe_interleave =
3704 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3705 tiling_info->gfx9.num_shader_engines =
3706 adev->gfx.config.gb_addr_config_fields.num_se;
3707 tiling_info->gfx9.max_compressed_frags =
3708 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3709 tiling_info->gfx9.num_rb_per_se =
3710 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3711 tiling_info->gfx9.swizzle =
3712 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3713 tiling_info->gfx9.shaderEnable = 1;
3714
3715 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3716 plane_size, tiling_info,
3717 tiling_flags, dcc, address,
3718 force_disable_dcc);
3719 if (ret)
3720 return ret;
3721 }
3722
3723 return 0;
3724}
3725
3726static void
3727fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3728 bool *per_pixel_alpha, bool *global_alpha,
3729 int *global_alpha_value)
3730{
3731 *per_pixel_alpha = false;
3732 *global_alpha = false;
3733 *global_alpha_value = 0xff;
3734
3735 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3736 return;
3737
3738 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3739 static const uint32_t alpha_formats[] = {
3740 DRM_FORMAT_ARGB8888,
3741 DRM_FORMAT_RGBA8888,
3742 DRM_FORMAT_ABGR8888,
3743 };
3744 uint32_t format = plane_state->fb->format->format;
3745 unsigned int i;
3746
3747 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3748 if (format == alpha_formats[i]) {
3749 *per_pixel_alpha = true;
3750 break;
3751 }
3752 }
3753 }
3754
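/*
 * DRM plane alpha is 16-bit (0-0xffff) while DC takes an 8-bit global
 * alpha, hence the >> 8 below.
 */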
3755 if (plane_state->alpha < 0xffff) {
3756 *global_alpha = true;
3757 *global_alpha_value = plane_state->alpha >> 8;
3758 }
3759}
3760
3761static int
3762fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3763 const enum surface_pixel_format format,
3764 enum dc_color_space *color_space)
3765{
3766 bool full_range;
3767
3768 *color_space = COLOR_SPACE_SRGB;
3769
3770 /* DRM color properties only affect non-RGB formats. */
3771 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3772 return 0;
3773
3774 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3775
3776 switch (plane_state->color_encoding) {
3777 case DRM_COLOR_YCBCR_BT601:
3778 if (full_range)
3779 *color_space = COLOR_SPACE_YCBCR601;
3780 else
3781 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3782 break;
3783
3784 case DRM_COLOR_YCBCR_BT709:
3785 if (full_range)
3786 *color_space = COLOR_SPACE_YCBCR709;
3787 else
3788 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3789 break;
3790
3791 case DRM_COLOR_YCBCR_BT2020:
3792 if (full_range)
3793 *color_space = COLOR_SPACE_2020_YCBCR;
3794 else
3795 return -EINVAL;
3796 break;
3797
3798 default:
3799 return -EINVAL;
3800 }
3801
3802 return 0;
3803}
3804
3805static int
3806fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3807 const struct drm_plane_state *plane_state,
3808 const uint64_t tiling_flags,
3809 struct dc_plane_info *plane_info,
3810 struct dc_plane_address *address,
3811 bool tmz_surface,
3812 bool force_disable_dcc)
3813{
3814 const struct drm_framebuffer *fb = plane_state->fb;
3815 const struct amdgpu_framebuffer *afb =
3816 to_amdgpu_framebuffer(plane_state->fb);
3817 struct drm_format_name_buf format_name;
3818 int ret;
3819
3820 memset(plane_info, 0, sizeof(*plane_info));
3821
3822 switch (fb->format->format) {
3823 case DRM_FORMAT_C8:
3824 plane_info->format =
3825 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3826 break;
3827 case DRM_FORMAT_RGB565:
3828 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3829 break;
3830 case DRM_FORMAT_XRGB8888:
3831 case DRM_FORMAT_ARGB8888:
3832 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3833 break;
3834 case DRM_FORMAT_XRGB2101010:
3835 case DRM_FORMAT_ARGB2101010:
3836 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3837 break;
3838 case DRM_FORMAT_XBGR2101010:
3839 case DRM_FORMAT_ABGR2101010:
3840 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3841 break;
3842 case DRM_FORMAT_XBGR8888:
3843 case DRM_FORMAT_ABGR8888:
3844 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3845 break;
3846 case DRM_FORMAT_NV21:
3847 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3848 break;
3849 case DRM_FORMAT_NV12:
3850 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3851 break;
3852 case DRM_FORMAT_P010:
3853 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3854 break;
3855 case DRM_FORMAT_XRGB16161616F:
3856 case DRM_FORMAT_ARGB16161616F:
3857 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3858 break;
3859 case DRM_FORMAT_XBGR16161616F:
3860 case DRM_FORMAT_ABGR16161616F:
3861 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3862 break;
3863 default:
3864 DRM_ERROR(
3865 "Unsupported screen format %s\n",
3866 drm_get_format_name(fb->format->format, &format_name));
3867 return -EINVAL;
3868 }
3869
3870 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3871 case DRM_MODE_ROTATE_0:
3872 plane_info->rotation = ROTATION_ANGLE_0;
3873 break;
3874 case DRM_MODE_ROTATE_90:
3875 plane_info->rotation = ROTATION_ANGLE_90;
3876 break;
3877 case DRM_MODE_ROTATE_180:
3878 plane_info->rotation = ROTATION_ANGLE_180;
3879 break;
3880 case DRM_MODE_ROTATE_270:
3881 plane_info->rotation = ROTATION_ANGLE_270;
3882 break;
3883 default:
3884 plane_info->rotation = ROTATION_ANGLE_0;
3885 break;
3886 }
3887
3888 plane_info->visible = true;
3889 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3890
3891 plane_info->layer_index = 0;
3892
3893 ret = fill_plane_color_attributes(plane_state, plane_info->format,
3894 &plane_info->color_space);
3895 if (ret)
3896 return ret;
3897
3898 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3899 plane_info->rotation, tiling_flags,
3900 &plane_info->tiling_info,
3901 &plane_info->plane_size,
3902 &plane_info->dcc, address, tmz_surface,
3903 force_disable_dcc);
3904 if (ret)
3905 return ret;
3906
3907 fill_blending_from_plane_state(
3908 plane_state, &plane_info->per_pixel_alpha,
3909 &plane_info->global_alpha, &plane_info->global_alpha_value);
3910
3911 return 0;
3912}
3913
3914static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3915 struct dc_plane_state *dc_plane_state,
3916 struct drm_plane_state *plane_state,
3917 struct drm_crtc_state *crtc_state)
3918{
3919 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3920 const struct amdgpu_framebuffer *amdgpu_fb =
3921 to_amdgpu_framebuffer(plane_state->fb);
3922 struct dc_scaling_info scaling_info;
3923 struct dc_plane_info plane_info;
3924 uint64_t tiling_flags;
3925 int ret;
3926 bool tmz_surface = false;
3927 bool force_disable_dcc = false;
3928
3929 ret = fill_dc_scaling_info(plane_state, &scaling_info);
3930 if (ret)
3931 return ret;
3932
3933 dc_plane_state->src_rect = scaling_info.src_rect;
3934 dc_plane_state->dst_rect = scaling_info.dst_rect;
3935 dc_plane_state->clip_rect = scaling_info.clip_rect;
3936 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3937
3938 ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
3939 if (ret)
3940 return ret;
3941
3942 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3943 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3944 &plane_info,
3945 &dc_plane_state->address,
3946 tmz_surface,
3947 force_disable_dcc);
3948 if (ret)
3949 return ret;
3950
3951 dc_plane_state->format = plane_info.format;
3952 dc_plane_state->color_space = plane_info.color_space;
3954 dc_plane_state->plane_size = plane_info.plane_size;
3955 dc_plane_state->rotation = plane_info.rotation;
3956 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3957 dc_plane_state->stereo_format = plane_info.stereo_format;
3958 dc_plane_state->tiling_info = plane_info.tiling_info;
3959 dc_plane_state->visible = plane_info.visible;
3960 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3961 dc_plane_state->global_alpha = plane_info.global_alpha;
3962 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3963 dc_plane_state->dcc = plane_info.dcc;
3964 dc_plane_state->layer_index = plane_info.layer_index;
3965
3966 /*
3967  * Always set the input transfer function, since the plane state
3968  * is refreshed every time.
3969  */
3970 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3971 if (ret)
3972 return ret;
3973
3974 return 0;
3975}
3976
3977static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3978 const struct dm_connector_state *dm_state,
3979 struct dc_stream_state *stream)
3980{
3981 enum amdgpu_rmx_type rmx_type;
3982
3983 struct rect src = { 0 };
3984 struct rect dst = { 0 };
3985
3986
3987 if (!mode)
3988 return;
3989
3990 /* Full-screen scaling by default. */
3991 src.width = mode->hdisplay;
3992 src.height = mode->vdisplay;
3993 dst.width = stream->timing.h_addressable;
3994 dst.height = stream->timing.v_addressable;
3995
3996 if (dm_state) {
3997 rmx_type = dm_state->scaling;
3998 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3999 if (src.width * dst.height <
4000 src.height * dst.width) {
4001 /* The height needs less upscaling / more downscaling. */
4002 dst.width = src.width *
4003 dst.height / src.height;
4004 } else {
4005 /* The width needs less upscaling / more downscaling. */
4006 dst.height = src.height *
4007 dst.width / src.width;
4008 }
4009 } else if (rmx_type == RMX_CENTER) {
4010 dst = src;
4011 }
4012
4013 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4014 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4015
4016 if (dm_state->underscan_enable) {
4017 dst.x += dm_state->underscan_hborder / 2;
4018 dst.y += dm_state->underscan_vborder / 2;
4019 dst.width -= dm_state->underscan_hborder;
4020 dst.height -= dm_state->underscan_vborder;
4021 }
4022 }
4023
4024 stream->src = src;
4025 stream->dst = dst;
4026
4027 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4028 dst.x, dst.y, dst.width, dst.height);
4029
4030}
4031
4032static enum dc_color_depth
4033convert_color_depth_from_display_info(const struct drm_connector *connector,
4034 bool is_y420, int requested_bpc)
4035{
4036 uint8_t bpc;
4037
4038 if (is_y420) {
4039 bpc = 8;
4040
4041 /* Cap display bpc based on HDMI 2.0 HF-VSDB deep-color support. */
4042 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4043 bpc = 16;
4044 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4045 bpc = 12;
4046 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4047 bpc = 10;
4048 } else {
4049 bpc = (uint8_t)connector->display_info.bpc;
4050 /* Assume 8 bpc by default if no bpc is specified. */
4051 bpc = bpc ? bpc : 8;
4052 }
4053
4054 if (requested_bpc > 0) {
4055 /*
4056  * Cap display bpc based on the requested value.
4057  *
4058  * The value for state->max_bpc may not be correctly updated
4059  * depending on when the connector gets added to the state or if
4060  * this was called outside of atomic check, so it can't be used
4061  * directly here.
4062  */
4063 bpc = min_t(u8, bpc, requested_bpc);
4064
4065 /* Round down to the nearest even number. */
4066 bpc = bpc - (bpc & 1);
4067 }
4068
4069 switch (bpc) {
4070 case 0:
4071 /*
4072  * Temporary workaround: DRM doesn't parse the color depth
4073  * for EDID revisions before 1.4.
4074  * TODO: Fix EDID parsing.
4075  */
4076 return COLOR_DEPTH_888;
4077 case 6:
4078 return COLOR_DEPTH_666;
4079 case 8:
4080 return COLOR_DEPTH_888;
4081 case 10:
4082 return COLOR_DEPTH_101010;
4083 case 12:
4084 return COLOR_DEPTH_121212;
4085 case 14:
4086 return COLOR_DEPTH_141414;
4087 case 16:
4088 return COLOR_DEPTH_161616;
4089 default:
4090 return COLOR_DEPTH_UNDEFINED;
4091 }
4092}
4093
4094static enum dc_aspect_ratio
4095get_aspect_ratio(const struct drm_display_mode *mode_in)
4096{
4097 /* 1-to-1 mapping, since both enums follow the HDMI spec. */
4098 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4099}
4100
4101static enum dc_color_space
4102get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4103{
4104 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4105
4106 switch (dc_crtc_timing->pixel_encoding) {
4107 case PIXEL_ENCODING_YCBCR422:
4108 case PIXEL_ENCODING_YCBCR444:
4109 case PIXEL_ENCODING_YCBCR420:
4110 {
4111 /*
4112  * 27.03 MHz (270300 in units of 100 Hz) is the separation point
4113  * between HDTV and SDTV according to the HDMI spec, so use
4114  * YCbCr709 above it and YCbCr601 below it.
4115  */
4116 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4117 if (dc_crtc_timing->flags.Y_ONLY)
4118 color_space =
4119 COLOR_SPACE_YCBCR709_LIMITED;
4120 else
4121 color_space = COLOR_SPACE_YCBCR709;
4122 } else {
4123 if (dc_crtc_timing->flags.Y_ONLY)
4124 color_space =
4125 COLOR_SPACE_YCBCR601_LIMITED;
4126 else
4127 color_space = COLOR_SPACE_YCBCR601;
4128 }
4129
4130 }
4131 break;
4132 case PIXEL_ENCODING_RGB:
4133 color_space = COLOR_SPACE_SRGB;
4134 break;
4135
4136 default:
4137 WARN_ON(1);
4138 break;
4139 }
4140
4141 return color_space;
4142}
4143
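/*
 * Walks down from the current color depth until the normalized pixel
 * clock fits under the sink's max TMDS clock. For example, 10 bpc
 * scales the clock by 30/24, and YCbCr 4:2:0 halves it first.
 */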
4144static bool adjust_colour_depth_from_display_info(
4145 struct dc_crtc_timing *timing_out,
4146 const struct drm_display_info *info)
4147{
4148 enum dc_color_depth depth = timing_out->display_color_depth;
4149 int normalized_clk;
4150 do {
4151 normalized_clk = timing_out->pix_clk_100hz / 10;
4152 /* YCbCr 4:2:0 requires an additional adjustment of 1/2. */
4153 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4154 normalized_clk /= 2;
4155
4156 switch (depth) {
4157 case COLOR_DEPTH_888:
4158 break;
4159 case COLOR_DEPTH_101010:
4160 normalized_clk = (normalized_clk * 30) / 24;
4161 break;
4162 case COLOR_DEPTH_121212:
4163 normalized_clk = (normalized_clk * 36) / 24;
4164 break;
4165 case COLOR_DEPTH_161616:
4166 normalized_clk = (normalized_clk * 48) / 24;
4167 break;
4168 default:
4169 /* The above depths are the only ones valid for HDMI. */
4170 return false;
4171 }
4172 if (normalized_clk <= info->max_tmds_clock) {
4173 timing_out->display_color_depth = depth;
4174 return true;
4175 }
4176 } while (--depth > COLOR_DEPTH_666);
4177 return false;
4178}
4179
4180static void fill_stream_properties_from_drm_display_mode(
4181 struct dc_stream_state *stream,
4182 const struct drm_display_mode *mode_in,
4183 const struct drm_connector *connector,
4184 const struct drm_connector_state *connector_state,
4185 const struct dc_stream_state *old_stream,
4186 int requested_bpc)
4187{
4188 struct dc_crtc_timing *timing_out = &stream->timing;
4189 const struct drm_display_info *info = &connector->display_info;
4190 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4191 struct hdmi_vendor_infoframe hv_frame;
4192 struct hdmi_avi_infoframe avi_frame;
4193
4194 memset(&hv_frame, 0, sizeof(hv_frame));
4195 memset(&avi_frame, 0, sizeof(avi_frame));
4196
4197 timing_out->h_border_left = 0;
4198 timing_out->h_border_right = 0;
4199 timing_out->v_border_top = 0;
4200 timing_out->v_border_bottom = 0;
4201
4202 if (drm_mode_is_420_only(info, mode_in)
4203 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4204 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4205 else if (drm_mode_is_420_also(info, mode_in)
4206 && aconnector->force_yuv420_output)
4207 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4208 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4209 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4210 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4211 else
4212 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4213
4214 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4215 timing_out->display_color_depth = convert_color_depth_from_display_info(
4216 connector,
4217 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4218 requested_bpc);
4219 timing_out->scan_type = SCANNING_TYPE_NODATA;
4220 timing_out->hdmi_vic = 0;
4221
4222 if (old_stream) {
4223 timing_out->vic = old_stream->timing.vic;
4224 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4225 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4226 } else {
4227 timing_out->vic = drm_match_cea_mode(mode_in);
4228 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4229 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4230 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4231 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4232 }
4233
4234 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4235 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4236 timing_out->vic = avi_frame.video_code;
4237 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4238 timing_out->hdmi_vic = hv_frame.vic;
4239 }
4240
4241 timing_out->h_addressable = mode_in->crtc_hdisplay;
4242 timing_out->h_total = mode_in->crtc_htotal;
4243 timing_out->h_sync_width =
4244 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4245 timing_out->h_front_porch =
4246 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4247 timing_out->v_total = mode_in->crtc_vtotal;
4248 timing_out->v_addressable = mode_in->crtc_vdisplay;
4249 timing_out->v_front_porch =
4250 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4251 timing_out->v_sync_width =
4252 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4253 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4254 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4255
4256 stream->output_color_space = get_output_color_space(timing_out);
4257
4258 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4259 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4260 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4261 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4262 drm_mode_is_420_also(info, mode_in) &&
4263 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4264 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4265 adjust_colour_depth_from_display_info(timing_out, info);
4266 }
4267 }
4268}
4269
4270static void fill_audio_info(struct audio_info *audio_info,
4271 const struct drm_connector *drm_connector,
4272 const struct dc_sink *dc_sink)
4273{
4274 int i = 0;
4275 int cea_revision = 0;
4276 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4277
4278 audio_info->manufacture_id = edid_caps->manufacturer_id;
4279 audio_info->product_id = edid_caps->product_id;
4280
4281 cea_revision = drm_connector->display_info.cea_rev;
4282
4283 strscpy(audio_info->display_name,
4284 edid_caps->display_name,
4285 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4286
4287 if (cea_revision >= 3) {
4288 audio_info->mode_count = edid_caps->audio_mode_count;
4289
4290 for (i = 0; i < audio_info->mode_count; ++i) {
4291 audio_info->modes[i].format_code =
4292 (enum audio_format_code)
4293 (edid_caps->audio_modes[i].format_code);
4294 audio_info->modes[i].channel_count =
4295 edid_caps->audio_modes[i].channel_count;
4296 audio_info->modes[i].sample_rates.all =
4297 edid_caps->audio_modes[i].sample_rate;
4298 audio_info->modes[i].sample_size =
4299 edid_caps->audio_modes[i].sample_size;
4300 }
4301 }
4302
	audio_info->flags.all = edid_caps->speaker_flags;

	/* Only the first (progressive) latency descriptor is consulted here. */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}
}
4314
4315static void
4316copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4317 struct drm_display_mode *dst_mode)
4318{
4319 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4320 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4321 dst_mode->crtc_clock = src_mode->crtc_clock;
4322 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4323 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4324 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4325 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4326 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4327 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4328 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4329 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4330 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4331 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4332 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4333}
4334
4335static void
4336decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4337 const struct drm_display_mode *native_mode,
4338 bool scale_enabled)
4339{
4340 if (scale_enabled) {
4341 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4342 } else if (native_mode->clock == drm_mode->clock &&
4343 native_mode->htotal == drm_mode->htotal &&
4344 native_mode->vtotal == drm_mode->vtotal) {
4345 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* Neither scaling nor a matching native timing: leave the
		 * requested mode's CRTC timing untouched. */
	}
4349}
4350
4351static struct dc_sink *
4352create_fake_sink(struct amdgpu_dm_connector *aconnector)
4353{
4354 struct dc_sink_init_data sink_init_data = { 0 };
4355 struct dc_sink *sink = NULL;
4356 sink_init_data.link = aconnector->dc_link;
4357 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4358
4359 sink = dc_sink_create(&sink_init_data);
4360 if (!sink) {
4361 DRM_ERROR("Failed to create sink!\n");
4362 return NULL;
4363 }
4364 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4365
4366 return sink;
4367}
4368
4369static void set_multisync_trigger_params(
4370 struct dc_stream_state *stream)
4371{
4372 if (stream->triggered_crtc_reset.enabled) {
4373 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4374 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4375 }
4376}
4377
4378static void set_master_stream(struct dc_stream_state *stream_set[],
4379 int stream_count)
4380{
4381 int j, highest_rfr = 0, master_stream = 0;
4382
4383 for (j = 0; j < stream_count; j++) {
4384 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4385 int refresh_rate = 0;
4386
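			/*
			 * pix_clk_100hz * 100 is the pixel clock in Hz;
			 * dividing by the pixels per frame gives the refresh
			 * rate, e.g. 1080p60: 148500000 / (2200 * 1125) = 60.
			 */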
4387 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4388 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4389 if (refresh_rate > highest_rfr) {
4390 highest_rfr = refresh_rate;
4391 master_stream = j;
4392 }
4393 }
4394 }
4395 for (j = 0; j < stream_count; j++) {
4396 if (stream_set[j])
4397 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4398 }
4399}
4400
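/*
 * Per-frame CRTC synchronization: arm every reset-capable stream to trigger
 * on vsync-rising and make the highest-refresh stream the common event
 * source for the group.
 */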
4401static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4402{
4403 int i = 0;
4404
4405 if (context->stream_count < 2)
4406 return;
	for (i = 0; i < context->stream_count; i++) {
		if (!context->streams[i])
			continue;

		set_multisync_trigger_params(context->streams[i]);
	}
4417 set_master_stream(context->streams, context->stream_count);
4418}
4419
4420static struct dc_stream_state *
4421create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4422 const struct drm_display_mode *drm_mode,
4423 const struct dm_connector_state *dm_state,
4424 const struct dc_stream_state *old_stream,
4425 int requested_bpc)
4426{
4427 struct drm_display_mode *preferred_mode = NULL;
4428 struct drm_connector *drm_connector;
4429 const struct drm_connector_state *con_state =
4430 dm_state ? &dm_state->base : NULL;
4431 struct dc_stream_state *stream = NULL;
4432 struct drm_display_mode mode = *drm_mode;
4433 bool native_mode_found = false;
4434 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4435 int mode_refresh;
4436 int preferred_refresh = 0;
4437#if defined(CONFIG_DRM_AMD_DC_DCN)
4438 struct dsc_dec_dpcd_caps dsc_caps;
4439#endif
4440 uint32_t link_bandwidth_kbps;
4441
	struct dc_sink *sink = NULL;

	if (aconnector == NULL) {
4444 DRM_ERROR("aconnector is NULL!\n");
4445 return stream;
4446 }
4447
4448 drm_connector = &aconnector->base;
4449
4450 if (!aconnector->dc_sink) {
4451 sink = create_fake_sink(aconnector);
4452 if (!sink)
4453 return stream;
4454 } else {
4455 sink = aconnector->dc_sink;
4456 dc_sink_retain(sink);
4457 }
4458
4459 stream = dc_create_stream_for_sink(sink);
4460
4461 if (stream == NULL) {
4462 DRM_ERROR("Failed to create stream for sink!\n");
4463 goto finish;
4464 }
4465
4466 stream->dm_stream_context = aconnector;
4467
4468 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4469 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4470
4471 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4472
4473 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4474 native_mode_found = true;
4475 break;
4476 }
4477 }
4478 if (!native_mode_found)
4479 preferred_mode = list_first_entry_or_null(
4480 &aconnector->base.modes,
4481 struct drm_display_mode,
4482 head);
4483
4484 mode_refresh = drm_mode_vrefresh(&mode);
4485
	if (preferred_mode == NULL) {
		/*
		 * This is not necessarily an error: forced or headless
		 * connectors can legitimately have no preferred mode.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}
4500
4501 if (!dm_state)
4502 drm_mode_set_crtcinfo(&mode, 0);
4503
	/*
	 * If scaling is enabled and the refresh rate didn't change, reuse
	 * the timing flags from the old stream; otherwise derive everything
	 * from the requested mode.
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, NULL, requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
4514
4515 stream->timing.flags.DSC = 0;
4516
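	/*
	 * DSC is only enabled for DP sinks whose DPCD advertises it and for
	 * which dc_dsc_compute_config() can fit the timing into the link
	 * bandwidth computed below.
	 */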
4517 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4518#if defined(CONFIG_DRM_AMD_DC_DCN)
4519 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4520 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4521 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4522 &dsc_caps);
4523#endif
4524 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4525 dc_link_get_link_cap(aconnector->dc_link));
4526
4527#if defined(CONFIG_DRM_AMD_DC_DCN)
4528 if (dsc_caps.is_dsc_supported)
4529 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4530 &dsc_caps,
4531 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4532 link_bandwidth_kbps,
4533 &stream->timing,
4534 &stream->timing.dsc_cfg))
4535 stream->timing.flags.DSC = 1;
4536#endif
4537 }
4538
4539 update_stream_scaling_settings(&mode, dm_state, stream);
4540
4541 fill_audio_info(
4542 &stream->audio_info,
4543 drm_connector,
4544 sink);
4545
4546 update_stream_signal(stream, sink);
4547
4548 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4549 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
	if (stream->link->psr_settings.psr_feature_enabled) {
		struct dc *core_dc = stream->link->ctx->dc;

		if (dc_is_dmcu_initialized(core_dc)) {
			/*
			 * Decide whether colorimetry can be carried in the
			 * VSC SDP before building the packet: MST sinks
			 * report support directly, SST sinks need DPCD 1.4+
			 * plus the DPRX VSC_SDP_COLORIMETRY_SUPPORTED bit.
			 */
			stream->use_vsc_sdp_for_colorimetry = false;
			if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
				stream->use_vsc_sdp_for_colorimetry =
					aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
			} else {
				if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
				    stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
					stream->use_vsc_sdp_for_colorimetry = true;
				}
			}
			mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
		}
	}
4571finish:
4572 dc_sink_release(sink);
4573
4574 return stream;
4575}
4576
4577static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4578{
4579 drm_crtc_cleanup(crtc);
4580 kfree(crtc);
4581}
4582
4583static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4584 struct drm_crtc_state *state)
4585{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* Drop the dc_stream reference held by this state, if any. */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
4597}
4598
4599static void dm_crtc_reset_state(struct drm_crtc *crtc)
4600{
4601 struct dm_crtc_state *state;
4602
4603 if (crtc->state)
4604 dm_crtc_destroy_state(crtc, crtc->state);
4605
4606 state = kzalloc(sizeof(*state), GFP_KERNEL);
4607 if (WARN_ON(!state))
4608 return;
4609
4610 crtc->state = &state->base;
4611 crtc->state->crtc = crtc;
4612
4613}
4614
4615static struct drm_crtc_state *
4616dm_crtc_duplicate_state(struct drm_crtc *crtc)
4617{
	struct dm_crtc_state *state, *cur;

	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
4628
4629 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4630
4631 if (cur->stream) {
4632 state->stream = cur->stream;
4633 dc_stream_retain(state->stream);
4634 }
4635
4636 state->active_planes = cur->active_planes;
4637 state->interrupts_enabled = cur->interrupts_enabled;
4638 state->vrr_params = cur->vrr_params;
4639 state->vrr_infopacket = cur->vrr_infopacket;
4640 state->abm_level = cur->abm_level;
4641 state->vrr_supported = cur->vrr_supported;
4642 state->freesync_config = cur->freesync_config;
4643 state->crc_src = cur->crc_src;
4644 state->cm_has_degamma = cur->cm_has_degamma;
4645 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4649 return &state->base;
4650}
4651
4652static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4653{
4654 enum dc_irq_source irq_source;
4655 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4656 struct amdgpu_device *adev = crtc->dev->dev_private;
4657 int rc;
4658
4659 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4660
4661 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4662
4663 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4664 acrtc->crtc_id, enable ? "en" : "dis", rc);
4665 return rc;
4666}
4667
4668static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4669{
4670 enum dc_irq_source irq_source;
4671 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4672 struct amdgpu_device *adev = crtc->dev->dev_private;
4673 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4674 int rc = 0;
4675
	if (enable) {
		/* vblank irq on -> only need the vupdate irq in VRR mode */
		if (amdgpu_dm_vrr_active(acrtc_state))
			rc = dm_set_vupdate_irq(crtc, true);
	} else {
		/* vblank irq off -> vupdate irq off */
		rc = dm_set_vupdate_irq(crtc, false);
	}
4684
4685 if (rc)
4686 return rc;
4687
4688 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4689 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4690}
4691
4692static int dm_enable_vblank(struct drm_crtc *crtc)
4693{
4694 return dm_set_vblank(crtc, true);
4695}
4696
4697static void dm_disable_vblank(struct drm_crtc *crtc)
4698{
4699 dm_set_vblank(crtc, false);
4700}
4701
4702
4703static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4704 .reset = dm_crtc_reset_state,
4705 .destroy = amdgpu_dm_crtc_destroy,
4706 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4707 .set_config = drm_atomic_helper_set_config,
4708 .page_flip = drm_atomic_helper_page_flip,
4709 .atomic_duplicate_state = dm_crtc_duplicate_state,
4710 .atomic_destroy_state = dm_crtc_destroy_state,
4711 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4712 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4713 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4714 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
4715 .enable_vblank = dm_enable_vblank,
4716 .disable_vblank = dm_disable_vblank,
4717 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4718};
4719
4720static enum drm_connector_status
4721amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4722{
4723 bool connected;
4724 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4725
	/*
	 * Trust the sink discovered during detection unless the connector
	 * state was forced via sysfs or the connector is fake-enabled.
	 */
	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);
4738
4739 return (connected ? connector_status_connected :
4740 connector_status_disconnected);
4741}
4742
4743int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4744 struct drm_connector_state *connector_state,
4745 struct drm_property *property,
4746 uint64_t val)
4747{
4748 struct drm_device *dev = connector->dev;
4749 struct amdgpu_device *adev = dev->dev_private;
4750 struct dm_connector_state *dm_old_state =
4751 to_dm_connector_state(connector->state);
4752 struct dm_connector_state *dm_new_state =
4753 to_dm_connector_state(connector_state);
4754
4755 int ret = -EINVAL;
4756
4757 if (property == dev->mode_config.scaling_mode_property) {
4758 enum amdgpu_rmx_type rmx_type;
4759
4760 switch (val) {
4761 case DRM_MODE_SCALE_CENTER:
4762 rmx_type = RMX_CENTER;
4763 break;
4764 case DRM_MODE_SCALE_ASPECT:
4765 rmx_type = RMX_ASPECT;
4766 break;
4767 case DRM_MODE_SCALE_FULLSCREEN:
4768 rmx_type = RMX_FULL;
4769 break;
4770 case DRM_MODE_SCALE_NONE:
4771 default:
4772 rmx_type = RMX_OFF;
4773 break;
4774 }
4775
4776 if (dm_old_state->scaling == rmx_type)
4777 return 0;
4778
4779 dm_new_state->scaling = rmx_type;
4780 ret = 0;
4781 } else if (property == adev->mode_info.underscan_hborder_property) {
4782 dm_new_state->underscan_hborder = val;
4783 ret = 0;
4784 } else if (property == adev->mode_info.underscan_vborder_property) {
4785 dm_new_state->underscan_vborder = val;
4786 ret = 0;
4787 } else if (property == adev->mode_info.underscan_property) {
4788 dm_new_state->underscan_enable = val;
4789 ret = 0;
4790 } else if (property == adev->mode_info.abm_level_property) {
4791 dm_new_state->abm_level = val;
4792 ret = 0;
4793 }
4794
4795 return ret;
4796}
4797
4798int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4799 const struct drm_connector_state *state,
4800 struct drm_property *property,
4801 uint64_t *val)
4802{
4803 struct drm_device *dev = connector->dev;
4804 struct amdgpu_device *adev = dev->dev_private;
4805 struct dm_connector_state *dm_state =
4806 to_dm_connector_state(state);
4807 int ret = -EINVAL;
4808
4809 if (property == dev->mode_config.scaling_mode_property) {
4810 switch (dm_state->scaling) {
4811 case RMX_CENTER:
4812 *val = DRM_MODE_SCALE_CENTER;
4813 break;
4814 case RMX_ASPECT:
4815 *val = DRM_MODE_SCALE_ASPECT;
4816 break;
4817 case RMX_FULL:
4818 *val = DRM_MODE_SCALE_FULLSCREEN;
4819 break;
4820 case RMX_OFF:
4821 default:
4822 *val = DRM_MODE_SCALE_NONE;
4823 break;
4824 }
4825 ret = 0;
4826 } else if (property == adev->mode_info.underscan_hborder_property) {
4827 *val = dm_state->underscan_hborder;
4828 ret = 0;
4829 } else if (property == adev->mode_info.underscan_vborder_property) {
4830 *val = dm_state->underscan_vborder;
4831 ret = 0;
4832 } else if (property == adev->mode_info.underscan_property) {
4833 *val = dm_state->underscan_enable;
4834 ret = 0;
4835 } else if (property == adev->mode_info.abm_level_property) {
4836 *val = dm_state->abm_level;
4837 ret = 0;
4838 }
4839
4840 return ret;
4841}
4842
4843static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4844{
4845 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4846
4847 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4848}
4849
4850static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4851{
4852 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4853 const struct dc_link *link = aconnector->dc_link;
4854 struct amdgpu_device *adev = connector->dev->dev_private;
4855 struct amdgpu_display_manager *dm = &adev->dm;
4856
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

4860 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4861 link->type != dc_connection_none &&
4862 dm->backlight_dev) {
4863 backlight_device_unregister(dm->backlight_dev);
4864 dm->backlight_dev = NULL;
4865 }
4866#endif
4867
4868 if (aconnector->dc_em_sink)
4869 dc_sink_release(aconnector->dc_em_sink);
4870 aconnector->dc_em_sink = NULL;
4871 if (aconnector->dc_sink)
4872 dc_sink_release(aconnector->dc_sink);
4873 aconnector->dc_sink = NULL;
4874
4875 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4876 drm_connector_unregister(connector);
4877 drm_connector_cleanup(connector);
4878 if (aconnector->i2c) {
4879 i2c_del_adapter(&aconnector->i2c->base);
4880 kfree(aconnector->i2c);
4881 }
4882 kfree(aconnector->dm_dp_aux.aux.name);
4883
4884 kfree(connector);
4885}
4886
4887void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4888{
4889 struct dm_connector_state *state =
4890 to_dm_connector_state(connector->state);
4891
4892 if (connector->state)
4893 __drm_atomic_helper_connector_destroy_state(connector->state);
4894
4895 kfree(state);
4896
4897 state = kzalloc(sizeof(*state), GFP_KERNEL);
4898
4899 if (state) {
4900 state->scaling = RMX_OFF;
4901 state->underscan_enable = false;
4902 state->underscan_hborder = 0;
4903 state->underscan_vborder = 0;
4904 state->base.max_requested_bpc = 8;
4905 state->vcpi_slots = 0;
4906 state->pbn = 0;
4907 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4908 state->abm_level = amdgpu_dm_abm_level;
4909
4910 __drm_atomic_helper_connector_reset(connector, &state->base);
4911 }
4912}
4913
4914struct drm_connector_state *
4915amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4916{
4917 struct dm_connector_state *state =
4918 to_dm_connector_state(connector->state);
4919
4920 struct dm_connector_state *new_state =
4921 kmemdup(state, sizeof(*state), GFP_KERNEL);
4922
4923 if (!new_state)
4924 return NULL;
4925
4926 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4927
4928 new_state->freesync_capable = state->freesync_capable;
4929 new_state->abm_level = state->abm_level;
4930 new_state->scaling = state->scaling;
4931 new_state->underscan_enable = state->underscan_enable;
4932 new_state->underscan_hborder = state->underscan_hborder;
4933 new_state->underscan_vborder = state->underscan_vborder;
4934 new_state->vcpi_slots = state->vcpi_slots;
4935 new_state->pbn = state->pbn;
4936 return &new_state->base;
4937}
4938
4939static int
4940amdgpu_dm_connector_late_register(struct drm_connector *connector)
4941{
4942 struct amdgpu_dm_connector *amdgpu_dm_connector =
4943 to_amdgpu_dm_connector(connector);
4944 int r;
4945
4946 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4947 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4948 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4949 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4950 if (r)
4951 return r;
4952 }
4953
4954#if defined(CONFIG_DEBUG_FS)
4955 connector_debugfs_init(amdgpu_dm_connector);
4956#endif
4957
4958 return 0;
4959}
4960
4961static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4962 .reset = amdgpu_dm_connector_funcs_reset,
4963 .detect = amdgpu_dm_connector_detect,
4964 .fill_modes = drm_helper_probe_single_connector_modes,
4965 .destroy = amdgpu_dm_connector_destroy,
4966 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4967 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4968 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4969 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4970 .late_register = amdgpu_dm_connector_late_register,
4971 .early_unregister = amdgpu_dm_connector_unregister
4972};
4973
4974static int get_modes(struct drm_connector *connector)
4975{
4976 return amdgpu_dm_connector_get_modes(connector);
4977}
4978
4979static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4980{
4981 struct dc_sink_init_data init_params = {
4982 .link = aconnector->dc_link,
4983 .sink_signal = SIGNAL_TYPE_VIRTUAL
4984 };
4985 struct edid *edid;
4986
4987 if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			  aconnector->base.name);
4990
4991 aconnector->base.force = DRM_FORCE_OFF;
4992 aconnector->base.override_edid = false;
4993 return;
4994 }
4995
4996 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4997
4998 aconnector->edid = edid;
4999
5000 aconnector->dc_em_sink = dc_link_add_remote_sink(
5001 aconnector->dc_link,
5002 (uint8_t *)edid,
5003 (edid->extensions + 1) * EDID_LENGTH,
5004 &init_params);
5005
5006 if (aconnector->base.force == DRM_FORCE_ON) {
5007 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5008 aconnector->dc_link->local_sink :
5009 aconnector->dc_em_sink;
5010 dc_sink_retain(aconnector->dc_sink);
5011 }
5012}
5013
5014static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5015{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * For a forced or headless DP connector no link caps have been
	 * verified yet, so assume four lanes at HBR2 to give mode
	 * validation a link budget to work against.
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	aconnector->base.override_edid = true;
5029 create_eml_sink(aconnector);
5030}
5031
5032static struct dc_stream_state *
5033create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5034 const struct drm_display_mode *drm_mode,
5035 const struct dm_connector_state *dm_state,
5036 const struct dc_stream_state *old_stream)
5037{
5038 struct drm_connector *connector = &aconnector->base;
5039 struct amdgpu_device *adev = connector->dev->dev_private;
5040 struct dc_stream_state *stream;
5041 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5042 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5043 enum dc_status dc_result = DC_OK;
5044
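	/*
	 * Fall back through lower colour depths (requested_bpc, then two
	 * bits fewer per pass, down to 6) until DC validates the stream.
	 */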
5045 do {
5046 stream = create_stream_for_sink(aconnector, drm_mode,
5047 dm_state, old_stream,
5048 requested_bpc);
5049 if (stream == NULL) {
5050 DRM_ERROR("Failed to create stream for sink!\n");
5051 break;
5052 }
5053
5054 dc_result = dc_validate_stream(adev->dm.dc, stream);
5055
5056 if (dc_result != DC_OK) {
5057 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
5058 drm_mode->hdisplay,
5059 drm_mode->vdisplay,
5060 drm_mode->clock,
5061 dc_result);
5062
5063 dc_stream_release(stream);
5064 stream = NULL;
5065 requested_bpc -= 2;
5066 }
5067
5068 } while (stream == NULL && requested_bpc >= 6);
5069
5070 return stream;
5071}
5072
5073enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5074 struct drm_display_mode *mode)
5075{
5076 int result = MODE_ERROR;
5077 struct dc_sink *dc_sink;
5078
5079 struct dc_stream_state *stream;
5080 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5081
5082 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5083 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5084 return result;
5085
	/*
	 * A forced connector may never have gone through normal detection,
	 * so build the emulated sink from the override EDID before
	 * validating modes against it.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
	    !aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);
5093
5094 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5095
5096 if (dc_sink == NULL) {
5097 DRM_ERROR("dc_sink is NULL!\n");
5098 goto fail;
5099 }
5100
5101 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5102 if (stream) {
5103 dc_stream_release(stream);
5104 result = MODE_OK;
5105 }
5106
5107fail:
5108
5109 return result;
5110}
5111
5112static int fill_hdr_info_packet(const struct drm_connector_state *state,
5113 struct dc_info_packet *out)
5114{
5115 struct hdmi_drm_infoframe frame;
5116 unsigned char buf[30];
5117 ssize_t len;
5118 int ret, i;
5119
5120 memset(out, 0, sizeof(*out));
5121
5122 if (!state->hdr_output_metadata)
5123 return 0;
5124
5125 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5126 if (ret)
5127 return ret;
5128
	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static HDR metadata is a fixed 26-byte payload plus a 4-byte header. */
	if (len != 30)
		return -EINVAL;

	/* Translate the packed infoframe into DC's header/payload layout. */
5138 switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload size - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;
5157
5158 default:
5159 return -EINVAL;
5160 }
5161
5162 memcpy(&out->sb[i], &buf[4], 26);
5163 out->valid = true;
5164
5165 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5166 sizeof(out->sb), false);
5167
5168 return 0;
5169}
5170
5171static bool
5172is_hdr_metadata_different(const struct drm_connector_state *old_state,
5173 const struct drm_connector_state *new_state)
5174{
5175 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5176 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5177
5178 if (old_blob != new_blob) {
5179 if (old_blob && new_blob &&
5180 old_blob->length == new_blob->length)
5181 return memcmp(old_blob->data, new_blob->data,
5182 old_blob->length);
5183
5184 return true;
5185 }
5186
5187 return false;
5188}
5189
5190static int
5191amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5192 struct drm_atomic_state *state)
5193{
5194 struct drm_connector_state *new_con_state =
5195 drm_atomic_get_new_connector_state(state, conn);
5196 struct drm_connector_state *old_con_state =
5197 drm_atomic_get_old_connector_state(state, conn);
5198 struct drm_crtc *crtc = new_con_state->crtc;
5199 struct drm_crtc_state *new_crtc_state;
5200 int ret;
5201
5202 if (!crtc)
5203 return 0;
5204
5205 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5206 struct dc_info_packet hdr_infopacket;
5207
5208 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5209 if (ret)
5210 return ret;
5211
5212 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5213 if (IS_ERR(new_crtc_state))
5214 return PTR_ERR(new_crtc_state);
5215
		/*
		 * Only force a full modeset when HDR is being enabled or
		 * disabled; updating static metadata that is already in
		 * place does not require one.
		 */
		new_crtc_state->mode_changed =
			!old_con_state->hdr_output_metadata ||
			!new_con_state->hdr_output_metadata;
5230 }
5231
5232 return 0;
5233}
5234
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};
5247
5248static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5249{
5250}
5251
5252static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5253{
5254 struct drm_device *dev = new_crtc_state->crtc->dev;
5255 struct drm_plane *plane;
5256
5257 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5258 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5259 return true;
5260 }
5261
5262 return false;
5263}
5264
5265static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5266{
5267 struct drm_atomic_state *state = new_crtc_state->state;
5268 struct drm_plane *plane;
5269 int num_active = 0;
5270
	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
		struct drm_plane_state *new_plane_state;

		/* Cursor planes don't count as hardware planes here. */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		new_plane_state = drm_atomic_get_new_plane_state(state, plane);

		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't
			 * changed state, so it previously passed validation
			 * and is still considered active.
			 */
			num_active += 1;
			continue;
		}

		/* A plane counts as active only with a framebuffer bound. */
		num_active += (new_plane_state->fb != NULL);
	}
5293
5294 return num_active;
5295}
5296
/*
 * Sets whether interrupts should be enabled on a specific CRTC: we require
 * an enabled stream and at least one active non-cursor plane.
 */
5302static void
5303dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5304 struct drm_crtc_state *new_crtc_state)
5305{
5306 struct dm_crtc_state *dm_new_crtc_state =
5307 to_dm_crtc_state(new_crtc_state);
5308
5309 dm_new_crtc_state->active_planes = 0;
5310 dm_new_crtc_state->interrupts_enabled = false;
5311
5312 if (!dm_new_crtc_state->stream)
5313 return;
5314
5315 dm_new_crtc_state->active_planes =
5316 count_crtc_active_planes(new_crtc_state);
5317
5318 dm_new_crtc_state->interrupts_enabled =
5319 dm_new_crtc_state->active_planes > 0;
5320}
5321
5322static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5323 struct drm_crtc_state *state)
5324{
5325 struct amdgpu_device *adev = crtc->dev->dev_private;
5326 struct dc *dc = adev->dm.dc;
5327 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5328 int ret = -EINVAL;
5329
	/*
	 * Interrupt state must track changes to the CRTC and to any of its
	 * planes; atomic check runs for both, so recompute it here.
	 */
	dm_update_crtc_interrupt_state(crtc, state);

	if (unlikely(!dm_crtc_state->stream &&
		     modeset_required(state, NULL, dm_crtc_state->stream))) {
		WARN_ON(1);
		return ret;
	}

	/* In some use cases, like reset, no stream is attached. */
	if (!dm_crtc_state->stream)
		return 0;

	/*
	 * We want at least one non-cursor hardware plane enabled before the
	 * cursor may be shown on this CRTC.
	 */
	if (state->enable && state->active &&
	    does_crtc_have_active_cursor(state) &&
	    dm_crtc_state->active_planes == 0)
		return -EINVAL;
5356
5357 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5358 return 0;
5359
5360 return ret;
5361}
5362
5363static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5364 const struct drm_display_mode *mode,
5365 struct drm_display_mode *adjusted_mode)
5366{
5367 return true;
5368}
5369
5370static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5371 .disable = dm_crtc_helper_disable,
5372 .atomic_check = dm_crtc_helper_atomic_check,
5373 .mode_fixup = dm_crtc_helper_mode_fixup,
5374 .get_scanout_position = amdgpu_crtc_get_scanout_position,
5375};
5376
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
5381
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5383{
5384 switch (display_color_depth) {
5385 case COLOR_DEPTH_666:
5386 return 6;
5387 case COLOR_DEPTH_888:
5388 return 8;
5389 case COLOR_DEPTH_101010:
5390 return 10;
5391 case COLOR_DEPTH_121212:
5392 return 12;
5393 case COLOR_DEPTH_141414:
5394 return 14;
5395 case COLOR_DEPTH_161616:
5396 return 16;
5397 default:
5398 break;
5399 }
5400 return 0;
5401}
5402
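/*
 * For MST connectors, atomic check must reserve VC payload bandwidth: bpp
 * is derived from the negotiated colour depth (three components per pixel)
 * and converted together with the pixel clock into a PBN value.
 */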
5403static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5404 struct drm_crtc_state *crtc_state,
5405 struct drm_connector_state *conn_state)
5406{
5407 struct drm_atomic_state *state = crtc_state->state;
5408 struct drm_connector *connector = conn_state->connector;
5409 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5410 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5411 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5412 struct drm_dp_mst_topology_mgr *mst_mgr;
5413 struct drm_dp_mst_port *mst_port;
5414 enum dc_color_depth color_depth;
5415 int clock, bpp = 0;
5416 bool is_y420 = false;
5417
5418 if (!aconnector->port || !aconnector->dc_sink)
5419 return 0;
5420
5421 mst_port = aconnector->port;
5422 mst_mgr = &aconnector->mst_port->mst_mgr;
5423
5424 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5425 return 0;
5426
5427 if (!state->duplicated) {
5428 int max_bpc = conn_state->max_requested_bpc;
5429 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5430 aconnector->force_yuv420_output;
5431 color_depth = convert_color_depth_from_display_info(connector,
5432 is_y420,
5433 max_bpc);
5434 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5435 clock = adjusted_mode->clock;
5436 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5437 }
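
	/* Ask the MST manager for VCPI slots covering the computed PBN. */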
5438 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5439 mst_mgr,
5440 mst_port,
5441 dm_new_connector_state->pbn,
5442 0);
5443 if (dm_new_connector_state->vcpi_slots < 0) {
5444 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5445 return dm_new_connector_state->vcpi_slots;
5446 }
5447 return 0;
5448}
5449
5450const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5451 .disable = dm_encoder_helper_disable,
5452 .atomic_check = dm_encoder_helper_atomic_check
5453};
5454
5455#if defined(CONFIG_DRM_AMD_DC_DCN)
5456static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5457 struct dc_state *dc_state)
5458{
5459 struct dc_stream_state *stream = NULL;
5460 struct drm_connector *connector;
5461 struct drm_connector_state *new_con_state, *old_con_state;
5462 struct amdgpu_dm_connector *aconnector;
5463 struct dm_connector_state *dm_conn_state;
5464 int i, j, clock, bpp;
5465 int vcpi, pbn_div, pbn = 0;
5466
5467 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5468
5469 aconnector = to_amdgpu_dm_connector(connector);
5470
5471 if (!aconnector->port)
5472 continue;
5473
5474 if (!new_con_state || !new_con_state->crtc)
5475 continue;
5476
5477 dm_conn_state = to_dm_connector_state(new_con_state);
5478
5479 for (j = 0; j < dc_state->stream_count; j++) {
5480 stream = dc_state->streams[j];
5481 if (!stream)
5482 continue;
5483
			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5485 break;
5486
5487 stream = NULL;
5488 }
5489
5490 if (!stream)
5491 continue;
5492
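		/* Non-DSC streams keep their PBN; just make sure DSC is off. */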
5493 if (stream->timing.flags.DSC != 1) {
5494 drm_dp_mst_atomic_enable_dsc(state,
5495 aconnector->port,
5496 dm_conn_state->pbn,
5497 0,
5498 false);
5499 continue;
5500 }
5501
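		/*
		 * With DSC active the effective bpp comes from the computed
		 * DSC config (in 1/16-bpp units, hence the 'true' flag to
		 * drm_dp_calc_pbn_mode()), so recompute the PBN and reserve
		 * slots with DSC enabled.
		 */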
5502 pbn_div = dm_mst_get_pbn_divider(stream->link);
5503 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5504 clock = stream->timing.pix_clk_100hz / 10;
5505 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5506 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5507 aconnector->port,
5508 pbn, pbn_div,
5509 true);
5510 if (vcpi < 0)
5511 return vcpi;
5512
5513 dm_conn_state->pbn = pbn;
5514 dm_conn_state->vcpi_slots = vcpi;
5515 }
5516 return 0;
5517}
5518#endif
5519
5520static void dm_drm_plane_reset(struct drm_plane *plane)
5521{
5522 struct dm_plane_state *amdgpu_state = NULL;
5523
5524 if (plane->state)
5525 plane->funcs->atomic_destroy_state(plane, plane->state);
5526
5527 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5528 WARN_ON(amdgpu_state == NULL);
5529
5530 if (amdgpu_state)
5531 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5532}
5533
5534static struct drm_plane_state *
5535dm_drm_plane_duplicate_state(struct drm_plane *plane)
5536{
5537 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5538
5539 old_dm_plane_state = to_dm_plane_state(plane->state);
5540 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5541 if (!dm_plane_state)
5542 return NULL;
5543
5544 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5545
5546 if (old_dm_plane_state->dc_state) {
5547 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5548 dc_plane_state_retain(dm_plane_state->dc_state);
5549 }
5550
5551 return &dm_plane_state->base;
5552}
5553
5554void dm_drm_plane_destroy_state(struct drm_plane *plane,
5555 struct drm_plane_state *state)
5556{
5557 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5558
5559 if (dm_plane_state->dc_state)
5560 dc_plane_state_release(dm_plane_state->dc_state);
5561
5562 drm_atomic_helper_plane_destroy_state(plane, state);
5563}
5564
5565static const struct drm_plane_funcs dm_plane_funcs = {
5566 .update_plane = drm_atomic_helper_update_plane,
5567 .disable_plane = drm_atomic_helper_disable_plane,
5568 .destroy = drm_primary_helper_destroy,
5569 .reset = dm_drm_plane_reset,
5570 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5571 .atomic_destroy_state = dm_drm_plane_destroy_state,
5572};
5573
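/*
 * Pin the framebuffer BO into a scanout-capable domain, make sure it is
 * GART-bound, and record its GPU address plus tiling/DCC attributes in the
 * dc_plane_state before the plane can be committed.
 */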
5574static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5575 struct drm_plane_state *new_state)
5576{
5577 struct amdgpu_framebuffer *afb;
5578 struct drm_gem_object *obj;
5579 struct amdgpu_device *adev;
5580 struct amdgpu_bo *rbo;
5581 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5582 struct list_head list;
5583 struct ttm_validate_buffer tv;
5584 struct ww_acquire_ctx ticket;
5585 uint64_t tiling_flags;
5586 uint32_t domain;
5587 int r;
5588 bool tmz_surface = false;
5589 bool force_disable_dcc = false;
5590
5591 dm_plane_state_old = to_dm_plane_state(plane->state);
5592 dm_plane_state_new = to_dm_plane_state(new_state);
5593
5594 if (!new_state->fb) {
5595 DRM_DEBUG_DRIVER("No FB bound\n");
5596 return 0;
5597 }
5598
5599 afb = to_amdgpu_framebuffer(new_state->fb);
5600 obj = new_state->fb->obj[0];
5601 rbo = gem_to_amdgpu_bo(obj);
5602 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5603 INIT_LIST_HEAD(&list);
5604
5605 tv.bo = &rbo->tbo;
5606 tv.num_shared = 1;
5607 list_add(&tv.head, &list);
5608
5609 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5610 if (r) {
5611 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5612 return r;
5613 }
5614
5615 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5616 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5617 else
5618 domain = AMDGPU_GEM_DOMAIN_VRAM;
5619
5620 r = amdgpu_bo_pin(rbo, domain);
5621 if (unlikely(r != 0)) {
5622 if (r != -ERESTARTSYS)
5623 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5624 ttm_eu_backoff_reservation(&ticket, &list);
5625 return r;
5626 }
5627
5628 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5629 if (unlikely(r != 0)) {
5630 amdgpu_bo_unpin(rbo);
5631 ttm_eu_backoff_reservation(&ticket, &list);
5632 DRM_ERROR("%p bind failed\n", rbo);
5633 return r;
5634 }
5635
5636 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5637
5638 tmz_surface = amdgpu_bo_encrypted(rbo);
5639
5640 ttm_eu_backoff_reservation(&ticket, &list);
5641
5642 afb->address = amdgpu_bo_gpu_offset(rbo);
5643
5644 amdgpu_bo_ref(rbo);
5645
5646 if (dm_plane_state_new->dc_state &&
5647 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5648 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5649
5650 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5651 fill_plane_buffer_attributes(
5652 adev, afb, plane_state->format, plane_state->rotation,
5653 tiling_flags, &plane_state->tiling_info,
5654 &plane_state->plane_size, &plane_state->dcc,
5655 &plane_state->address, tmz_surface,
5656 force_disable_dcc);
5657 }
5658
5659 return 0;
5660}
5661
5662static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5663 struct drm_plane_state *old_state)
5664{
5665 struct amdgpu_bo *rbo;
5666 int r;
5667
5668 if (!old_state->fb)
5669 return;
5670
5671 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5672 r = amdgpu_bo_reserve(rbo, false);
5673 if (unlikely(r)) {
5674 DRM_ERROR("failed to reserve rbo before unpin\n");
5675 return;
5676 }
5677
5678 amdgpu_bo_unpin(rbo);
5679 amdgpu_bo_unreserve(rbo);
5680 amdgpu_bo_unref(&rbo);
5681}
5682
5683static int dm_plane_atomic_check(struct drm_plane *plane,
5684 struct drm_plane_state *state)
5685{
5686 struct amdgpu_device *adev = plane->dev->dev_private;
5687 struct dc *dc = adev->dm.dc;
5688 struct dm_plane_state *dm_plane_state;
5689 struct dc_scaling_info scaling_info;
5690 int ret;
5691
5692 dm_plane_state = to_dm_plane_state(state);
5693
5694 if (!dm_plane_state->dc_state)
5695 return 0;
5696
5697 ret = fill_dc_scaling_info(state, &scaling_info);
5698 if (ret)
5699 return ret;
5700
5701 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5702 return 0;
5703
5704 return -EINVAL;
5705}
5706
5707static int dm_plane_atomic_async_check(struct drm_plane *plane,
5708 struct drm_plane_state *new_plane_state)
5709{
	/* Only support async updates on cursor planes. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		return -EINVAL;
5713
5714 return 0;
5715}
5716
5717static void dm_plane_atomic_async_update(struct drm_plane *plane,
5718 struct drm_plane_state *new_state)
5719{
5720 struct drm_plane_state *old_state =
5721 drm_atomic_get_old_plane_state(new_state->state, plane);
5722
5723 swap(plane->state->fb, new_state->fb);
5724
5725 plane->state->src_x = new_state->src_x;
5726 plane->state->src_y = new_state->src_y;
5727 plane->state->src_w = new_state->src_w;
5728 plane->state->src_h = new_state->src_h;
5729 plane->state->crtc_x = new_state->crtc_x;
5730 plane->state->crtc_y = new_state->crtc_y;
5731 plane->state->crtc_w = new_state->crtc_w;
5732 plane->state->crtc_h = new_state->crtc_h;
5733
5734 handle_cursor_update(plane, old_state);
5735}
5736
5737static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5738 .prepare_fb = dm_plane_helper_prepare_fb,
5739 .cleanup_fb = dm_plane_helper_cleanup_fb,
5740 .atomic_check = dm_plane_atomic_check,
5741 .atomic_async_check = dm_plane_atomic_async_check,
5742 .atomic_async_update = dm_plane_atomic_async_update
5743};
5744
/*
 * Base format tables for the three plane types. Primary planes pick up
 * NV12/P010/FP16 entries in get_plane_formats() when the DC plane caps
 * advertise support.
 */
5751static const uint32_t rgb_formats[] = {
5752 DRM_FORMAT_XRGB8888,
5753 DRM_FORMAT_ARGB8888,
5754 DRM_FORMAT_RGBA8888,
5755 DRM_FORMAT_XRGB2101010,
5756 DRM_FORMAT_XBGR2101010,
5757 DRM_FORMAT_ARGB2101010,
5758 DRM_FORMAT_ABGR2101010,
5759 DRM_FORMAT_XBGR8888,
5760 DRM_FORMAT_ABGR8888,
5761 DRM_FORMAT_RGB565,
5762};
5763
5764static const uint32_t overlay_formats[] = {
5765 DRM_FORMAT_XRGB8888,
5766 DRM_FORMAT_ARGB8888,
5767 DRM_FORMAT_RGBA8888,
5768 DRM_FORMAT_XBGR8888,
5769 DRM_FORMAT_ABGR8888,
5770 DRM_FORMAT_RGB565
5771};
5772
5773static const u32 cursor_formats[] = {
5774 DRM_FORMAT_ARGB8888
5775};
5776
5777static int get_plane_formats(const struct drm_plane *plane,
5778 const struct dc_plane_cap *plane_cap,
5779 uint32_t *formats, int max_formats)
5780{
5781 int i, num_formats = 0;
5782
	/*
	 * Select the base table by plane type; video (NV12/P010) and FP16
	 * formats are appended for primary planes according to plane_cap.
	 */
5789 switch (plane->type) {
5790 case DRM_PLANE_TYPE_PRIMARY:
5791 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5792 if (num_formats >= max_formats)
5793 break;
5794
5795 formats[num_formats++] = rgb_formats[i];
5796 }
5797
5798 if (plane_cap && plane_cap->pixel_format_support.nv12)
5799 formats[num_formats++] = DRM_FORMAT_NV12;
5800 if (plane_cap && plane_cap->pixel_format_support.p010)
5801 formats[num_formats++] = DRM_FORMAT_P010;
5802 if (plane_cap && plane_cap->pixel_format_support.fp16) {
5803 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5804 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5805 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5806 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
5807 }
5808 break;
5809
5810 case DRM_PLANE_TYPE_OVERLAY:
5811 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5812 if (num_formats >= max_formats)
5813 break;
5814
5815 formats[num_formats++] = overlay_formats[i];
5816 }
5817 break;
5818
5819 case DRM_PLANE_TYPE_CURSOR:
5820 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5821 if (num_formats >= max_formats)
5822 break;
5823
5824 formats[num_formats++] = cursor_formats[i];
5825 }
5826 break;
5827 }
5828
5829 return num_formats;
5830}
5831
5832static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5833 struct drm_plane *plane,
5834 unsigned long possible_crtcs,
5835 const struct dc_plane_cap *plane_cap)
5836{
5837 uint32_t formats[32];
5838 int num_formats;
5839 int res = -EPERM;
5840
5841 num_formats = get_plane_formats(plane, plane_cap, formats,
5842 ARRAY_SIZE(formats));
5843
5844 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5845 &dm_plane_funcs, formats, num_formats,
5846 NULL, plane->type, NULL);
5847 if (res)
5848 return res;
5849
5850 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5851 plane_cap && plane_cap->per_pixel_alpha) {
5852 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5853 BIT(DRM_MODE_BLEND_PREMULTI);
5854
5855 drm_plane_create_alpha_property(plane);
5856 drm_plane_create_blend_mode_property(plane, blend_caps);
5857 }
5858
5859 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5860 plane_cap &&
5861 (plane_cap->pixel_format_support.nv12 ||
5862 plane_cap->pixel_format_support.p010)) {
5863
5864 drm_plane_create_color_properties(
5865 plane,
5866 BIT(DRM_COLOR_YCBCR_BT601) |
5867 BIT(DRM_COLOR_YCBCR_BT709) |
5868 BIT(DRM_COLOR_YCBCR_BT2020),
5869 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5870 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5871 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5872 }
5873
5874 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5875

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);
5879
5880 return 0;
5881}
5882
5883static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5884 struct drm_plane *plane,
5885 uint32_t crtc_index)
5886{
5887 struct amdgpu_crtc *acrtc = NULL;
5888 struct drm_plane *cursor_plane;
5889
5890 int res = -ENOMEM;
5891
	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;

	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
5898
5899 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5900 if (!acrtc)
5901 goto fail;
5902
5903 res = drm_crtc_init_with_planes(
5904 dm->ddev,
5905 &acrtc->base,
5906 plane,
5907 cursor_plane,
5908 &amdgpu_dm_crtc_funcs, NULL);
5909
5910 if (res)
5911 goto fail;
5912
5913 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5914

	/* Create (reset) the CRTC state */
	if (acrtc->base.funcs->reset)
		acrtc->base.funcs->reset(&acrtc->base);
5918
5919 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5920 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5921
5922 acrtc->crtc_id = crtc_index;
5923 acrtc->base.enabled = false;
5924 acrtc->otg_inst = -1;
5925
5926 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5927 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5928 true, MAX_COLOR_LUT_ENTRIES);
5929 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5930
5931 return 0;
5932
5933fail:
5934 kfree(acrtc);
5935 kfree(cursor_plane);
5936 return res;
5937}
5938
5939
5940static int to_drm_connector_type(enum signal_type st)
5941{
5942 switch (st) {
5943 case SIGNAL_TYPE_HDMI_TYPE_A:
5944 return DRM_MODE_CONNECTOR_HDMIA;
5945 case SIGNAL_TYPE_EDP:
5946 return DRM_MODE_CONNECTOR_eDP;
5947 case SIGNAL_TYPE_LVDS:
5948 return DRM_MODE_CONNECTOR_LVDS;
5949 case SIGNAL_TYPE_RGB:
5950 return DRM_MODE_CONNECTOR_VGA;
5951 case SIGNAL_TYPE_DISPLAY_PORT:
5952 case SIGNAL_TYPE_DISPLAY_PORT_MST:
5953 return DRM_MODE_CONNECTOR_DisplayPort;
5954 case SIGNAL_TYPE_DVI_DUAL_LINK:
5955 case SIGNAL_TYPE_DVI_SINGLE_LINK:
5956 return DRM_MODE_CONNECTOR_DVID;
5957 case SIGNAL_TYPE_VIRTUAL:
5958 return DRM_MODE_CONNECTOR_VIRTUAL;
5959
5960 default:
5961 return DRM_MODE_CONNECTOR_Unknown;
5962 }
5963}
5964
5965static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5966{
5967 struct drm_encoder *encoder;
5968
	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;
5972
5973 return NULL;
5974}
5975
5976static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5977{
5978 struct drm_encoder *encoder;
5979 struct amdgpu_encoder *amdgpu_encoder;
5980
5981 encoder = amdgpu_dm_connector_to_encoder(connector);
5982
5983 if (encoder == NULL)
5984 return;
5985
5986 amdgpu_encoder = to_amdgpu_encoder(encoder);
5987
5988 amdgpu_encoder->native_mode.clock = 0;
5989
	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			/*
			 * Only the head of the list is inspected: the probed
			 * modes are sorted first, which places the preferred
			 * mode, when present, at the front.
			 */
			break;
		}
	}
6003}
6004
6005static struct drm_display_mode *
6006amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6007 char *name,
6008 int hdisplay, int vdisplay)
6009{
6010 struct drm_device *dev = encoder->dev;
6011 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6012 struct drm_display_mode *mode = NULL;
6013 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6014
6015 mode = drm_mode_duplicate(dev, native_mode);
6016
6017 if (mode == NULL)
6018 return NULL;
6019
6020 mode->hdisplay = hdisplay;
6021 mode->vdisplay = vdisplay;
6022 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6023 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6024
	return mode;
}
6028
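/*
 * Seed a set of common modes on top of the EDID list: only sizes that fit
 * within the native mode are added, and modes already probed are skipped.
 */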
6029static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6030 struct drm_connector *connector)
6031{
6032 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6033 struct drm_display_mode *mode = NULL;
6034 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6035 struct amdgpu_dm_connector *amdgpu_dm_connector =
6036 to_amdgpu_dm_connector(connector);
6037 int i;
6038 int n;
6039 struct mode_size {
6040 char name[DRM_DISPLAY_MODE_LEN];
6041 int w;
6042 int h;
6043 } common_modes[] = {
6044 { "640x480", 640, 480},
6045 { "800x600", 800, 600},
6046 { "1024x768", 1024, 768},
6047 { "1280x720", 1280, 720},
6048 { "1280x800", 1280, 800},
6049 {"1280x1024", 1280, 1024},
6050 { "1440x900", 1440, 900},
6051 {"1680x1050", 1680, 1050},
6052 {"1600x1200", 1600, 1200},
6053 {"1920x1080", 1920, 1080},
6054 {"1920x1200", 1920, 1200}
6055 };
6056
6057 n = ARRAY_SIZE(common_modes);
6058
6059 for (i = 0; i < n; i++) {
6060 struct drm_display_mode *curmode = NULL;
6061 bool mode_existed = false;
6062
6063 if (common_modes[i].w > native_mode->hdisplay ||
6064 common_modes[i].h > native_mode->vdisplay ||
6065 (common_modes[i].w == native_mode->hdisplay &&
6066 common_modes[i].h == native_mode->vdisplay))
6067 continue;
6068
6069 list_for_each_entry(curmode, &connector->probed_modes, head) {
6070 if (common_modes[i].w == curmode->hdisplay &&
6071 common_modes[i].h == curmode->vdisplay) {
6072 mode_existed = true;
6073 break;
6074 }
6075 }
6076
6077 if (mode_existed)
6078 continue;
6079
6080 mode = amdgpu_dm_create_common_mode(encoder,
6081 common_modes[i].name, common_modes[i].w,
6082 common_modes[i].h);
6083 drm_mode_probed_add(connector, mode);
6084 amdgpu_dm_connector->num_modes++;
6085 }
6086}
6087
6088static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6089 struct edid *edid)
6090{
6091 struct amdgpu_dm_connector *amdgpu_dm_connector =
6092 to_amdgpu_dm_connector(connector);
6093
	if (edid) {
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/*
		 * Sort the probed modes before looking up the native mode;
		 * amdgpu_dm_get_native_mode() only inspects the head of the
		 * list, and sorting places the preferred mode there.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
6113}
6114
6115static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6116{
6117 struct amdgpu_dm_connector *amdgpu_dm_connector =
6118 to_amdgpu_dm_connector(connector);
6119 struct drm_encoder *encoder;
6120 struct edid *edid = amdgpu_dm_connector->edid;
6121
6122 encoder = amdgpu_dm_connector_to_encoder(connector);
6123
6124 if (!edid || !drm_edid_is_valid(edid)) {
6125 amdgpu_dm_connector->num_modes =
6126 drm_add_modes_noedid(connector, 640, 480);
6127 } else {
6128 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6129 amdgpu_dm_connector_add_common_modes(encoder, connector);
6130 }
6131 amdgpu_dm_fbc_init(connector);
6132
6133 return amdgpu_dm_connector->num_modes;
6134}
6135
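/*
 * Fill in the DM-specific connector fields and attach the DRM properties
 * (scaling, underscan, max bpc, ABM, HDR metadata, VRR, HDCP) appropriate
 * for this connector type.
 */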
6136void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6137 struct amdgpu_dm_connector *aconnector,
6138 int connector_type,
6139 struct dc_link *link,
6140 int link_index)
6141{
	struct amdgpu_device *adev = dm->ddev->dev_private;

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);
6150
6151 aconnector->connector_id = link_index;
6152 aconnector->dc_link = link;
6153 aconnector->base.interlace_allowed = false;
6154 aconnector->base.doublescan_allowed = false;
6155 aconnector->base.stereo_allowed = false;
6156 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6157 aconnector->hpd.hpd = AMDGPU_HPD_NONE;
6158 aconnector->audio_inst = -1;
6159 mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD support per connector type; leaving 'polled' at 0
	 * means hotplug detection is not supported on this connector.
	 */
	switch (connector_type) {
6166 case DRM_MODE_CONNECTOR_HDMIA:
6167 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6168 aconnector->base.ycbcr_420_allowed =
6169 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6170 break;
6171 case DRM_MODE_CONNECTOR_DisplayPort:
6172 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6173 aconnector->base.ycbcr_420_allowed =
6174 link->link_enc->features.dp_ycbcr420_supported ? true : false;
6175 break;
6176 case DRM_MODE_CONNECTOR_DVID:
6177 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6178 break;
6179 default:
6180 break;
6181 }
6182
6183 drm_object_attach_property(&aconnector->base.base,
6184 dm->ddev->mode_config.scaling_mode_property,
6185 DRM_MODE_SCALE_NONE);
6186
6187 drm_object_attach_property(&aconnector->base.base,
6188 adev->mode_info.underscan_property,
6189 UNDERSCAN_OFF);
6190 drm_object_attach_property(&aconnector->base.base,
6191 adev->mode_info.underscan_hborder_property,
6192 0);
6193 drm_object_attach_property(&aconnector->base.base,
6194 adev->mode_info.underscan_vborder_property,
6195 0);
6196
6197 if (!aconnector->mst_port)
6198 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6203
6204 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6205 dc_is_dmcu_initialized(adev->dm.dc)) {
6206 drm_object_attach_property(&aconnector->base.base,
6207 adev->mode_info.abm_level_property, 0);
6208 }
6209
6210 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6211 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6212 connector_type == DRM_MODE_CONNECTOR_eDP) {
6213 drm_object_attach_property(
6214 &aconnector->base.base,
6215 dm->ddev->mode_config.hdr_output_metadata_property, 0);
6216
6217 if (!aconnector->mst_port)
6218 drm_connector_attach_vrr_capable_property(&aconnector->base);
6219
6220#ifdef CONFIG_DRM_AMD_DC_HDCP
6221 if (adev->dm.hdcp_workqueue)
6222 drm_connector_attach_content_protection_property(&aconnector->base, true);
6223#endif
6224 }
6225}
6226
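/*
 * Bridge Linux i2c transfers onto DC's i2c engine: each i2c_msg becomes an
 * i2c_payload and the whole transaction is submitted as a single command.
 */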
6227static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6228 struct i2c_msg *msgs, int num)
6229{
6230 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6231 struct ddc_service *ddc_service = i2c->ddc_service;
6232 struct i2c_command cmd;
6233 int i;
6234 int result = -EIO;
6235
6236 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6237
6238 if (!cmd.payloads)
6239 return result;
6240
6241 cmd.number_of_payloads = num;
6242 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6243 cmd.speed = 100;
6244
6245 for (i = 0; i < num; i++) {
6246 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6247 cmd.payloads[i].address = msgs[i].addr;
6248 cmd.payloads[i].length = msgs[i].len;
6249 cmd.payloads[i].data = msgs[i].buf;
6250 }
6251
6252 if (dc_submit_i2c(
6253 ddc_service->ctx->dc,
6254 ddc_service->ddc_pin->hw_info.ddc_channel,
6255 &cmd))
6256 result = num;
6257
6258 kfree(cmd.payloads);
6259 return result;
6260}
6261
6262static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6263{
6264 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6265}
6266
6267static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6268 .master_xfer = amdgpu_dm_i2c_xfer,
6269 .functionality = amdgpu_dm_i2c_func,
6270};
6271
6272static struct amdgpu_i2c_adapter *
6273create_i2c(struct ddc_service *ddc_service,
6274 int link_index,
6275 int *res)
6276{
6277 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6278 struct amdgpu_i2c_adapter *i2c;
6279
6280 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6281 if (!i2c)
6282 return NULL;
6283 i2c->base.owner = THIS_MODULE;
6284 i2c->base.class = I2C_CLASS_DDC;
6285 i2c->base.dev.parent = &adev->pdev->dev;
6286 i2c->base.algo = &amdgpu_dm_i2c_algo;
6287 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6288 i2c_set_adapdata(&i2c->base, i2c);
6289 i2c->ddc_service = ddc_service;
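	/* Route transfers for this adapter through the DDC channel matching the link index. */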
6290 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6291
6292 return i2c;
6293}
6294
/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
6300static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6301 struct amdgpu_dm_connector *aconnector,
6302 uint32_t link_index,
6303 struct amdgpu_encoder *aencoder)
6304{
6305 int res = 0;
6306 int connector_type;
6307 struct dc *dc = dm->dc;
6308 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6309 struct amdgpu_i2c_adapter *i2c;
6310
6311 link->priv = aconnector;
6312
6313 DRM_DEBUG_DRIVER("%s()\n", __func__);
6314
6315 i2c = create_i2c(link->ddc, link->link_index, &res);
6316 if (!i2c) {
6317 DRM_ERROR("Failed to create i2c adapter data\n");
6318 return -ENOMEM;
6319 }
6320
6321 aconnector->i2c = i2c;
6322 res = i2c_add_adapter(&i2c->base);
6323
6324 if (res) {
6325 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6326 goto out_free;
6327 }
6328
6329 connector_type = to_drm_connector_type(link->connector_signal);
6330
6331 res = drm_connector_init_with_ddc(
6332 dm->ddev,
6333 &aconnector->base,
6334 &amdgpu_dm_connector_funcs,
6335 connector_type,
6336 &i2c->base);
6337
6338 if (res) {
6339 DRM_ERROR("connector_init failed\n");
6340 aconnector->connector_id = -1;
6341 goto out_free;
6342 }
6343
6344 drm_connector_helper_add(
6345 &aconnector->base,
6346 &amdgpu_dm_connector_helper_funcs);
6347
6348 amdgpu_dm_connector_init_helper(
6349 dm,
6350 aconnector,
6351 connector_type,
6352 link,
6353 link_index);
6354
6355 drm_connector_attach_encoder(
6356 &aconnector->base, &aencoder->base);
6357
6358 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6359 || connector_type == DRM_MODE_CONNECTOR_eDP)
6360 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6361
6362out_free:
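	/*
	 * TODO: if a failure happens after i2c_add_adapter() succeeded, the
	 * adapter should arguably be unregistered with i2c_del_adapter()
	 * before being freed; only the allocation is undone here.
	 */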
6363 if (res) {
6364 kfree(i2c);
6365 aconnector->i2c = NULL;
6366 }
6367 return res;
6368}
6369
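/* Build a bitmask with one bit set per available CRTC, e.g. 4 CRTCs -> 0xf. */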
6370int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6371{
6372 switch (adev->mode_info.num_crtc) {
6373 case 1:
6374 return 0x1;
6375 case 2:
6376 return 0x3;
6377 case 3:
6378 return 0x7;
6379 case 4:
6380 return 0xf;
6381 case 5:
6382 return 0x1f;
6383 case 6:
6384 default:
6385 return 0x3f;
6386 }
6387}
6388
6389static int amdgpu_dm_encoder_init(struct drm_device *dev,
6390 struct amdgpu_encoder *aencoder,
6391 uint32_t link_index)
6392{
6393 struct amdgpu_device *adev = dev->dev_private;
6394
6395 int res = drm_encoder_init(dev,
6396 &aencoder->base,
6397 &amdgpu_dm_encoder_funcs,
6398 DRM_MODE_ENCODER_TMDS,
6399 NULL);
6400
6401 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6402
6403 if (!res)
6404 aencoder->encoder_id = link_index;
6405 else
6406 aencoder->encoder_id = -1;
6407
6408 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6409
6410 return res;
6411}
6412
6413static void manage_dm_interrupts(struct amdgpu_device *adev,
6414 struct amdgpu_crtc *acrtc,
6415 bool enable)
6416{
	/*
	 * Translate the CRTC index into the IRQ source type used when
	 * enabling and disabling the pageflip interrupt below.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6425
6426 if (enable) {
6427 drm_crtc_vblank_on(&acrtc->base);
6428 amdgpu_irq_get(
6429 adev,
6430 &adev->pageflip_irq,
6431 irq_type);
6432 } else {
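		/* Tear down in reverse order: drop the pageflip IRQ, then vblank. */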
6433
6434 amdgpu_irq_put(
6435 adev,
6436 &adev->pageflip_irq,
6437 irq_type);
6438 drm_crtc_vblank_off(&acrtc->base);
6439 }
6440}
6441
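/*
 * Returns true if the scaling mode or the underscan enable/border settings
 * differ between the two connector states.
 */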
6442static bool
6443is_scaling_state_different(const struct dm_connector_state *dm_state,
6444 const struct dm_connector_state *old_dm_state)
6445{
6446 if (dm_state->scaling != old_dm_state->scaling)
6447 return true;
6448 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6449 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6450 return true;
6451 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6452 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6453 return true;
6454 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6455 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6456 return true;
6457 return false;
6458}
6459
6460#ifdef CONFIG_DRM_AMD_DC_HDCP
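/*
 * Decides whether the difference between the old and new connector state
 * requires an HDCP update (enable or disable) on the link.
 */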
6461static bool is_content_protection_different(struct drm_connector_state *state,
6462 const struct drm_connector_state *old_state,
6463 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6464{
6465 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6466
6467 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6468 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6469 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6470 return true;
6471 }

	/*
	 * Content protection is being re-enabled: keep it enabled rather
	 * than flag a change.
	 */
6474 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6475 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6476 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6477 return false;
6478 }

	/*
	 * S3 resume case: the old state is UNDESIRED and the restored state
	 * is ENABLED, so downgrade to DESIRED and let HDCP re-authenticate.
	 */
6481 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6482 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6483 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6484
	/*
	 * Only start HDCP when something is actually connected and enabled:
	 * guards against hotplug, headless S3 and DPMS-off cases.
	 */
6488 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6489 aconnector->dc_sink != NULL)
6490 return true;
6491
6492 if (old_state->content_protection == state->content_protection)
6493 return false;
6494
6495 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6496 return true;
6497
6498 return false;
6499}
6500
6501#endif
6502static void remove_stream(struct amdgpu_device *adev,
6503 struct amdgpu_crtc *acrtc,
6504 struct dc_stream_state *stream)
6505{
	/* This is the update-mode case: detach the CRTC from its OTG. */

6508 acrtc->otg_inst = -1;
6509 acrtc->enabled = false;
6510}
6511
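/*
 * Computes the DC cursor position for the given plane/CRTC, turning negative
 * on-screen coordinates into a hotspot offset; position->enable stays false
 * when the cursor has no FB or is fully off-screen.
 */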
6512static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6513 struct dc_cursor_position *position)
6514{
6515 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6516 int x, y;
6517 int xorigin = 0, yorigin = 0;
6518
6519 position->enable = false;
6520 position->x = 0;
6521 position->y = 0;
6522
6523 if (!crtc || !plane->state->fb)
6524 return 0;
6525
6526 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6527 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6528 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6529 __func__,
6530 plane->state->crtc_w,
6531 plane->state->crtc_h);
6532 return -EINVAL;
6533 }
6534
6535 x = plane->state->crtc_x;
6536 y = plane->state->crtc_y;
6537
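	/* A cursor entirely beyond the top-left corner stays disabled. */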
6538 if (x <= -amdgpu_crtc->max_cursor_width ||
6539 y <= -amdgpu_crtc->max_cursor_height)
6540 return 0;
6541
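	/*
	 * Clamp a partially off-screen cursor to the screen edge and carry
	 * the clipped amount in the hotspot instead.
	 */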
6542 if (x < 0) {
6543 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6544 x = 0;
6545 }
6546 if (y < 0) {
6547 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6548 y = 0;
6549 }
6550 position->enable = true;
6551 position->translate_by_source = true;
6552 position->x = x;
6553 position->y = y;
6554 position->x_hotspot = xorigin;
6555 position->y_hotspot = yorigin;
6556
6557 return 0;
6558}
6559
6560static void handle_cursor_update(struct drm_plane *plane,
6561 struct drm_plane_state *old_plane_state)
6562{
6563 struct amdgpu_device *adev = plane->dev->dev_private;
6564 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6565 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6566 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6567 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6568 uint64_t address = afb ? afb->address : 0;
6569 struct dc_cursor_position position;
6570 struct dc_cursor_attributes attributes;
6571 int ret;
6572
6573 if (!plane->state->fb && !old_plane_state->fb)
6574 return;
6575
6576 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6577 __func__,
6578 amdgpu_crtc->crtc_id,
6579 plane->state->crtc_w,
6580 plane->state->crtc_h);
6581
6582 ret = get_cursor_position(plane, crtc, &position);
6583 if (ret)
6584 return;
6585
6586 if (!position.enable) {
		/* Turn off the cursor. */
6588 if (crtc_state && crtc_state->stream) {
6589 mutex_lock(&adev->dm.dc_lock);
6590 dc_stream_set_cursor_position(crtc_state->stream,
6591 &position);
6592 mutex_unlock(&adev->dm.dc_lock);
6593 }
6594 return;
6595 }
6596
6597 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6598 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6599
6600 memset(&attributes, 0, sizeof(attributes));
6601 attributes.address.high_part = upper_32_bits(address);
6602 attributes.address.low_part = lower_32_bits(address);
6603 attributes.width = plane->state->crtc_w;
6604 attributes.height = plane->state->crtc_h;
6605 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6606 attributes.rotation_angle = 0;
6607 attributes.attribute_flags.value = 0;
6608
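	/* Assumes a linear cursor surface, so the pitch equals the width. */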
6609 attributes.pitch = attributes.width;
6610
6611 if (crtc_state->stream) {
6612 mutex_lock(&adev->dm.dc_lock);
6613 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6614 &attributes))
6615 DRM_ERROR("DC failed to set cursor attributes\n");
6616
6617 if (!dc_stream_set_cursor_position(crtc_state->stream,
6618 &position))
6619 DRM_ERROR("DC failed to set cursor position\n");
6620 mutex_unlock(&adev->dm.dc_lock);
6621 }
6622}
6623
6624static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6625{
6626
6627 assert_spin_locked(&acrtc->base.dev->event_lock);
6628 WARN_ON(acrtc->event);
6629
6630 acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
6633 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
6636 acrtc->base.state->event = NULL;
6637
6638 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6639 acrtc->crtc_id);
6640}
6641
6642static void update_freesync_state_on_stream(
6643 struct amdgpu_display_manager *dm,
6644 struct dm_crtc_state *new_crtc_state,
6645 struct dc_stream_state *new_stream,
6646 struct dc_plane_state *surface,
6647 u32 flip_timestamp_in_us)
6648{
6649 struct mod_vrr_params vrr_params;
6650 struct dc_info_packet vrr_infopacket = {0};
6651 struct amdgpu_device *adev = dm->adev;
6652 unsigned long flags;
6653
6654 if (!new_stream)
6655 return;
6656
	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
6662 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6663 return;
6664
6665 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6666 vrr_params = new_crtc_state->vrr_params;
6667
6668 if (surface) {
6669 mod_freesync_handle_preflip(
6670 dm->freesync_module,
6671 surface,
6672 new_stream,
6673 flip_timestamp_in_us,
6674 &vrr_params);
6675
6676 if (adev->family < AMDGPU_FAMILY_AI &&
6677 amdgpu_dm_vrr_active(new_crtc_state)) {
6678 mod_freesync_handle_v_update(dm->freesync_module,
6679 new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
6682 dc_stream_adjust_vmin_vmax(dm->dc,
6683 new_crtc_state->stream,
6684 &vrr_params.adjust);
6685 }
6686 }
6687
6688 mod_freesync_build_vrr_infopacket(
6689 dm->freesync_module,
6690 new_stream,
6691 &vrr_params,
6692 PACKET_TYPE_VRR,
6693 TRANSFER_FUNC_UNKNOWN,
6694 &vrr_infopacket);
6695
6696 new_crtc_state->freesync_timing_changed |=
6697 (memcmp(&new_crtc_state->vrr_params.adjust,
6698 &vrr_params.adjust,
6699 sizeof(vrr_params.adjust)) != 0);
6700
6701 new_crtc_state->freesync_vrr_info_changed |=
6702 (memcmp(&new_crtc_state->vrr_infopacket,
6703 &vrr_infopacket,
6704 sizeof(vrr_infopacket)) != 0);
6705
6706 new_crtc_state->vrr_params = vrr_params;
6707 new_crtc_state->vrr_infopacket = vrr_infopacket;
6708
6709 new_stream->adjust = new_crtc_state->vrr_params.adjust;
6710 new_stream->vrr_infopacket = vrr_infopacket;
6711
6712 if (new_crtc_state->freesync_vrr_info_changed)
6713 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6714 new_crtc_state->base.crtc->base.id,
6715 (int)new_crtc_state->base.vrr_enabled,
6716 (int)vrr_params.state);
6717
6718 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6719}
6720
6721static void pre_update_freesync_state_on_stream(
6722 struct amdgpu_display_manager *dm,
6723 struct dm_crtc_state *new_crtc_state)
6724{
6725 struct dc_stream_state *new_stream = new_crtc_state->stream;
6726 struct mod_vrr_params vrr_params;
6727 struct mod_freesync_config config = new_crtc_state->freesync_config;
6728 struct amdgpu_device *adev = dm->adev;
6729 unsigned long flags;
6730
6731 if (!new_stream)
6732 return;
6733
	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
6738 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6739 return;
6740
6741 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6742 vrr_params = new_crtc_state->vrr_params;
6743
6744 if (new_crtc_state->vrr_supported &&
6745 config.min_refresh_in_uhz &&
6746 config.max_refresh_in_uhz) {
6747 config.state = new_crtc_state->base.vrr_enabled ?
6748 VRR_STATE_ACTIVE_VARIABLE :
6749 VRR_STATE_INACTIVE;
6750 } else {
6751 config.state = VRR_STATE_UNSUPPORTED;
6752 }
6753
6754 mod_freesync_build_vrr_params(dm->freesync_module,
6755 new_stream,
6756 &config, &vrr_params);
6757
6758 new_crtc_state->freesync_timing_changed |=
6759 (memcmp(&new_crtc_state->vrr_params.adjust,
6760 &vrr_params.adjust,
6761 sizeof(vrr_params.adjust)) != 0);
6762
6763 new_crtc_state->vrr_params = vrr_params;
6764 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6765}
6766
6767static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6768 struct dm_crtc_state *new_state)
6769{
6770 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6771 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6772
6773 if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable the vblank irq, as
		 * a reenable after disable would compute bogus vblank/pflip
		 * timestamps if it likely happened inside display front-porch.
		 *
		 * We also need the vupdate irq for the actual core vblank
		 * handling at end of vblank.
		 */
6782 dm_set_vupdate_irq(new_state->base.crtc, true);
6783 drm_crtc_vblank_get(new_state->base.crtc);
6784 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6785 __func__, new_state->base.crtc->base.id);
6786 } else if (old_vrr_active && !new_vrr_active) {
		/* Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
6790 dm_set_vupdate_irq(new_state->base.crtc, false);
6791 drm_crtc_vblank_put(new_state->base.crtc);
6792 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6793 __func__, new_state->base.crtc->base.id);
6794 }
6795}
6796
6797static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6798{
6799 struct drm_plane *plane;
6800 struct drm_plane_state *old_plane_state, *new_plane_state;
6801 int i;
6802
	/*
	 * TODO: Make this per-stream so we don't issue redundant updates for
	 * commits with multiple streams.
	 */
6807 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6808 new_plane_state, i)
6809 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6810 handle_cursor_update(plane, old_plane_state);
6811}
6812
6813static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6814 struct dc_state *dc_state,
6815 struct drm_device *dev,
6816 struct amdgpu_display_manager *dm,
6817 struct drm_crtc *pcrtc,
6818 bool wait_for_vblank)
6819{
6820 uint32_t i;
6821 uint64_t timestamp_ns;
6822 struct drm_plane *plane;
6823 struct drm_plane_state *old_plane_state, *new_plane_state;
6824 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6825 struct drm_crtc_state *new_pcrtc_state =
6826 drm_atomic_get_new_crtc_state(state, pcrtc);
6827 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6828 struct dm_crtc_state *dm_old_crtc_state =
6829 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6830 int planes_count = 0, vpos, hpos;
6831 long r;
6832 unsigned long flags;
6833 struct amdgpu_bo *abo;
6834 uint64_t tiling_flags;
6835 bool tmz_surface = false;
6836 uint32_t target_vblank, last_flip_vblank;
6837 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6838 bool pflip_present = false;
6839 struct {
6840 struct dc_surface_update surface_updates[MAX_SURFACES];
6841 struct dc_plane_info plane_infos[MAX_SURFACES];
6842 struct dc_scaling_info scaling_infos[MAX_SURFACES];
6843 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6844 struct dc_stream_update stream_update;
6845 } *bundle;
6846
6847 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6848
6849 if (!bundle) {
6850 dm_error("Failed to allocate update bundle\n");
6851 goto cleanup;
6852 }
6853
	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It will remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
6859 if (acrtc_state->active_planes == 0)
6860 amdgpu_dm_commit_cursors(state);

	/* Update planes when needed. */
6863 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6864 struct drm_crtc *crtc = new_plane_state->crtc;
6865 struct drm_crtc_state *new_crtc_state;
6866 struct drm_framebuffer *fb = new_plane_state->fb;
6867 bool plane_needs_flip;
6868 struct dc_plane_state *dc_plane;
6869 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* The cursor plane is handled after the stream updates. */
6872 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6873 continue;
6874
6875 if (!fb || !crtc || pcrtc != crtc)
6876 continue;
6877
6878 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6879 if (!new_crtc_state->active)
6880 continue;
6881
6882 dc_plane = dm_new_plane_state->dc_state;
6883
6884 bundle->surface_updates[planes_count].surface = dc_plane;
6885 if (new_pcrtc_state->color_mgmt_changed) {
6886 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6887 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6888 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
6889 }
6890
6891 fill_dc_scaling_info(new_plane_state,
6892 &bundle->scaling_infos[planes_count]);
6893
6894 bundle->surface_updates[planes_count].scaling_info =
6895 &bundle->scaling_infos[planes_count];
6896
6897 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6898
6899 pflip_present = pflip_present || plane_needs_flip;
6900
6901 if (!plane_needs_flip) {
6902 planes_count += 1;
6903 continue;
6904 }
6905
6906 abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do a limited wait to avoid
		 * deadlock during GPU reset, when this fence will not signal
		 * but we hold the reservation lock for the BO.
		 */
6913 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6914 false,
6915 msecs_to_jiffies(5000));
6916 if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!\n");

		/*
		 * TODO: This might fail and is hence better not used; wait
		 * explicitly on the fences instead. In general this should
		 * only be called for a blocking commit, as per the framework
		 * helpers.
		 */
6925 r = amdgpu_bo_reserve(abo, true);
6926 if (unlikely(r != 0))
6927 DRM_ERROR("failed to reserve buffer before flip\n");
6928
6929 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6930
6931 tmz_surface = amdgpu_bo_encrypted(abo);
6932
6933 amdgpu_bo_unreserve(abo);
6934
6935 fill_dc_plane_info_and_addr(
6936 dm->adev, new_plane_state, tiling_flags,
6937 &bundle->plane_infos[planes_count],
6938 &bundle->flip_addrs[planes_count].address,
6939 tmz_surface,
6940 false);
6941
6942 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6943 new_plane_state->plane->index,
6944 bundle->plane_infos[planes_count].dcc.enable);
6945
6946 bundle->surface_updates[planes_count].plane_info =
6947 &bundle->plane_infos[planes_count];
6948
		/*
		 * Only allow immediate flips for fast updates that don't
		 * change the FB pitch, DCC state, rotation or mirroring.
		 */
6953 bundle->flip_addrs[planes_count].flip_immediate =
6954 crtc->state->async_flip &&
6955 acrtc_state->update_type == UPDATE_TYPE_FAST;
6956
6957 timestamp_ns = ktime_get_ns();
6958 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6959 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6960 bundle->surface_updates[planes_count].surface = dc_plane;
6961
6962 if (!bundle->surface_updates[planes_count].surface) {
6963 DRM_ERROR("No surface for CRTC: id=%d\n",
6964 acrtc_attach->crtc_id);
6965 continue;
6966 }
6967
6968 if (plane == pcrtc->primary)
6969 update_freesync_state_on_stream(
6970 dm,
6971 acrtc_state,
6972 acrtc_state->stream,
6973 dc_plane,
6974 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6975
6976 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6977 __func__,
6978 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6979 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6980
6981 planes_count += 1;
6982
6983 }
6984
6985 if (pflip_present) {
6986 if (!vrr_active) {
			/*
			 * Non-VRR fixed refresh rate mode: throttle against
			 * the current vblank count, so at most one flip is
			 * programmed per refresh cycle.
			 */
6993 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
			/*
			 * For variable refresh rate mode only:
			 * Get the vblank count of the last completed flip to
			 * avoid more than one VRR flip per video frame through
			 * use of throttling, but allow flip programming
			 * anywhere in the (possibly large) VRR vblank interval
			 * for fine-grained flip timing control and more
			 * opportunity to avoid stutter on late flip
			 * submission.
			 */
7004 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7005 last_flip_vblank = acrtc_attach->last_flip_vblank;
7006 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7007 }
7008
7009 target_vblank = last_flip_vblank + wait_for_vblank;

		/*
		 * Wait until we're out of the vertical blank period before the
		 * one targeted by the flip.
		 */
7015 while ((acrtc_attach->enabled &&
7016 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7017 0, &vpos, &hpos, NULL,
7018 NULL, &pcrtc->hwmode)
7019 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7020 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7021 (int)(target_vblank -
7022 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7023 usleep_range(1000, 1100);
7024 }
7025
7026 if (acrtc_attach->base.state->event) {
7027 drm_crtc_vblank_get(pcrtc);
7028
7029 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7030
7031 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7032 prepare_flip_isr(acrtc_attach);
7033
7034 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7035 }
7036
7037 if (acrtc_state->stream) {
7038 if (acrtc_state->freesync_vrr_info_changed)
7039 bundle->stream_update.vrr_infopacket =
7040 &acrtc_state->stream->vrr_infopacket;
7041 }
7042 }

	/* Update the planes if changed, or disable them if we don't have any. */
7045 if ((planes_count || acrtc_state->active_planes == 0) &&
7046 acrtc_state->stream) {
7047 bundle->stream_update.stream = acrtc_state->stream;
7048 if (new_pcrtc_state->mode_changed) {
7049 bundle->stream_update.src = acrtc_state->stream->src;
7050 bundle->stream_update.dst = acrtc_state->stream->dst;
7051 }
7052
7053 if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
7058 bundle->stream_update.gamut_remap =
7059 &acrtc_state->stream->gamut_remap_matrix;
7060 bundle->stream_update.output_csc_transform =
7061 &acrtc_state->stream->csc_color_matrix;
7062 bundle->stream_update.out_transfer_func =
7063 acrtc_state->stream->out_transfer_func;
7064 }
7065
7066 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7067 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7068 bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If the FreeSync state on the stream has changed then we need
		 * to re-adjust the min/max bounds now, since DC doesn't handle
		 * this as part of commit.
		 */
7075 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7076 amdgpu_dm_vrr_active(acrtc_state)) {
7077 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7078 dc_stream_adjust_vmin_vmax(
7079 dm->dc, acrtc_state->stream,
7080 &acrtc_state->vrr_params.adjust);
7081 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7082 }
7083 mutex_lock(&dm->dc_lock);
7084 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7085 acrtc_state->stream->link->psr_settings.psr_allow_active)
7086 amdgpu_dm_psr_disable(acrtc_state->stream);
7087
7088 dc_commit_updates_for_stream(dm->dc,
7089 bundle->surface_updates,
7090 planes_count,
7091 acrtc_state->stream,
7092 &bundle->stream_update,
7093 dc_state);
7094
7095 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7096 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7097 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7098 amdgpu_dm_link_setup_psr(acrtc_state->stream);
7099 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7100 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7101 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7102 amdgpu_dm_psr_enable(acrtc_state->stream);
7103 }
7104
7105 mutex_unlock(&dm->dc_lock);
7106 }
7107
	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
7113 if (acrtc_state->active_planes)
7114 amdgpu_dm_commit_cursors(state);
7115
7116cleanup:
7117 kfree(bundle);
7118}
7119
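/*
 * Walks the atomic state and notifies the audio component about ELD
 * changes: removals first (CRTC changed or disabled), then additions.
 */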
7120static void amdgpu_dm_commit_audio(struct drm_device *dev,
7121 struct drm_atomic_state *state)
7122{
7123 struct amdgpu_device *adev = dev->dev_private;
7124 struct amdgpu_dm_connector *aconnector;
7125 struct drm_connector *connector;
7126 struct drm_connector_state *old_con_state, *new_con_state;
7127 struct drm_crtc_state *new_crtc_state;
7128 struct dm_crtc_state *new_dm_crtc_state;
7129 const struct dc_stream_status *status;
7130 int i, inst;

	/* Notify audio device removals. */
7133 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7134 if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
7136 goto notify;
7137 }
7138
7139 if (!new_con_state->crtc)
7140 continue;
7141
7142 new_crtc_state = drm_atomic_get_new_crtc_state(
7143 state, new_con_state->crtc);
7144
7145 if (!new_crtc_state)
7146 continue;
7147
7148 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7149 continue;
7150
7151 notify:
7152 aconnector = to_amdgpu_dm_connector(connector);
7153
7154 mutex_lock(&adev->dm.audio_lock);
7155 inst = aconnector->audio_inst;
7156 aconnector->audio_inst = -1;
7157 mutex_unlock(&adev->dm.audio_lock);
7158
7159 amdgpu_dm_audio_eld_notify(adev, inst);
7160 }
7161
	/* Notify audio device additions. */
7163 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7164 if (!new_con_state->crtc)
7165 continue;
7166
7167 new_crtc_state = drm_atomic_get_new_crtc_state(
7168 state, new_con_state->crtc);
7169
7170 if (!new_crtc_state)
7171 continue;
7172
7173 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7174 continue;
7175
7176 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7177 if (!new_dm_crtc_state->stream)
7178 continue;
7179
7180 status = dc_stream_get_status(new_dm_crtc_state->stream);
7181 if (!status)
7182 continue;
7183
7184 aconnector = to_amdgpu_dm_connector(connector);
7185
7186 mutex_lock(&adev->dm.audio_lock);
7187 inst = status->audio_inst;
7188 aconnector->audio_inst = inst;
7189 mutex_unlock(&adev->dm.audio_lock);
7190
7191 amdgpu_dm_audio_eld_notify(adev, inst);
7192 }
7193}
7194

/*
 * Enable interrupts on CRTCs that are newly active, have undergone a
 * modeset, or have active planes again.
 *
 * Done in two passes, based on the for_modeset flag:
 * Pass 1: For CRTCs going through a modeset.
 * Pass 2: For CRTCs going from zero to n active planes.
 *
 * Interrupts can only be enabled after the planes are programmed, so this
 * requires a two-pass approach since we don't want to just defer the
 * interrupts until after commit planes every time.
 */
7207static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
7208 struct drm_atomic_state *state,
7209 bool for_modeset)
7210{
7211 struct amdgpu_device *adev = dev->dev_private;
7212 struct drm_crtc *crtc;
7213 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7214 int i;
7215#ifdef CONFIG_DEBUG_FS
7216 enum amdgpu_dm_pipe_crc_source source;
7217#endif
7218
7219 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7220 new_crtc_state, i) {
7221 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7222 struct dm_crtc_state *dm_new_crtc_state =
7223 to_dm_crtc_state(new_crtc_state);
7224 struct dm_crtc_state *dm_old_crtc_state =
7225 to_dm_crtc_state(old_crtc_state);
7226 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
7227 bool run_pass;
7228
7229 run_pass = (for_modeset && modeset) ||
7230 (!for_modeset && !modeset &&
7231 !dm_old_crtc_state->interrupts_enabled);
7232
7233 if (!run_pass)
7234 continue;
7235
7236 if (!dm_new_crtc_state->interrupts_enabled)
7237 continue;
7238
7239 manage_dm_interrupts(adev, acrtc, true);
7240
7241#ifdef CONFIG_DEBUG_FS
		/* The stream has changed, so CRC capture needs to be re-enabled. */
7243 source = dm_new_crtc_state->crc_src;
7244 if (amdgpu_dm_is_valid_crc_source(source)) {
7245 amdgpu_dm_crtc_configure_crc_source(
7246 crtc, dm_new_crtc_state,
7247 dm_new_crtc_state->crc_src);
7248 }
7249#endif
7250 }
7251}
7252
/*
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state
 *
 * Copy the mirrored transient state flags from DRM to the DC stream state
 * attached to the CRTC.
 */
7261static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7262 struct dc_stream_state *stream_state)
7263{
7264 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7265}
7266
7267static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7268 struct drm_atomic_state *state,
7269 bool nonblock)
7270{
7271 struct drm_crtc *crtc;
7272 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7273 struct amdgpu_device *adev = dev->dev_private;
7274 int i;
7275
	/*
	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
	 * a modeset, being disabled, or have no active planes.
	 *
	 * It's done in atomic commit rather than commit tail for now since
	 * some of the interrupt handlers access the current CRTC state and
	 * potentially the stream pointer itself.
	 *
	 * Since the atomic state is swapped within atomic commit and not
	 * within commit tail, this would leave the new state (that hasn't
	 * been committed yet) being accessed from within the handlers.
	 *
	 * TODO: Fix this so we can do this in commit tail and not have to
	 * block in atomic check.
	 */
7291 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7292 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7293 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7294 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7295
7296 if (dm_old_crtc_state->interrupts_enabled &&
7297 (!dm_new_crtc_state->interrupts_enabled ||
7298 drm_atomic_crtc_needs_modeset(new_crtc_state)))
7299 manage_dm_interrupts(adev, acrtc, false);
7300 }
7301
	/*
	 * TODO: Add a check here for SoCs that support a hardware cursor
	 * plane, to unset legacy_cursor_update.
	 */
7306 return drm_atomic_helper_commit(dev, state, nonblock);

	/* TODO: Handle EINTR and re-enable the IRQs. */
7309}
7310
/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failures here imply a hardware failure,
 * since atomic check should have filtered anything non-kosher.
 */
7319static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7320{
7321 struct drm_device *dev = state->dev;
7322 struct amdgpu_device *adev = dev->dev_private;
7323 struct amdgpu_display_manager *dm = &adev->dm;
7324 struct dm_atomic_state *dm_state;
7325 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7326 uint32_t i, j;
7327 struct drm_crtc *crtc;
7328 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7329 unsigned long flags;
7330 bool wait_for_vblank = true;
7331 struct drm_connector *connector;
7332 struct drm_connector_state *old_con_state, *new_con_state;
7333 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7334 int crtc_disable_count = 0;
7335
7336 drm_atomic_helper_update_legacy_modeset_state(dev, state);
7337
7338 dm_state = dm_atomic_get_new_state(state);
7339 if (dm_state && dm_state->context) {
7340 dc_state = dm_state->context;
7341 } else {
		/* No state changed, retain the current state. */
7343 dc_state_temp = dc_create_state(dm->dc);
7344 ASSERT(dc_state_temp);
7345 dc_state = dc_state_temp;
7346 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7347 }
7348
	/* Update changed items. */
7350 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7351 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7352
7353 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7354 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7355
7356 DRM_DEBUG_DRIVER(
7357 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7358 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7359 "connectors_changed:%d\n",
7360 acrtc->crtc_id,
7361 new_crtc_state->enable,
7362 new_crtc_state->active,
7363 new_crtc_state->planes_changed,
7364 new_crtc_state->mode_changed,
7365 new_crtc_state->active_changed,
7366 new_crtc_state->connectors_changed);
7367
		/* Copy all transient state flags into the DC state. */
7369 if (dm_new_crtc_state->stream) {
7370 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7371 dm_new_crtc_state->stream);
7372 }
7373
		/*
		 * Handle the headless hotplug case: update new_state and
		 * insert it even when there's no CRTC change.
		 */
7378 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7379
7380 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7381
7382 if (!dm_new_crtc_state->stream) {
				/*
				 * This could happen because of issues with
				 * userspace notification delivery: userspace
				 * tries to set a mode on a display that is in
				 * fact disconnected, so dc_sink is NULL on the
				 * aconnector. We expect a reset mode to come
				 * soon.
				 *
				 * This can also happen when an unplug is done
				 * during the resume sequence.
				 *
				 * In this case we want to pretend we still
				 * have a sink to keep the pipe running, so
				 * that hw state is consistent with sw state.
				 */
7398 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7399 __func__, acrtc->base.base.id);
7400 continue;
7401 }
7402
7403 if (dm_old_crtc_state->stream)
7404 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7405
7406 pm_runtime_get_noresume(dev->dev);
7407
7408 acrtc->enabled = true;
7409 acrtc->hw_mode = new_crtc_state->mode;
7410 crtc->hwmode = new_crtc_state->mode;
7411 } else if (modereset_required(new_crtc_state)) {
7412 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7413
7414 if (dm_old_crtc_state->stream) {
7415 if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7416 amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7417
7418 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7419 }
7420 }
7421 }
7422
7423 if (dc_state) {
7424 dm_enable_per_frame_crtc_master_sync(dc_state);
7425 mutex_lock(&dm->dc_lock);
7426 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7427 mutex_unlock(&dm->dc_lock);
7428 }
7429
7430 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7431 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7432
7433 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7434
7435 if (dm_new_crtc_state->stream != NULL) {
7436 const struct dc_stream_status *status =
7437 dc_stream_get_status(dm_new_crtc_state->stream);
7438
7439 if (!status)
7440 status = dc_stream_get_status_from_state(dc_state,
7441 dm_new_crtc_state->stream);
7442
7443 if (!status)
7444 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7445 else
7446 acrtc->otg_inst = status->primary_otg_inst;
7447 }
7448 }
7449#ifdef CONFIG_DRM_AMD_DC_HDCP
7450 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7451 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7452 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7453 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7454
7455 new_crtc_state = NULL;
7456
7457 if (acrtc)
7458 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7459
7460 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7461
7462 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7463 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7464 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7465 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7466 continue;
7467 }
7468
7469 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7470 hdcp_update_display(
7471 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7472 new_con_state->hdcp_content_type,
7473 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7474 : false);
7475 }
7476#endif
7477
7478
7479 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7480 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7481 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7482 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7483 struct dc_surface_update dummy_updates[MAX_SURFACES];
7484 struct dc_stream_update stream_update;
7485 struct dc_info_packet hdr_packet;
7486 struct dc_stream_status *status = NULL;
7487 bool abm_changed, hdr_changed, scaling_changed;
7488
7489 memset(&dummy_updates, 0, sizeof(dummy_updates));
7490 memset(&stream_update, 0, sizeof(stream_update));
7491
7492 if (acrtc) {
7493 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7494 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7495 }
7496
		/* Skip any modesets/resets. */
7498 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7499 continue;
7500
7501 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7502 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7503
7504 scaling_changed = is_scaling_state_different(dm_new_con_state,
7505 dm_old_con_state);
7506
7507 abm_changed = dm_new_crtc_state->abm_level !=
7508 dm_old_crtc_state->abm_level;
7509
7510 hdr_changed =
7511 is_hdr_metadata_different(old_con_state, new_con_state);
7512
7513 if (!scaling_changed && !abm_changed && !hdr_changed)
7514 continue;
7515
7516 stream_update.stream = dm_new_crtc_state->stream;
7517 if (scaling_changed) {
7518 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7519 dm_new_con_state, dm_new_crtc_state->stream);
7520
7521 stream_update.src = dm_new_crtc_state->stream->src;
7522 stream_update.dst = dm_new_crtc_state->stream->dst;
7523 }
7524
7525 if (abm_changed) {
7526 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7527
7528 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7529 }
7530
7531 if (hdr_changed) {
7532 fill_hdr_info_packet(new_con_state, &hdr_packet);
7533 stream_update.hdr_static_metadata = &hdr_packet;
7534 }
7535
		status = dc_stream_get_status(dm_new_crtc_state->stream);
		if (WARN_ON(!status))
			continue;
		WARN_ON(!status->plane_count);
7539
		/*
		 * TODO: DC refuses to perform stream updates without a
		 * dc_surface_update. Here we create an empty update on each
		 * plane. To fix this, DC should permit updating only stream
		 * properties.
		 */
7545 for (j = 0; j < status->plane_count; j++)
7546 dummy_updates[j].surface = status->plane_states[0];
7547
7548
7549 mutex_lock(&dm->dc_lock);
7550 dc_commit_updates_for_stream(dm->dc,
7551 dummy_updates,
7552 status->plane_count,
7553 dm_new_crtc_state->stream,
7554 &stream_update,
7555 dc_state);
7556 mutex_unlock(&dm->dc_lock);
7557 }
7558
	/* Count the number of newly disabled CRTCs for dropping PM refs later. */
7560 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7561 new_crtc_state, i) {
7562 if (old_crtc_state->active && !new_crtc_state->active)
7563 crtc_disable_count++;
7564
7565 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7566 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7567
		/* Update freesync state before amdgpu_dm_handle_vrr_transition(). */
7569 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7570
		/* Handle VRR on->off / off->on transitions. */
7572 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7573 dm_new_crtc_state);
7574 }
7575
	/* Enable interrupts for CRTCs going through a modeset. */
7577 amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7578
7579 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7580 if (new_crtc_state->async_flip)
7581 wait_for_vblank = false;
7582
	/* Update planes when needed per CRTC. */
7584 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7585 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7586
7587 if (dm_new_crtc_state->stream)
7588 amdgpu_dm_commit_planes(state, dc_state, dev,
7589 dm, crtc, wait_for_vblank);
7590 }
7591
	/* Enable interrupts for CRTCs going from 0 to n active planes. */
7593 amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7594
	/* Update audio ELD notifications for all affected connectors. */
7596 amdgpu_dm_commit_audio(dev, state);
7597
	/*
	 * Send a vblank event for each CRTC whose event was not handled in
	 * flip, and mark the event consumed for
	 * drm_atomic_helper_commit_hw_done().
	 */
7602 spin_lock_irqsave(&adev->ddev->event_lock, flags);
7603 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7604
7605 if (new_crtc_state->event)
7606 drm_send_event_locked(dev, &new_crtc_state->event->base);
7607
7608 new_crtc_state->event = NULL;
7609 }
7610 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7611
	/* Signal HW programming completion. */
7613 drm_atomic_helper_commit_hw_done(state);
7614
7615 if (wait_for_vblank)
7616 drm_atomic_helper_wait_for_flip_done(dev, state);
7617
7618 drm_atomic_helper_cleanup_planes(dev, state);
7619
	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore.
	 */
7625 for (i = 0; i < crtc_disable_count; i++)
7626 pm_runtime_put_autosuspend(dev->dev);
7627 pm_runtime_mark_last_busy(dev->dev);
7628
7629 if (dc_state_temp)
7630 dc_release_state(dc_state_temp);
7631}
7632
7633
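/*
 * Forces an atomic commit on @connector's CRTC with mode_changed set, to
 * re-program the currently configured mode.
 */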
7634static int dm_force_atomic_commit(struct drm_connector *connector)
7635{
7636 int ret = 0;
7637 struct drm_device *ddev = connector->dev;
7638 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7639 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7640 struct drm_plane *plane = disconnected_acrtc->base.primary;
7641 struct drm_connector_state *conn_state;
7642 struct drm_crtc_state *crtc_state;
7643 struct drm_plane_state *plane_state;
7644
7645 if (!state)
7646 return -ENOMEM;
7647
7648 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7649
	/*
	 * Construct an atomic state to restore the previous display setting:
	 * first attach the connector to the atomic state.
	 */
7655 conn_state = drm_atomic_get_connector_state(state, connector);
7656
7657 ret = PTR_ERR_OR_ZERO(conn_state);
7658 if (ret)
7659 goto err;
7660
	/* Attach the CRTC to the atomic state. */
7662 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7663
7664 ret = PTR_ERR_OR_ZERO(crtc_state);
7665 if (ret)
7666 goto err;
7667
	/* Force a restore. */
7669 crtc_state->mode_changed = true;
7670
	/* Attach the plane to the atomic state. */
7672 plane_state = drm_atomic_get_plane_state(state, plane);
7673
7674 ret = PTR_ERR_OR_ZERO(plane_state);
7675 if (ret)
7676 goto err;
7677

	/* Call commit internally with the state we just constructed. */
7680 ret = drm_atomic_commit(state);
7681 if (!ret)
7682 return 0;
7683
7684err:
7685 DRM_ERROR("Restoring old state failed with %i\n", ret);
7686 drm_atomic_state_put(state);
7687
7688 return ret;
7689}
7690
/*
 * This function handles all cases when a set mode does not come upon
 * hotplug. This includes when a display is unplugged and then plugged back
 * into the same port, and when running without a usermode desktop manager.
 */
7696void dm_restore_drm_connector_state(struct drm_device *dev,
7697 struct drm_connector *connector)
7698{
7699 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7700 struct amdgpu_crtc *disconnected_acrtc;
7701 struct dm_crtc_state *acrtc_state;
7702
7703 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7704 return;
7705
7706 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7707 if (!disconnected_acrtc)
7708 return;
7709
7710 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7711 if (!acrtc_state->stream)
7712 return;
7713
	/*
	 * If the previous sink is not released and is different from the
	 * current one, we deduce we are in a state where we cannot rely on a
	 * usermode call to turn on the display, so we do it here.
	 */
7719 if (acrtc_state->stream->sink != aconnector->dc_sink)
7720 dm_force_atomic_commit(&aconnector->base);
7721}
7722
/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
7727static int do_aquire_global_lock(struct drm_device *dev,
7728 struct drm_atomic_state *state)
7729{
7730 struct drm_crtc *crtc;
7731 struct drm_crtc_commit *commit;
7732 long ret;
7733
	/*
	 * Adding all modeset locks to acquire_ctx will ensure that when the
	 * framework releases it, the extra locks we are locking here will get
	 * released as well.
	 */
7739 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7740 if (ret)
7741 return ret;
7742
7743 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7744 spin_lock(&crtc->commit_lock);
7745 commit = list_first_entry_or_null(&crtc->commit_list,
7746 struct drm_crtc_commit, commit_entry);
7747 if (commit)
7748 drm_crtc_commit_get(commit);
7749 spin_unlock(&crtc->commit_lock);
7750
7751 if (!commit)
7752 continue;
7753
		/*
		 * Make sure all pending HW programming has completed and all
		 * page flips are done.
		 */
7758 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7759
7760 if (ret > 0)
7761 ret = wait_for_completion_interruptible_timeout(
7762 &commit->flip_done, 10*HZ);
7763
7764 if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);
7767
7768 drm_crtc_commit_put(commit);
7769 }
7770
7771 return ret < 0 ? ret : 0;
7772}
7773
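/*
 * Derives the FreeSync/VRR configuration for a CRTC from the connector's
 * advertised refresh range and the nominal refresh rate of the mode.
 */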
7774static void get_freesync_config_for_crtc(
7775 struct dm_crtc_state *new_crtc_state,
7776 struct dm_connector_state *new_con_state)
7777{
7778 struct mod_freesync_config config = {0};
7779 struct amdgpu_dm_connector *aconnector =
7780 to_amdgpu_dm_connector(new_con_state->base.connector);
7781 struct drm_display_mode *mode = &new_crtc_state->base.mode;
7782 int vrefresh = drm_mode_vrefresh(mode);
7783
7784 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7785 vrefresh >= aconnector->min_vfreq &&
7786 vrefresh <= aconnector->max_vfreq;
7787
7788 if (new_crtc_state->vrr_supported) {
7789 new_crtc_state->stream->ignore_msa_timing_param = true;
7790 config.state = new_crtc_state->base.vrr_enabled ?
7791 VRR_STATE_ACTIVE_VARIABLE :
7792 VRR_STATE_INACTIVE;
7793 config.min_refresh_in_uhz =
7794 aconnector->min_vfreq * 1000000;
7795 config.max_refresh_in_uhz =
7796 aconnector->max_vfreq * 1000000;
7797 config.vsif_supported = true;
7798 config.btr = true;
7799 }
7800
7801 new_crtc_state->freesync_config = config;
7802}
7803
7804static void reset_freesync_config_for_crtc(
7805 struct dm_crtc_state *new_crtc_state)
7806{
7807 new_crtc_state->vrr_supported = false;
7808
7809 memset(&new_crtc_state->vrr_params, 0,
7810 sizeof(new_crtc_state->vrr_params));
7811 memset(&new_crtc_state->vrr_infopacket, 0,
7812 sizeof(new_crtc_state->vrr_infopacket));
7813}
7814
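/*
 * Adds or removes the DC stream associated with this CRTC in the target
 * dc_state, depending on @enable, and flags when the commit requires the
 * full lock-and-validate pass.
 */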
7815static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7816 struct drm_atomic_state *state,
7817 struct drm_crtc *crtc,
7818 struct drm_crtc_state *old_crtc_state,
7819 struct drm_crtc_state *new_crtc_state,
7820 bool enable,
7821 bool *lock_and_validation_needed)
7822{
7823 struct dm_atomic_state *dm_state = NULL;
7824 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7825 struct dc_stream_state *new_stream;
7826 int ret = 0;
7827
	/*
	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of
	 * dc_validation_set.
	 */
7832 struct amdgpu_crtc *acrtc = NULL;
7833 struct amdgpu_dm_connector *aconnector = NULL;
7834 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7835 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7836
7837 new_stream = NULL;
7838
7839 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7840 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7841 acrtc = to_amdgpu_crtc(crtc);
7842 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7843
	/* TODO: This hack should go away. */
7845 if (aconnector && enable) {
7846
7847 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7848 &aconnector->base);
7849 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7850 &aconnector->base);
7851
7852 if (IS_ERR(drm_new_conn_state)) {
7853 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7854 goto fail;
7855 }
7856
7857 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7858 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7859
7860 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7861 goto skip_modeset;
7862
7863 new_stream = create_validate_stream_for_sink(aconnector,
7864 &new_crtc_state->mode,
7865 dm_new_conn_state,
7866 dm_old_crtc_state->stream);
7867
		/*
		 * We can have no stream on ACTION_SET if a display was
		 * disconnected during suspend; in this case it is not an
		 * error, the OS will be updated after detection and will do
		 * the right thing on the next atomic commit.
		 */
7875 if (!new_stream) {
7876 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7877 __func__, acrtc->base.base.id);
7878 ret = -ENOMEM;
7879 goto fail;
7880 }
7881
7882 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7883
7884 ret = fill_hdr_info_packet(drm_new_conn_state,
7885 &new_stream->hdr_static_metadata);
7886 if (ret)
7887 goto fail;
7888
		/*
		 * If the stream is unchanged (including scaling) relative to
		 * the old stream, downgrade this commit to a fast update by
		 * clearing mode_changed; no full modeset is required.
		 */
7898 if (dm_new_crtc_state->stream &&
7899 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7900 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7901 new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
					 new_crtc_state->mode_changed);
7904 }
7905 }
7906
	/* mode_changed may have been updated above; check it again. */
7908 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7909 goto skip_modeset;
7910
7911 DRM_DEBUG_DRIVER(
7912 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7913 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7914 "connectors_changed:%d\n",
7915 acrtc->crtc_id,
7916 new_crtc_state->enable,
7917 new_crtc_state->active,
7918 new_crtc_state->planes_changed,
7919 new_crtc_state->mode_changed,
7920 new_crtc_state->active_changed,
7921 new_crtc_state->connectors_changed);
7922
	/* Remove the stream for any changed/disabled CRTC. */
7924 if (!enable) {
7925
7926 if (!dm_old_crtc_state->stream)
7927 goto skip_modeset;
7928
7929 ret = dm_atomic_get_state(state, &dm_state);
7930 if (ret)
7931 goto fail;
7932
7933 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7934 crtc->base.id);
7935
		/* i.e. reset mode */
7937 if (dc_remove_stream_from_ctx(
7938 dm->dc,
7939 dm_state->context,
7940 dm_old_crtc_state->stream) != DC_OK) {
7941 ret = -EINVAL;
7942 goto fail;
7943 }
7944
7945 dc_stream_release(dm_old_crtc_state->stream);
7946 dm_new_crtc_state->stream = NULL;
7947
7948 reset_freesync_config_for_crtc(dm_new_crtc_state);
7949
7950 *lock_and_validation_needed = true;
7951
	} else { /* Add a stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer dereference on
		 * new_stream when added MST connectors are not found in the
		 * existing crtc_state in chained mode.
		 * TODO: dig out the root cause.
		 */
7958 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7959 goto skip_modeset;
7960
7961 if (modereset_required(new_crtc_state))
7962 goto skip_modeset;
7963
7964 if (modeset_required(new_crtc_state, new_stream,
7965 dm_old_crtc_state->stream)) {
7966
7967 WARN_ON(dm_new_crtc_state->stream);
7968
7969 ret = dm_atomic_get_state(state, &dm_state);
7970 if (ret)
7971 goto fail;
7972
7973 dm_new_crtc_state->stream = new_stream;
7974
7975 dc_stream_retain(new_stream);
7976
7977 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7978 crtc->base.id);
7979
7980 if (dc_add_stream_to_ctx(
7981 dm->dc,
7982 dm_state->context,
7983 dm_new_crtc_state->stream) != DC_OK) {
7984 ret = -EINVAL;
7985 goto fail;
7986 }
7987
7988 *lock_and_validation_needed = true;
7989 }
7990 }
7991
7992skip_modeset:
	/* Release the extra reference on new_stream. */
7994 if (new_stream)
7995 dc_stream_release(new_stream);
7996
	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
8001 if (!(enable && aconnector && new_crtc_state->enable &&
8002 new_crtc_state->active))
8003 return 0;
8004
	/*
	 * Given the above conditions, the DC state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added to the
	 *    DC context, or already is on the context),
	 * 2. it has a valid connector attached, and
	 * 3. it is currently active and enabled.
	 * => The DC stream state currently exists.
	 */
8012 BUG_ON(dm_new_crtc_state->stream == NULL);
8013
	/* Scaling or underscan settings */
8015 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8016 update_stream_scaling_settings(
8017 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8018
	/* ABM settings */
8020 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8021
	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
8026 if (dm_new_crtc_state->base.color_mgmt_changed ||
8027 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8028 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8029 if (ret)
8030 goto fail;
8031 }
8032
	/* Update FreeSync settings. */
8034 get_freesync_config_for_crtc(dm_new_crtc_state,
8035 dm_new_conn_state);
8036
8037 return ret;
8038
8039fail:
8040 if (new_stream)
8041 dc_stream_release(new_stream);
8042 return ret;
8043}
8044
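/*
 * Decides whether a plane update can be applied in place or whether the
 * plane must be removed from the DC context and recreated.
 */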
8045static bool should_reset_plane(struct drm_atomic_state *state,
8046 struct drm_plane *plane,
8047 struct drm_plane_state *old_plane_state,
8048 struct drm_plane_state *new_plane_state)
8049{
8050 struct drm_plane *other;
8051 struct drm_plane_state *old_other_state, *new_other_state;
8052 struct drm_crtc_state *new_crtc_state;
8053 int i;
8054
	/*
	 * TODO: Remove this hack once the checks below are sufficient enough
	 * to determine when we need to reset all the planes on the stream.
	 */
8060 if (state->allow_modeset)
8061 return true;
8062
	/* Exit early if we know that we're adding or removing the plane. */
8064 if (old_plane_state->crtc != new_plane_state->crtc)
8065 return true;
8066
	/* old crtc == new crtc == NULL, plane not in context. */
8068 if (!new_plane_state->crtc)
8069 return false;
8070
8071 new_crtc_state =
8072 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8073
8074 if (!new_crtc_state)
8075 return true;
8076
	/* CRTC degamma changes currently require us to recreate the planes. */
8078 if (new_crtc_state->color_mgmt_changed)
8079 return true;
8080
8081 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8082 return true;
8083
	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure correct
	 * z-order and pipe acquisition, the current DC architecture requires
	 * us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
8092 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8093 if (other->type == DRM_PLANE_TYPE_CURSOR)
8094 continue;
8095
8096 if (old_other_state->crtc != new_plane_state->crtc &&
8097 new_other_state->crtc != new_plane_state->crtc)
8098 continue;
8099
8100 if (old_other_state->crtc != new_other_state->crtc)
8101 return true;
8102
		/* TODO: Remove this once we can handle fast format changes. */
8104 if (old_other_state->fb && new_other_state->fb &&
8105 old_other_state->fb->format != new_other_state->fb->format)
8106 return true;
8107 }
8108
8109 return false;
8110}
8111
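/*
 * Mirrors a DRM plane update into DC: releases the DC plane state of
 * disabled planes and creates/attaches a new DC plane state for enabled
 * ones.
 */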
8112static int dm_update_plane_state(struct dc *dc,
8113 struct drm_atomic_state *state,
8114 struct drm_plane *plane,
8115 struct drm_plane_state *old_plane_state,
8116 struct drm_plane_state *new_plane_state,
8117 bool enable,
8118 bool *lock_and_validation_needed)
8119{
8120
8121 struct dm_atomic_state *dm_state = NULL;
8122 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8123 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8124 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8125 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8126 struct amdgpu_crtc *new_acrtc;
8127 bool needs_reset;
8128 int ret = 0;
8129
8130
8131 new_plane_crtc = new_plane_state->crtc;
8132 old_plane_crtc = old_plane_state->crtc;
8133 dm_new_plane_state = to_dm_plane_state(new_plane_state);
8134 dm_old_plane_state = to_dm_plane_state(old_plane_state);
8135
	/* TODO: Implement a better atomic check for the cursor plane. */
8137 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8138 if (!enable || !new_plane_crtc ||
8139 drm_atomic_plane_disabling(plane->state, new_plane_state))
8140 return 0;
8141
8142 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8143
8144 if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8145 (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8146 DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8147 new_plane_state->crtc_w, new_plane_state->crtc_h);
8148 return -EINVAL;
8149 }
8150
8151 return 0;
8152 }
8153
8154 needs_reset = should_reset_plane(state, plane, old_plane_state,
8155 new_plane_state);
8156
	/* Remove any changed/removed planes. */
8158 if (!enable) {
8159 if (!needs_reset)
8160 return 0;
8161
8162 if (!old_plane_crtc)
8163 return 0;
8164
8165 old_crtc_state = drm_atomic_get_old_crtc_state(
8166 state, old_plane_crtc);
8167 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8168
8169 if (!dm_old_crtc_state->stream)
8170 return 0;
8171
8172 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8173 plane->base.id, old_plane_crtc->base.id);
8174
8175 ret = dm_atomic_get_state(state, &dm_state);
8176 if (ret)
8177 return ret;
8178
8179 if (!dc_remove_plane_from_context(
8180 dc,
8181 dm_old_crtc_state->stream,
8182 dm_old_plane_state->dc_state,
8183 dm_state->context)) {
8184
			ret = -EINVAL;
8186 return ret;
8187 }
8188
8189
8190 dc_plane_state_release(dm_old_plane_state->dc_state);
8191 dm_new_plane_state->dc_state = NULL;
8192
8193 *lock_and_validation_needed = true;
8194
	} else { /* Add new planes */
8196 struct dc_plane_state *dc_new_plane_state;
8197
8198 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8199 return 0;
8200
8201 if (!new_plane_crtc)
8202 return 0;
8203
8204 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8205 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8206
8207 if (!dm_new_crtc_state->stream)
8208 return 0;
8209
8210 if (!needs_reset)
8211 return 0;
8212
8213 WARN_ON(dm_new_plane_state->dc_state);
8214
8215 dc_new_plane_state = dc_create_plane_state(dc);
8216 if (!dc_new_plane_state)
8217 return -ENOMEM;
8218
8219 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8220 plane->base.id, new_plane_crtc->base.id);
8221
8222 ret = fill_dc_plane_attributes(
8223 new_plane_crtc->dev->dev_private,
8224 dc_new_plane_state,
8225 new_plane_state,
8226 new_crtc_state);
8227 if (ret) {
8228 dc_plane_state_release(dc_new_plane_state);
8229 return ret;
8230 }
8231
8232 ret = dm_atomic_get_state(state, &dm_state);
8233 if (ret) {
8234 dc_plane_state_release(dc_new_plane_state);
8235 return ret;
8236 }
8237
		/*
		 * Any atomic check errors that occur after this will not need
		 * a release. The plane state will be attached to the stream,
		 * and therefore part of the atomic state. It'll be released
		 * when the atomic state is cleaned.
		 */
8245 if (!dc_add_plane_to_context(
8246 dc,
8247 dm_new_crtc_state->stream,
8248 dc_new_plane_state,
8249 dm_state->context)) {
8250
8251 dc_plane_state_release(dc_new_plane_state);
8252 return -EINVAL;
8253 }
8254
8255 dm_new_plane_state->dc_state = dc_new_plane_state;
8256
		/*
		 * Tell DC to do a full surface update every time there is a
		 * plane change. Inefficient, but works for now.
		 */
8260 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8261
8262 *lock_and_validation_needed = true;
8263 }
8264
8265
8266 return ret;
8267}
8268
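/*
 * Builds the prospective surface/stream updates for each CRTC and asks DC
 * how invasive the commit would be (fast, medium or full), so atomic check
 * knows whether global validation and locking are needed.
 */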
8269static int
8270dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8271 struct drm_atomic_state *state,
8272 enum surface_update_type *out_type)
8273{
8274 struct dc *dc = dm->dc;
8275 struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8276 int i, j, num_plane, ret = 0;
8277 struct drm_plane_state *old_plane_state, *new_plane_state;
8278 struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8279 struct drm_crtc *new_plane_crtc;
8280 struct drm_plane *plane;
8281
8282 struct drm_crtc *crtc;
8283 struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8284 struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8285 struct dc_stream_status *status = NULL;
8286 enum surface_update_type update_type = UPDATE_TYPE_FAST;
8287 struct surface_info_bundle {
8288 struct dc_surface_update surface_updates[MAX_SURFACES];
8289 struct dc_plane_info plane_infos[MAX_SURFACES];
8290 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8291 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8292 struct dc_stream_update stream_update;
8293 } *bundle;
8294
8295 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8296
8297 if (!bundle) {
8298 DRM_ERROR("Failed to allocate update bundle\n");
		/* Set the type to FULL to avoid crashing in DC. */
8300 update_type = UPDATE_TYPE_FULL;
8301 goto cleanup;
8302 }
8303
8304 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8305
8306 memset(bundle, 0, sizeof(struct surface_info_bundle));
8307
8308 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8309 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8310 num_plane = 0;
8311
8312 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8313 update_type = UPDATE_TYPE_FULL;
8314 goto cleanup;
8315 }
8316
8317 if (!new_dm_crtc_state->stream)
8318 continue;
8319
8320 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8321 const struct amdgpu_framebuffer *amdgpu_fb =
8322 to_amdgpu_framebuffer(new_plane_state->fb);
8323 struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8324 struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8325 struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8326 uint64_t tiling_flags;
8327 bool tmz_surface = false;
8328
8329 new_plane_crtc = new_plane_state->crtc;
8330 new_dm_plane_state = to_dm_plane_state(new_plane_state);
8331 old_dm_plane_state = to_dm_plane_state(old_plane_state);
8332
8333 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8334 continue;
8335
8336 if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8337 update_type = UPDATE_TYPE_FULL;
8338 goto cleanup;
8339 }
8340
8341 if (crtc != new_plane_crtc)
8342 continue;
8343
8344 bundle->surface_updates[num_plane].surface =
8345 new_dm_plane_state->dc_state;
8346
8347 if (new_crtc_state->mode_changed) {
8348 bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8349 bundle->stream_update.src = new_dm_crtc_state->stream->src;
8350 }
8351
8352 if (new_crtc_state->color_mgmt_changed) {
8353 bundle->surface_updates[num_plane].gamma =
8354 new_dm_plane_state->dc_state->gamma_correction;
8355 bundle->surface_updates[num_plane].in_transfer_func =
8356 new_dm_plane_state->dc_state->in_transfer_func;
8357 bundle->surface_updates[num_plane].gamut_remap_matrix =
8358 &new_dm_plane_state->dc_state->gamut_remap_matrix;
8359 bundle->stream_update.gamut_remap =
8360 &new_dm_crtc_state->stream->gamut_remap_matrix;
8361 bundle->stream_update.output_csc_transform =
8362 &new_dm_crtc_state->stream->csc_color_matrix;
8363 bundle->stream_update.out_transfer_func =
8364 new_dm_crtc_state->stream->out_transfer_func;
8365 }
8366
8367 ret = fill_dc_scaling_info(new_plane_state,
8368 scaling_info);
8369 if (ret)
8370 goto cleanup;
8371
8372 bundle->surface_updates[num_plane].scaling_info = scaling_info;
8373
8374 if (amdgpu_fb) {
8375 ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
8376 if (ret)
8377 goto cleanup;
8378
8379 ret = fill_dc_plane_info_and_addr(
8380 dm->adev, new_plane_state, tiling_flags,
8381 plane_info,
8382 &flip_addr->address, tmz_surface,
8383 false);
8384 if (ret)
8385 goto cleanup;
8386
8387 bundle->surface_updates[num_plane].plane_info = plane_info;
8388 bundle->surface_updates[num_plane].flip_addr = flip_addr;
8389 }
8390
8391 num_plane++;
8392 }
8393
8394 if (num_plane == 0)
8395 continue;
8396
8397 ret = dm_atomic_get_state(state, &dm_state);
8398 if (ret)
8399 goto cleanup;
8400
8401 old_dm_state = dm_atomic_get_old_state(state);
8402 if (!old_dm_state) {
8403 ret = -EINVAL;
8404 goto cleanup;
8405 }
8406
8407 status = dc_stream_get_status_from_state(old_dm_state->context,
8408 new_dm_crtc_state->stream);
8409 bundle->stream_update.stream = new_dm_crtc_state->stream;
8410
8411
8412
8413
8414 mutex_lock(&dm->dc_lock);
8415 update_type = dc_check_update_surfaces_for_stream(
8416 dc, bundle->surface_updates, num_plane,
8417 &bundle->stream_update, status);
8418 mutex_unlock(&dm->dc_lock);
8419
8420 if (update_type > UPDATE_TYPE_MED) {
8421 update_type = UPDATE_TYPE_FULL;
8422 goto cleanup;
8423 }
8424 }
8425
8426cleanup:
8427 kfree(bundle);
8428
8429 *out_type = update_type;
8430 return ret;
8431}
8432
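/*
 * If the CRTC is driven by an MST connector, add every other CRTC sharing the
 * same MST topology to @state: changing one stream can redistribute DSC and
 * link bandwidth across all of them, so they must be checked together.
 */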
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}

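/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For full updates, which remove/add/update streams on one CRTC
 * while flipping on another, acquiring the global lock guarantees that any
 * such full update commit will wait for completion of any outstanding flip
 * using DRM's synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: -Error code if validation failed.
 */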
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;
	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;

	int ret, i;

	/*
	 * Set to true for any modeset/reset or plane update which implies a
	 * non-fast surface update.
	 */
	bool lock_and_validation_needed = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	if (adev->asic_type >= CHIP_NAVI10) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

	/* Remove exiting planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling-change validation due to inability to commit
	 * a new stream into the context w/o causing a full reset. Need to
	 * decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		overall_update_type = UPDATE_TYPE_FULL;
		lock_and_validation_needed = true;
	}

	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
	if (ret)
		goto fail;

	if (overall_update_type < update_type)
		overall_update_type = update_type;

	/*
	 * lock_and_validation_needed was an old way to determine if we need to
	 * set the global lock. Leaving it in to check if we broke any corner
	 * cases:
	 *   lock_and_validation_needed true  = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
	 *   lock_and_validation_needed false = UPDATE_TYPE_FAST
	 */
	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
		WARN(1, "Global lock should be set; overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");

	if (overall_update_type > UPDATE_TYPE_FAST) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance the hardware
		 * will be programmed incorrectly.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * make sure subsequent commits don't touch it or refer to it.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * any memory not needed for fast commits.
		 */
		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/* If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * to make it contiguous.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = (int)overall_update_type;
	}

	/* Must be success */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}

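/*
 * Read DP_DOWN_STREAM_PORT_COUNT from the sink's DPCD and report whether the
 * sink can ignore the MSA timing parameters - a prerequisite for driving a
 * variable refresh rate over DP.
 */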
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
	}

	return capable;
}

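/**
 * amdgpu_dm_update_freesync_caps - Update FreeSync capabilities from EDID
 * @connector: DRM connector to update
 * @edid: EDID of the attached sink, or NULL on disconnect
 *
 * Parse the monitor range limits from @edid to determine the supported
 * refresh-rate range, cache it on the amdgpu connector, and update the
 * connector's vrr_capable property. A NULL @edid clears the cached caps.
 */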
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;

	/*
	 * The EDID is non-NULL here, so restrict FreeSync to DP and eDP
	 * sinks; the EDID range check below is only required for DP sinks
	 * that can ignore the MSA timing parameters.
	 */
	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
	    amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
		edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
	}

	if (edid_check_required && (edid->version > 1 ||
				    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {

			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;

			/* Only consider display descriptors carrying a
			 * monitor range limits block.
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;

			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10) {

			freesync_capable = true;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

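/*
 * amdgpu_dm_set_psr_caps() - set link psr capabilities
 * @link: eDP link to query
 *
 * Read DP_PSR_SUPPORT from the sink's DPCD and record the supported PSR
 * version on the link.
 */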
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}

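/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */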
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}

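/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */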
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/* Calculate number of static frames before generating interrupt to
	 * enter PSR.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/*
	 * Round up.
	 * Calculate the number of frames such that at least 30 ms of time
	 * has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}

	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}

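/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */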
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}