/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are:
 * drm_crtc, drm_connector, drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}
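
/*
 * dm_crtc_get_scanoutpos - report the current scanout position and the
 * vblank start/end lines for the given CRTC, as queried from DC.
 *
 * Returns 0 on success, -EINVAL for an out-of-range CRTC index.
 */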
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
						acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
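
/*
 * Look up the amdgpu_crtc driven by the given OTG (output timing
 * generator) instance. Falls back to CRTC 0 with a warning if the
 * instance is invalid.
 */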
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
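
/*
 * Returns true if freesync/VRR is currently active for the CRTC state,
 * either in variable or in fixed refresh mode.
 */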
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
							amdgpu_crtc->crtc_id);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}
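
/*
 * Handler for the VUPDATE interrupt. In variable refresh mode the core
 * vblank handling happens here, since the end of the front porch is only
 * known once VUPDATE fires; on ASICs older than AMDGPU_FAMILY_AI the
 * FreeSync v_update/vmin-vmax adjustment also runs from this handler.
 */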
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
				 amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-Vega (pre-DCE12) ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
						adev->dm.freesync_module,
						acrtc_state->stream,
						&acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
						adev->dm.dc,
						acrtc_state->stream,
						&acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: ignored
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
				 amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling at start of front-porch is only possible
		 * in non-vrr mode, as only there vblank timestamping will give
		 * valid results while done in front-porch. Otherwise defer it
		 * to dm_vupdate_high_irq after end of front-porch.
		 */
		if (!amdgpu_dm_vrr_active(acrtc_state))
			drm_crtc_handle_vblank(&acrtc->base);

		/* Following stuff must happen at start of vblank, for crc
		 * computation and below-the-range btr support in vrr mode.
		 */
		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

		if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
		    acrtc_state->vrr_params.supported &&
		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
			spin_lock_irqsave(&adev->ddev->event_lock, flags);
			mod_freesync_handle_v_update(
				adev->dm.freesync_module,
				acrtc_state->stream,
				&acrtc_state->vrr_params);

			dc_stream_adjust_vmin_vmax(
				adev->dm.dc,
				acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		}
	}
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data  */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
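
/*
 * amdgpu_dm_init() - bring up the display manager: create the DC
 * instance, set up DM IRQ handling, the freesync and (optionally) HDCP
 * modules, and register all KMS objects with DRM.
 *
 * Returns 0 on success, -EINVAL on failure; on failure the partial
 * initialization is torn down again via amdgpu_dm_fini().
 */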
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/*
	 * TODO debug why this doesn't work on Raven
	 */
	if (adev->flags & AMD_IS_APU &&
	    adev->asic_type >= CHIP_CARRIZO &&
	    adev->asic_type < CHIP_RAVEN)
		init_data.flags.gpu_vm_support = true;

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

#ifdef CONFIG_DRM_AMD_DC_DCN2_0
	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
#endif

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

#if defined(CONFIG_DEBUG_FS)
	if (dtn_debugfs_init(adev))
		DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
#endif

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}
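
/*
 * Request the DMCU (display microcontroller) firmware where one exists
 * for the ASIC (currently only the Raven family) and register its ERAM
 * and interrupt-vector regions with the PSP firmware loader. Missing
 * firmware is not fatal: the driver simply loads without the features
 * that depend on the DMCU (ABM, for instance).
 */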
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_RENOIR:
		return 0;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware not necessarily available, don't fail load */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->dm.fw_dmcu) {
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
	}

	return 0;
}
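
/*
 * Start DP-MST topology management on every connector whose link was
 * detected as an MST branch. On failure the link is downgraded to a
 * single-stream (SST) connection.
 */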
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	bool ret = false;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction. Don't allow below 1%:
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* todo will enable for navi10 */
	if (adev->asic_type <= CHIP_RAVEN) {
		ret = dmcu_load_iram(dmcu, params);

		if (!ret)
			return -EINVAL;
	}

	return detect_mst_link_for_all_connectors(adev->ddev);
}
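
/*
 * Suspend or resume MST topology management around S3. On resume, links
 * whose topology can no longer be resumed are torn down and a hotplug
 * event is sent so that userspace re-probes the outputs.
 */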
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}
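
/*
 * Fake a link detection for connectors that are forced on but report no
 * physical sink: create an emulated sink of the appropriate signal type
 * and try to read a locally stored EDID for it.
 */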
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	int i;

	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/* Do detection */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added
 * to the base driver's device list to be initialized and torn down
 * accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};

/**
 * DOC: atomic
 *
 * *WIP*
 */

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		} else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);
			drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
					    aconnector->edid);
		}
		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);

	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	if (sink)
		dc_sink_release(sink);
}
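
/*
 * Handler for HPD (hot-plug detect) long pulses: re-runs link detection,
 * updates the connector, restores the DRM connector state and, when the
 * connector is not forced, sends a hotplug event to userspace.
 */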
static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct amdgpu_device *adev = dev->dev_private;
#endif

	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN)
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}
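
/*
 * Service an MST short-pulse interrupt: read the ESI (event status
 * indicator) DPCD registers, let the MST manager handle the request,
 * then ACK it back to the sink; repeat while new requests keep arriving,
 * bounded by max_process_count.
 */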
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
	       process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);

		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	union hpd_irq_data hpd_irq_data;
	struct amdgpu_device *adev = dev->dev_private;

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
#endif

	/*
	 * TODO: Temporary add mutex to protect hpd interrupt not have a gpio
	 * conflict, after implement i2c helper, this mutex should be
	 * retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
#else
	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
#endif
	    !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ)
		hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
#endif
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch) {
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx) */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	if (adev->asic_type >= CHIP_VEGA10)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
			i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

/*
 * Acquires the lock for the atomic state object and returns
 * the new atomic state.
 *
 * This should only be called during atomic check.
 */
static int dm_atomic_get_state(struct drm_atomic_state *state,
			       struct dm_atomic_state **dm_state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_state *priv_state;

	if (*dm_state)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	*dm_state = to_dm_atomic_state(priv_state);

	return 0;
}

struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *new_obj_state;
	int i;

	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}

struct dm_atomic_state *
dm_atomic_get_old_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *old_obj_state;
	int i;

	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(old_obj_state);
	}

	return NULL;
}

static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}

static void dm_atomic_destroy_state(struct drm_private_obj *obj,
				    struct drm_private_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}

static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};
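
/*
 * Set up the DRM mode_config limits and hooks, create the initial DC
 * state as a DRM private object, and initialize the modeset properties
 * and the audio component.
 */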
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev->ddev,
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

	r = amdgpu_dm_audio_init(adev);
	if (r)
		return r;

	return 0;
}

#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	if (dm->backlight_caps.caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
	if (caps.caps_valid) {
		dm->backlight_caps.min_input_signal = caps.min_input_signal;
		dm->backlight_caps.max_input_signal = caps.max_input_signal;
		dm->backlight_caps.caps_valid = true;
	} else {
		dm->backlight_caps.min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps.max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}
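
/*
 * update_status callback for the backlight class device: rescales the
 * 0-255 brightness requested by userspace into the ACPI-reported
 * input-signal range and DC's 16-bit backlight scale, then programs it
 * on the backlight link. Returns 0 on success, per the backlight API.
 */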
2167static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2168{
2169 struct amdgpu_display_manager *dm = bl_get_data(bd);
2170 struct amdgpu_dm_backlight_caps caps;
2171 uint32_t brightness = bd->props.brightness;
2172
2173 amdgpu_dm_update_backlight_caps(dm);
2174 caps = dm->backlight_caps;
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184 brightness =
2185 brightness
2186 * 0x101
2187 * (caps.max_input_signal - caps.min_input_signal)
2188 / AMDGPU_MAX_BL_LEVEL
2189 + caps.min_input_signal * 0x101;
2190
2191 if (dc_link_set_backlight_level(dm->backlight_link,
2192 brightness, 0))
2193 return 0;
2194 else
2195 return 1;
2196}
2197
2198static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2199{
2200 struct amdgpu_display_manager *dm = bl_get_data(bd);
2201 int ret = dc_link_get_backlight_level(dm->backlight_link);
2202
2203 if (ret == DC_ERROR_UNEXPECTED)
2204 return bd->props.brightness;
2205 return ret;
2206}
2207
2208static const struct backlight_ops amdgpu_dm_backlight_ops = {
2209 .options = BL_CORE_SUSPENDRESUME,
2210 .get_brightness = amdgpu_dm_backlight_get_brightness,
2211 .update_status = amdgpu_dm_backlight_update_status,
2212};
2213
2214static void
2215amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2216{
2217 char bl_name[16];
2218 struct backlight_properties props = { 0 };
2219
2220 amdgpu_dm_update_backlight_caps(dm);
2221
2222 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2223 props.brightness = AMDGPU_MAX_BL_LEVEL;
2224 props.type = BACKLIGHT_RAW;
2225
2226 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2227 dm->adev->ddev->primary->index);
2228
2229 dm->backlight_dev = backlight_device_register(bl_name,
2230 dm->adev->ddev->dev,
2231 dm,
2232 &amdgpu_dm_backlight_ops,
2233 &props);
2234
2235 if (IS_ERR(dm->backlight_dev))
2236 DRM_ERROR("DM: Backlight registration failed!\n");
2237 else
2238 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2239}
2240
2241#endif
2242
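/*
 * Allocate a drm_plane, derive the set of CRTCs it may be bound to from the
 * plane index, and hand it to amdgpu_dm_plane_init(). The plane is also
 * stored in mode_info when a slot is provided.
 */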
2243static int initialize_plane(struct amdgpu_display_manager *dm,
2244 struct amdgpu_mode_info *mode_info, int plane_id,
2245 enum drm_plane_type plane_type,
2246 const struct dc_plane_cap *plane_cap)
2247{
2248 struct drm_plane *plane;
2249 unsigned long possible_crtcs;
2250 int ret = 0;
2251
2252 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2253 if (!plane) {
2254 DRM_ERROR("KMS: Failed to allocate plane\n");
2255 return -ENOMEM;
2256 }
2257 plane->type = plane_type;
2258
	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;
2268
2269 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2270
2271 if (ret) {
2272 DRM_ERROR("KMS: Failed to initialize plane\n");
2273 kfree(plane);
2274 return ret;
2275 }
2276
2277 if (mode_info)
2278 mode_info->planes[plane_id] = plane;
2279
2280 return ret;
2281}
2282
2283
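/*
 * Register a backlight device for internal panels (eDP/LVDS) only, and
 * remember which link owns the backlight.
 */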
2284static void register_backlight_device(struct amdgpu_display_manager *dm,
2285 struct dc_link *link)
2286{
2287#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2288 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2289
2290 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2291 link->type != dc_connection_none) {
		/*
		 * Even if registration fails, keep going: not having
		 * backlight control is better than a black screen.
		 */
2297 amdgpu_dm_register_backlight_device(dm);
2298
2299 if (dm->backlight_dev)
2300 dm->backlight_link = link;
2301 }
2302#endif
2303}
2304
/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component.
 *
 * Returns 0 on success.
 */
2314static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2315{
2316 struct amdgpu_display_manager *dm = &adev->dm;
2317 int32_t i;
2318 struct amdgpu_dm_connector *aconnector = NULL;
2319 struct amdgpu_encoder *aencoder = NULL;
2320 struct amdgpu_mode_info *mode_info = &adev->mode_info;
2321 uint32_t link_cnt;
2322 int32_t primary_planes;
2323 enum dc_connection_type new_connection_type = dc_connection_none;
2324 const struct dc_plane_cap *plane;
2325
2326 link_cnt = dm->dc->caps.max_links;
2327 if (amdgpu_dm_mode_config_init(dm->adev)) {
2328 DRM_ERROR("DM: Failed to initialize mode config\n");
2329 return -EINVAL;
2330 }
2331
	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
2340 for (i = (primary_planes - 1); i >= 0; i--) {
2341 plane = &dm->dc->caps.planes[i];
2342
2343 if (initialize_plane(dm, mode_info, i,
2344 DRM_PLANE_TYPE_PRIMARY, plane)) {
2345 DRM_ERROR("KMS: Failed to initialize primary plane\n");
2346 goto fail;
2347 }
2348 }
2349
	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't
	 * encourage userspace to use up all the pipes.
	 */
2359 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2360 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2361
2362 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2363 continue;
2364
2365 if (!plane->blends_with_above || !plane->blends_with_below)
2366 continue;
2367
2368 if (!plane->pixel_format_support.argb8888)
2369 continue;
2370
2371 if (initialize_plane(dm, NULL, primary_planes + i,
2372 DRM_PLANE_TYPE_OVERLAY, plane)) {
2373 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2374 goto fail;
2375 }

		/* Only create one overlay plane. */
		break;
2379 }
2380
2381 for (i = 0; i < dm->dc->caps.max_streams; i++)
2382 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2383 DRM_ERROR("KMS: Failed to initialize crtc\n");
2384 goto fail;
2385 }
2386
2387 dm->display_indexes_num = dm->dc->caps.max_streams;
2388
	/* Loops over all connectors on the board. */
2390 for (i = 0; i < link_cnt; i++) {
2391 struct dc_link *link = NULL;
2392
2393 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2394 DRM_ERROR(
2395 "KMS: Cannot support more than %d display indexes\n",
2396 AMDGPU_DM_MAX_DISPLAY_INDEX);
2397 continue;
2398 }
2399
2400 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2401 if (!aconnector)
2402 goto fail;
2403
2404 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2405 if (!aencoder)
2406 goto fail;
2407
2408 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2409 DRM_ERROR("KMS: Failed to initialize encoder\n");
2410 goto fail;
2411 }
2412
2413 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2414 DRM_ERROR("KMS: Failed to initialize connector\n");
2415 goto fail;
2416 }
2417
2418 link = dc_get_link_at_index(dm->dc, i);
2419
2420 if (!dc_link_detect_sink(link, &new_connection_type))
2421 DRM_ERROR("KMS: Failed to detect connector\n");
2422
2423 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2424 emulated_link_detect(link);
2425 amdgpu_dm_update_connector_after_detect(aconnector);
2426
2427 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2428 amdgpu_dm_update_connector_after_detect(aconnector);
2429 register_backlight_device(dm, link);
2430 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2431 amdgpu_dm_set_psr_caps(link);
2432 }
2433
2434
2435 }

	/* Software is initialized. Now we can register interrupt handlers. */
2438 switch (adev->asic_type) {
2439 case CHIP_BONAIRE:
2440 case CHIP_HAWAII:
2441 case CHIP_KAVERI:
2442 case CHIP_KABINI:
2443 case CHIP_MULLINS:
2444 case CHIP_TONGA:
2445 case CHIP_FIJI:
2446 case CHIP_CARRIZO:
2447 case CHIP_STONEY:
2448 case CHIP_POLARIS11:
2449 case CHIP_POLARIS10:
2450 case CHIP_POLARIS12:
2451 case CHIP_VEGAM:
2452 case CHIP_VEGA10:
2453 case CHIP_VEGA12:
2454 case CHIP_VEGA20:
2455 if (dce110_register_irq_handlers(dm->adev)) {
2456 DRM_ERROR("DM: Failed to initialize IRQ\n");
2457 goto fail;
2458 }
2459 break;
2460#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2461 case CHIP_RAVEN:
2462#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2463 case CHIP_NAVI12:
2464 case CHIP_NAVI10:
2465 case CHIP_NAVI14:
2466#endif
2467#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
2468 case CHIP_RENOIR:
2469#endif
2470 if (dcn10_register_irq_handlers(dm->adev)) {
2471 DRM_ERROR("DM: Failed to initialize IRQ\n");
2472 goto fail;
2473 }
2474 break;
2475#endif
2476 default:
2477 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
2478 goto fail;
2479 }
2480
	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		dm->dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
2483
2484 return 0;
2485fail:
2486 kfree(aencoder);
2487 kfree(aconnector);
2488
2489 return -EINVAL;
2490}
2491
2492static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
2493{
2494 drm_mode_config_cleanup(dm->ddev);
2495 drm_atomic_private_obj_fini(&dm->atomic_obj);
2497}
2498
/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/*
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}
2514
2515static const struct amdgpu_display_funcs dm_display_funcs = {
2516 .bandwidth_update = dm_bandwidth_update,
2517 .vblank_get_counter = dm_vblank_get_counter,
2518 .backlight_set_level = NULL,
2519 .backlight_get_level = NULL,
2520 .hpd_sense = NULL,
2521 .hpd_set_polarity = NULL,
2522 .hpd_get_gpio_reg = NULL,
2523 .page_flip_get_scanoutpos =
2524 dm_crtc_get_scanoutpos,
2525 .add_encoder = NULL,
2526 .add_connector = NULL,
2527};
2528
2529#if defined(CONFIG_DEBUG_KERNEL_DC)
2530
2531static ssize_t s3_debug_store(struct device *device,
2532 struct device_attribute *attr,
2533 const char *buf,
2534 size_t count)
2535{
2536 int ret;
2537 int s3_state;
2538 struct drm_device *drm_dev = dev_get_drvdata(device);
2539 struct amdgpu_device *adev = drm_dev->dev_private;
2540
2541 ret = kstrtoint(buf, 0, &s3_state);
2542
	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else {
			dm_suspend(adev);
		}
	}
2550
2551 return ret == 0 ? count : 0;
2552}
2553
2554DEVICE_ATTR_WO(s3_debug);
2555
2556#endif
2557
2558static int dm_early_init(void *handle)
2559{
2560 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2561
2562 switch (adev->asic_type) {
2563 case CHIP_BONAIRE:
2564 case CHIP_HAWAII:
2565 adev->mode_info.num_crtc = 6;
2566 adev->mode_info.num_hpd = 6;
2567 adev->mode_info.num_dig = 6;
2568 break;
2569 case CHIP_KAVERI:
2570 adev->mode_info.num_crtc = 4;
2571 adev->mode_info.num_hpd = 6;
2572 adev->mode_info.num_dig = 7;
2573 break;
2574 case CHIP_KABINI:
2575 case CHIP_MULLINS:
2576 adev->mode_info.num_crtc = 2;
2577 adev->mode_info.num_hpd = 6;
2578 adev->mode_info.num_dig = 6;
2579 break;
2580 case CHIP_FIJI:
2581 case CHIP_TONGA:
2582 adev->mode_info.num_crtc = 6;
2583 adev->mode_info.num_hpd = 6;
2584 adev->mode_info.num_dig = 7;
2585 break;
2586 case CHIP_CARRIZO:
2587 adev->mode_info.num_crtc = 3;
2588 adev->mode_info.num_hpd = 6;
2589 adev->mode_info.num_dig = 9;
2590 break;
2591 case CHIP_STONEY:
2592 adev->mode_info.num_crtc = 2;
2593 adev->mode_info.num_hpd = 6;
2594 adev->mode_info.num_dig = 9;
2595 break;
2596 case CHIP_POLARIS11:
2597 case CHIP_POLARIS12:
2598 adev->mode_info.num_crtc = 5;
2599 adev->mode_info.num_hpd = 5;
2600 adev->mode_info.num_dig = 5;
2601 break;
2602 case CHIP_POLARIS10:
2603 case CHIP_VEGAM:
2604 adev->mode_info.num_crtc = 6;
2605 adev->mode_info.num_hpd = 6;
2606 adev->mode_info.num_dig = 6;
2607 break;
2608 case CHIP_VEGA10:
2609 case CHIP_VEGA12:
2610 case CHIP_VEGA20:
2611 adev->mode_info.num_crtc = 6;
2612 adev->mode_info.num_hpd = 6;
2613 adev->mode_info.num_dig = 6;
2614 break;
2615#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2616 case CHIP_RAVEN:
2617 adev->mode_info.num_crtc = 4;
2618 adev->mode_info.num_hpd = 4;
2619 adev->mode_info.num_dig = 4;
2620 break;
2621#endif
2622#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2623 case CHIP_NAVI10:
2624 case CHIP_NAVI12:
2625 adev->mode_info.num_crtc = 6;
2626 adev->mode_info.num_hpd = 6;
2627 adev->mode_info.num_dig = 6;
2628 break;
2629 case CHIP_NAVI14:
2630 adev->mode_info.num_crtc = 5;
2631 adev->mode_info.num_hpd = 5;
2632 adev->mode_info.num_dig = 5;
2633 break;
2634#endif
2635#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
2636 case CHIP_RENOIR:
2637 adev->mode_info.num_crtc = 4;
2638 adev->mode_info.num_hpd = 4;
2639 adev->mode_info.num_dig = 4;
2640 break;
2641#endif
2642 default:
2643 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
2644 return -EINVAL;
2645 }
2646
2647 amdgpu_dm_set_irq_funcs(adev);
2648
2649 if (adev->mode_info.funcs == NULL)
2650 adev->mode_info.funcs = &dm_display_funcs;
2651
	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init().
	 */
2657#if defined(CONFIG_DEBUG_KERNEL_DC)
2658 device_create_file(
2659 adev->ddev->dev,
2660 &dev_attr_s3_debug);
2661#endif
2662
2663 return 0;
2664}
2665
2666static bool modeset_required(struct drm_crtc_state *crtc_state,
2667 struct dc_stream_state *new_stream,
2668 struct dc_stream_state *old_stream)
2669{
2670 if (!drm_atomic_crtc_needs_modeset(crtc_state))
2671 return false;
2672
2673 if (!crtc_state->enable)
2674 return false;
2675
2676 return crtc_state->active;
2677}
2678
2679static bool modereset_required(struct drm_crtc_state *crtc_state)
2680{
2681 if (!drm_atomic_crtc_needs_modeset(crtc_state))
2682 return false;
2683
2684 return !crtc_state->enable || !crtc_state->active;
2685}
2686
2687static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
2688{
2689 drm_encoder_cleanup(encoder);
2690 kfree(encoder);
2691}
2692
2693static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
2694 .destroy = amdgpu_dm_encoder_destroy,
2695};
2696
2697
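/*
 * Translate the DRM plane state src/dst rectangles (16.16 fixed point on
 * the source side) into DC's scaling info, rejecting degenerate sizes and
 * scaling factors outside the 0.25x-16x range checked below.
 */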
2698static int fill_dc_scaling_info(const struct drm_plane_state *state,
2699 struct dc_scaling_info *scaling_info)
2700{
2701 int scale_w, scale_h;
2702
2703 memset(scaling_info, 0, sizeof(*scaling_info));
2704
	/* Source is fixed 16.16 but we ignore mantissa for now... */
2706 scaling_info->src_rect.x = state->src_x >> 16;
2707 scaling_info->src_rect.y = state->src_y >> 16;
2708
2709 scaling_info->src_rect.width = state->src_w >> 16;
2710 if (scaling_info->src_rect.width == 0)
2711 return -EINVAL;
2712
2713 scaling_info->src_rect.height = state->src_h >> 16;
2714 if (scaling_info->src_rect.height == 0)
2715 return -EINVAL;
2716
2717 scaling_info->dst_rect.x = state->crtc_x;
2718 scaling_info->dst_rect.y = state->crtc_y;
2719
2720 if (state->crtc_w == 0)
2721 return -EINVAL;
2722
2723 scaling_info->dst_rect.width = state->crtc_w;
2724
2725 if (state->crtc_h == 0)
2726 return -EINVAL;
2727
2728 scaling_info->dst_rect.height = state->crtc_h;
2729
	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;
2732
	/* TODO: Validate scaling per-format with DC plane caps */
2734 scale_w = scaling_info->dst_rect.width * 1000 /
2735 scaling_info->src_rect.width;
2736
2737 if (scale_w < 250 || scale_w > 16000)
2738 return -EINVAL;
2739
2740 scale_h = scaling_info->dst_rect.height * 1000 /
2741 scaling_info->src_rect.height;
2742
2743 if (scale_h < 250 || scale_h > 16000)
2744 return -EINVAL;
2745
	/*
	 * The "scaling_quality" can be ignored for now; quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */
2751 return 0;
2752}
2753
2754static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
2755 uint64_t *tiling_flags)
2756{
2757 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
2758 int r = amdgpu_bo_reserve(rbo, false);
2759
2760 if (unlikely(r)) {
		/* Don't show an error message when returning -ERESTARTSYS */
2762 if (r != -ERESTARTSYS)
2763 DRM_ERROR("Unable to reserve buffer: %d\n", r);
2764 return r;
2765 }
2766
2767 if (tiling_flags)
2768 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
2769
2770 amdgpu_bo_unreserve(rbo);
2771
2772 return r;
2773}
2774
2775static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
2776{
2777 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
2778
2779 return offset ? (address + offset * 256) : 0;
2780}
2781
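/*
 * Validate the DCC metadata carried in the BO tiling flags against the
 * compression caps DC reports for this surface, and fill the plane's DCC
 * parameters and metadata address on success.
 */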
2782static int
2783fill_plane_dcc_attributes(struct amdgpu_device *adev,
2784 const struct amdgpu_framebuffer *afb,
2785 const enum surface_pixel_format format,
2786 const enum dc_rotation_angle rotation,
2787 const struct plane_size *plane_size,
2788 const union dc_tiling_info *tiling_info,
2789 const uint64_t info,
2790 struct dc_plane_dcc_param *dcc,
2791 struct dc_plane_address *address)
2792{
2793 struct dc *dc = adev->dm.dc;
2794 struct dc_dcc_surface_param input;
2795 struct dc_surface_dcc_cap output;
2796 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
2797 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
2798 uint64_t dcc_address;
2799
2800 memset(&input, 0, sizeof(input));
2801 memset(&output, 0, sizeof(output));
2802
2803 if (!offset)
2804 return 0;
2805
2806 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
2807 return 0;
2808
2809 if (!dc->cap_funcs.get_dcc_compression_cap)
2810 return -EINVAL;
2811
2812 input.format = format;
2813 input.surface_size.width = plane_size->surface_size.width;
2814 input.surface_size.height = plane_size->surface_size.height;
2815 input.swizzle_mode = tiling_info->gfx9.swizzle;
2816
2817 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
2818 input.scan = SCAN_DIRECTION_HORIZONTAL;
2819 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
2820 input.scan = SCAN_DIRECTION_VERTICAL;
2821
2822 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
2823 return -EINVAL;
2824
2825 if (!output.capable)
2826 return -EINVAL;
2827
2828 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
2829 return -EINVAL;
2830
2831 dcc->enable = 1;
2832 dcc->meta_pitch =
2833 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
2834 dcc->independent_64b_blks = i64b;
2835
2836 dcc_address = get_dcc_address(afb->address, info);
2837 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
2838 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
2839
2840 return 0;
2841}
2842
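/*
 * Fill tiling info, plane size and surface addresses from the framebuffer
 * and its tiling flags: GFX8-style macro tiling fields for pre-GFX9 ASICs,
 * swizzle-based parameters (plus DCC) for GFX9 and newer.
 */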
2843static int
2844fill_plane_buffer_attributes(struct amdgpu_device *adev,
2845 const struct amdgpu_framebuffer *afb,
2846 const enum surface_pixel_format format,
2847 const enum dc_rotation_angle rotation,
2848 const uint64_t tiling_flags,
2849 union dc_tiling_info *tiling_info,
2850 struct plane_size *plane_size,
2851 struct dc_plane_dcc_param *dcc,
2852 struct dc_plane_address *address)
2853{
2854 const struct drm_framebuffer *fb = &afb->base;
2855 int ret;
2856
2857 memset(tiling_info, 0, sizeof(*tiling_info));
2858 memset(plane_size, 0, sizeof(*plane_size));
2859 memset(dcc, 0, sizeof(*dcc));
2860 memset(address, 0, sizeof(*address));
2861
2862 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
2863 plane_size->surface_size.x = 0;
2864 plane_size->surface_size.y = 0;
2865 plane_size->surface_size.width = fb->width;
2866 plane_size->surface_size.height = fb->height;
2867 plane_size->surface_pitch =
2868 fb->pitches[0] / fb->format->cpp[0];
2869
2870 address->type = PLN_ADDR_TYPE_GRAPHICS;
2871 address->grph.addr.low_part = lower_32_bits(afb->address);
2872 address->grph.addr.high_part = upper_32_bits(afb->address);
2873 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
2874 uint64_t chroma_addr = afb->address + fb->offsets[1];
2875
2876 plane_size->surface_size.x = 0;
2877 plane_size->surface_size.y = 0;
2878 plane_size->surface_size.width = fb->width;
2879 plane_size->surface_size.height = fb->height;
2880 plane_size->surface_pitch =
2881 fb->pitches[0] / fb->format->cpp[0];
2882
2883 plane_size->chroma_size.x = 0;
2884 plane_size->chroma_size.y = 0;
2885
2886 plane_size->chroma_size.width = fb->width / 2;
2887 plane_size->chroma_size.height = fb->height / 2;
2888
2889 plane_size->chroma_pitch =
2890 fb->pitches[1] / fb->format->cpp[1];
2891
2892 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
2893 address->video_progressive.luma_addr.low_part =
2894 lower_32_bits(afb->address);
2895 address->video_progressive.luma_addr.high_part =
2896 upper_32_bits(afb->address);
2897 address->video_progressive.chroma_addr.low_part =
2898 lower_32_bits(chroma_addr);
2899 address->video_progressive.chroma_addr.high_part =
2900 upper_32_bits(chroma_addr);
2901 }
2902
	/* Fill GFX8 params */
2904 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
2905 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
2906
2907 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2908 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2909 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2910 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2911 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2912
		/* XXX: fix me for VI */
2914 tiling_info->gfx8.num_banks = num_banks;
2915 tiling_info->gfx8.array_mode =
2916 DC_ARRAY_2D_TILED_THIN1;
2917 tiling_info->gfx8.tile_split = tile_split;
2918 tiling_info->gfx8.bank_width = bankw;
2919 tiling_info->gfx8.bank_height = bankh;
2920 tiling_info->gfx8.tile_aspect = mtaspect;
2921 tiling_info->gfx8.tile_mode =
2922 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
2923 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
2924 == DC_ARRAY_1D_TILED_THIN1) {
2925 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
2926 }
2927
2928 tiling_info->gfx8.pipe_config =
2929 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2930
2931 if (adev->asic_type == CHIP_VEGA10 ||
2932 adev->asic_type == CHIP_VEGA12 ||
2933 adev->asic_type == CHIP_VEGA20 ||
2934#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2935 adev->asic_type == CHIP_NAVI10 ||
2936 adev->asic_type == CHIP_NAVI14 ||
2937 adev->asic_type == CHIP_NAVI12 ||
2938#endif
2939#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
2940 adev->asic_type == CHIP_RENOIR ||
2941#endif
2942 adev->asic_type == CHIP_RAVEN) {
		/* Fill GFX9 params */
2944 tiling_info->gfx9.num_pipes =
2945 adev->gfx.config.gb_addr_config_fields.num_pipes;
2946 tiling_info->gfx9.num_banks =
2947 adev->gfx.config.gb_addr_config_fields.num_banks;
2948 tiling_info->gfx9.pipe_interleave =
2949 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
2950 tiling_info->gfx9.num_shader_engines =
2951 adev->gfx.config.gb_addr_config_fields.num_se;
2952 tiling_info->gfx9.max_compressed_frags =
2953 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
2954 tiling_info->gfx9.num_rb_per_se =
2955 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
2956 tiling_info->gfx9.swizzle =
2957 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
2958 tiling_info->gfx9.shaderEnable = 1;
2959
2960 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
2961 plane_size, tiling_info,
2962 tiling_flags, dcc, address);
2963 if (ret)
2964 return ret;
2965 }
2966
2967 return 0;
2968}
2969
2970static void
2971fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
2972 bool *per_pixel_alpha, bool *global_alpha,
2973 int *global_alpha_value)
2974{
2975 *per_pixel_alpha = false;
2976 *global_alpha = false;
2977 *global_alpha_value = 0xff;
2978
2979 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
2980 return;
2981
2982 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
2983 static const uint32_t alpha_formats[] = {
2984 DRM_FORMAT_ARGB8888,
2985 DRM_FORMAT_RGBA8888,
2986 DRM_FORMAT_ABGR8888,
2987 };
2988 uint32_t format = plane_state->fb->format->format;
2989 unsigned int i;
2990
2991 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
2992 if (format == alpha_formats[i]) {
2993 *per_pixel_alpha = true;
2994 break;
2995 }
2996 }
2997 }
2998
2999 if (plane_state->alpha < 0xffff) {
3000 *global_alpha = true;
3001 *global_alpha_value = plane_state->alpha >> 8;
3002 }
3003}
3004
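/*
 * Derive the DC color space for a plane from the DRM color encoding and
 * range properties. RGB formats always use sRGB; YCbCr formats map onto
 * the 601/709/2020 variants, limited or full range.
 */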
3005static int
3006fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3007 const enum surface_pixel_format format,
3008 enum dc_color_space *color_space)
3009{
3010 bool full_range;
3011
3012 *color_space = COLOR_SPACE_SRGB;
3013
	/* DRM color properties only affect non-RGB formats. */
3015 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3016 return 0;
3017
3018 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3019
3020 switch (plane_state->color_encoding) {
3021 case DRM_COLOR_YCBCR_BT601:
3022 if (full_range)
3023 *color_space = COLOR_SPACE_YCBCR601;
3024 else
3025 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3026 break;
3027
3028 case DRM_COLOR_YCBCR_BT709:
3029 if (full_range)
3030 *color_space = COLOR_SPACE_YCBCR709;
3031 else
3032 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3033 break;
3034
3035 case DRM_COLOR_YCBCR_BT2020:
3036 if (full_range)
3037 *color_space = COLOR_SPACE_2020_YCBCR;
3038 else
3039 return -EINVAL;
3040 break;
3041
3042 default:
3043 return -EINVAL;
3044 }
3045
3046 return 0;
3047}
3048
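/*
 * Build a complete dc_plane_info from DRM plane state: pixel format,
 * rotation, color space, tiling/size/DCC attributes and blending, plus
 * the surface addresses.
 */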
3049static int
3050fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3051 const struct drm_plane_state *plane_state,
3052 const uint64_t tiling_flags,
3053 struct dc_plane_info *plane_info,
3054 struct dc_plane_address *address)
3055{
3056 const struct drm_framebuffer *fb = plane_state->fb;
3057 const struct amdgpu_framebuffer *afb =
3058 to_amdgpu_framebuffer(plane_state->fb);
3059 struct drm_format_name_buf format_name;
3060 int ret;
3061
3062 memset(plane_info, 0, sizeof(*plane_info));
3063
3064 switch (fb->format->format) {
3065 case DRM_FORMAT_C8:
3066 plane_info->format =
3067 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3068 break;
3069 case DRM_FORMAT_RGB565:
3070 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3071 break;
3072 case DRM_FORMAT_XRGB8888:
3073 case DRM_FORMAT_ARGB8888:
3074 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3075 break;
3076 case DRM_FORMAT_XRGB2101010:
3077 case DRM_FORMAT_ARGB2101010:
3078 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3079 break;
3080 case DRM_FORMAT_XBGR2101010:
3081 case DRM_FORMAT_ABGR2101010:
3082 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3083 break;
3084 case DRM_FORMAT_XBGR8888:
3085 case DRM_FORMAT_ABGR8888:
3086 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3087 break;
3088 case DRM_FORMAT_NV21:
3089 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3090 break;
3091 case DRM_FORMAT_NV12:
3092 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3093 break;
3094 default:
3095 DRM_ERROR(
3096 "Unsupported screen format %s\n",
3097 drm_get_format_name(fb->format->format, &format_name));
3098 return -EINVAL;
3099 }
3100
3101 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3102 case DRM_MODE_ROTATE_0:
3103 plane_info->rotation = ROTATION_ANGLE_0;
3104 break;
3105 case DRM_MODE_ROTATE_90:
3106 plane_info->rotation = ROTATION_ANGLE_90;
3107 break;
3108 case DRM_MODE_ROTATE_180:
3109 plane_info->rotation = ROTATION_ANGLE_180;
3110 break;
3111 case DRM_MODE_ROTATE_270:
3112 plane_info->rotation = ROTATION_ANGLE_270;
3113 break;
3114 default:
3115 plane_info->rotation = ROTATION_ANGLE_0;
3116 break;
3117 }
3118
3119 plane_info->visible = true;
3120 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3121
3122 plane_info->layer_index = 0;
3123
3124 ret = fill_plane_color_attributes(plane_state, plane_info->format,
3125 &plane_info->color_space);
3126 if (ret)
3127 return ret;
3128
3129 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3130 plane_info->rotation, tiling_flags,
3131 &plane_info->tiling_info,
3132 &plane_info->plane_size,
3133 &plane_info->dcc, address);
3134 if (ret)
3135 return ret;
3136
3137 fill_blending_from_plane_state(
3138 plane_state, &plane_info->per_pixel_alpha,
3139 &plane_info->global_alpha, &plane_info->global_alpha_value);
3140
3141 return 0;
3142}
3143
3144static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3145 struct dc_plane_state *dc_plane_state,
3146 struct drm_plane_state *plane_state,
3147 struct drm_crtc_state *crtc_state)
3148{
3149 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3150 const struct amdgpu_framebuffer *amdgpu_fb =
3151 to_amdgpu_framebuffer(plane_state->fb);
3152 struct dc_scaling_info scaling_info;
3153 struct dc_plane_info plane_info;
3154 uint64_t tiling_flags;
3155 int ret;
3156
3157 ret = fill_dc_scaling_info(plane_state, &scaling_info);
3158 if (ret)
3159 return ret;
3160
3161 dc_plane_state->src_rect = scaling_info.src_rect;
3162 dc_plane_state->dst_rect = scaling_info.dst_rect;
3163 dc_plane_state->clip_rect = scaling_info.clip_rect;
3164 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3165
3166 ret = get_fb_info(amdgpu_fb, &tiling_flags);
3167 if (ret)
3168 return ret;
3169
3170 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3171 &plane_info,
3172 &dc_plane_state->address);
3173 if (ret)
3174 return ret;
3175
	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
3179 dc_plane_state->plane_size = plane_info.plane_size;
3180 dc_plane_state->rotation = plane_info.rotation;
3181 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3182 dc_plane_state->stereo_format = plane_info.stereo_format;
3183 dc_plane_state->tiling_info = plane_info.tiling_info;
3184 dc_plane_state->visible = plane_info.visible;
3185 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3186 dc_plane_state->global_alpha = plane_info.global_alpha;
3187 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3188 dc_plane_state->dcc = plane_info.dcc;
3189 dc_plane_state->layer_index = plane_info.layer_index;

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
3195 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3196 if (ret)
3197 return ret;
3198
3199 return 0;
3200}
3201
3202static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3203 const struct dm_connector_state *dm_state,
3204 struct dc_stream_state *stream)
3205{
3206 enum amdgpu_rmx_type rmx_type;
3207
3208 struct rect src = { 0 };
3209 struct rect dst = { 0 };
3210
3211
3212 if (!mode)
3213 return;

	/* Full screen scaling by default */
3216 src.width = mode->hdisplay;
3217 src.height = mode->vdisplay;
3218 dst.width = stream->timing.h_addressable;
3219 dst.height = stream->timing.v_addressable;
3220
3221 if (dm_state) {
3222 rmx_type = dm_state->scaling;
3223 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3224 if (src.width * dst.height <
3225 src.height * dst.width) {
				/* Height needs less upscaling/more downscaling */
3227 dst.width = src.width *
3228 dst.height / src.height;
3229 } else {
				/* Width needs less upscaling/more downscaling */
3231 dst.height = src.height *
3232 dst.width / src.width;
3233 }
3234 } else if (rmx_type == RMX_CENTER) {
3235 dst = src;
3236 }
3237
3238 dst.x = (stream->timing.h_addressable - dst.width) / 2;
3239 dst.y = (stream->timing.v_addressable - dst.height) / 2;
3240
3241 if (dm_state->underscan_enable) {
3242 dst.x += dm_state->underscan_hborder / 2;
3243 dst.y += dm_state->underscan_vborder / 2;
3244 dst.width -= dm_state->underscan_hborder;
3245 dst.height -= dm_state->underscan_vborder;
3246 }
3247 }
3248
3249 stream->src = src;
3250 stream->dst = dst;
3251
3252 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
3253 dst.x, dst.y, dst.width, dst.height);
3254
3255}
3256
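/*
 * Pick a color depth from the sink's reported bpc, clamped by the
 * connector's max_requested_bpc property and rounded down to an even
 * value.
 */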
3257static enum dc_color_depth
3258convert_color_depth_from_display_info(const struct drm_connector *connector,
3259 const struct drm_connector_state *state)
3260{
3261 uint8_t bpc = (uint8_t)connector->display_info.bpc;
3262
	/* Assume 8 bpc by default if no bpc is specified. */
	bpc = bpc ? bpc : 8;
3265
3266 if (!state)
3267 state = connector->state;
3268
3269 if (state) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state or
		 * if this was called outside of atomic check, so it can't be
		 * used directly.
		 */
		bpc = min(bpc, state->max_requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
3282 }
3283
3284 switch (bpc) {
3285 case 0:
		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
		 * TODO: Fix EDID parsing.
		 */
3291 return COLOR_DEPTH_888;
3292 case 6:
3293 return COLOR_DEPTH_666;
3294 case 8:
3295 return COLOR_DEPTH_888;
3296 case 10:
3297 return COLOR_DEPTH_101010;
3298 case 12:
3299 return COLOR_DEPTH_121212;
3300 case 14:
3301 return COLOR_DEPTH_141414;
3302 case 16:
3303 return COLOR_DEPTH_161616;
3304 default:
3305 return COLOR_DEPTH_UNDEFINED;
3306 }
3307}
3308
3309static enum dc_aspect_ratio
3310get_aspect_ratio(const struct drm_display_mode *mode_in)
3311{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
3313 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3314}
3315
3316static enum dc_color_space
3317get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3318{
3319 enum dc_color_space color_space = COLOR_SPACE_SRGB;
3320
3321 switch (dc_crtc_timing->pixel_encoding) {
3322 case PIXEL_ENCODING_YCBCR422:
3323 case PIXEL_ENCODING_YCBCR444:
3324 case PIXEL_ENCODING_YCBCR420:
3325 {
		/*
		 * 27030 kHz is the separation point between HDTV and SDTV
		 * according to the HDMI spec: use YCbCr709 above it and
		 * YCbCr601 below it.
		 */
3331 if (dc_crtc_timing->pix_clk_100hz > 270300) {
3332 if (dc_crtc_timing->flags.Y_ONLY)
3333 color_space =
3334 COLOR_SPACE_YCBCR709_LIMITED;
3335 else
3336 color_space = COLOR_SPACE_YCBCR709;
3337 } else {
3338 if (dc_crtc_timing->flags.Y_ONLY)
3339 color_space =
3340 COLOR_SPACE_YCBCR601_LIMITED;
3341 else
3342 color_space = COLOR_SPACE_YCBCR601;
3343 }
3344
3345 }
3346 break;
3347 case PIXEL_ENCODING_RGB:
3348 color_space = COLOR_SPACE_SRGB;
3349 break;
3350
3351 default:
3352 WARN_ON(1);
3353 break;
3354 }
3355
3356 return color_space;
3357}
3358
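/*
 * Walk down from the current color depth until the normalized pixel clock
 * fits under the sink's max_tmds_clock; update the timing and return true
 * on success, false when no supported depth fits.
 */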
3359static bool adjust_colour_depth_from_display_info(
3360 struct dc_crtc_timing *timing_out,
3361 const struct drm_display_info *info)
3362{
3363 enum dc_color_depth depth = timing_out->display_color_depth;
3364 int normalized_clk;
3365 do {
3366 normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires an additional adjustment of 1/2 */
3368 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3369 normalized_clk /= 2;
3370
3371 switch (depth) {
3372 case COLOR_DEPTH_888:
3373 break;
3374 case COLOR_DEPTH_101010:
3375 normalized_clk = (normalized_clk * 30) / 24;
3376 break;
3377 case COLOR_DEPTH_121212:
3378 normalized_clk = (normalized_clk * 36) / 24;
3379 break;
3380 case COLOR_DEPTH_161616:
3381 normalized_clk = (normalized_clk * 48) / 24;
3382 break;
3383 default:
			/* The above depths are the only ones valid for HDMI. */
3385 return false;
3386 }
3387 if (normalized_clk <= info->max_tmds_clock) {
3388 timing_out->display_color_depth = depth;
3389 return true;
3390 }
3391 } while (--depth > COLOR_DEPTH_666);
3392 return false;
3393}
3394
3395static void fill_stream_properties_from_drm_display_mode(
3396 struct dc_stream_state *stream,
3397 const struct drm_display_mode *mode_in,
3398 const struct drm_connector *connector,
3399 const struct drm_connector_state *connector_state,
3400 const struct dc_stream_state *old_stream)
3401{
3402 struct dc_crtc_timing *timing_out = &stream->timing;
3403 const struct drm_display_info *info = &connector->display_info;
3404 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3405 struct hdmi_vendor_infoframe hv_frame;
3406 struct hdmi_avi_infoframe avi_frame;
3407
3408 memset(&hv_frame, 0, sizeof(hv_frame));
3409 memset(&avi_frame, 0, sizeof(avi_frame));
3410
3411 timing_out->h_border_left = 0;
3412 timing_out->h_border_right = 0;
3413 timing_out->v_border_top = 0;
3414 timing_out->v_border_bottom = 0;
3415
3416 if (drm_mode_is_420_only(info, mode_in)
3417 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3418 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3419 else if (drm_mode_is_420_also(info, mode_in)
3420 && aconnector->force_yuv420_output)
3421 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3422 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
3423 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3424 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
3425 else
3426 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
3427
3428 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
3429 timing_out->display_color_depth = convert_color_depth_from_display_info(
3430 connector, connector_state);
3431 timing_out->scan_type = SCANNING_TYPE_NODATA;
3432 timing_out->hdmi_vic = 0;
3433
	if (old_stream) {
3435 timing_out->vic = old_stream->timing.vic;
3436 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
3437 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
3438 } else {
3439 timing_out->vic = drm_match_cea_mode(mode_in);
3440 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
3441 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
3442 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
3443 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
3444 }
3445
3446 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
3447 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
3448 timing_out->vic = avi_frame.video_code;
3449 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
3450 timing_out->hdmi_vic = hv_frame.vic;
3451 }
3452
3453 timing_out->h_addressable = mode_in->crtc_hdisplay;
3454 timing_out->h_total = mode_in->crtc_htotal;
3455 timing_out->h_sync_width =
3456 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
3457 timing_out->h_front_porch =
3458 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
3459 timing_out->v_total = mode_in->crtc_vtotal;
3460 timing_out->v_addressable = mode_in->crtc_vdisplay;
3461 timing_out->v_front_porch =
3462 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
3463 timing_out->v_sync_width =
3464 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
3465 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
3466 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
3467
3468 stream->output_color_space = get_output_color_space(timing_out);
3469
3470 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
3471 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
3472 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
3473 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
3474 drm_mode_is_420_also(info, mode_in) &&
3475 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
3476 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3477 adjust_colour_depth_from_display_info(timing_out, info);
3478 }
3479 }
3480}
3481
3482static void fill_audio_info(struct audio_info *audio_info,
3483 const struct drm_connector *drm_connector,
3484 const struct dc_sink *dc_sink)
3485{
3486 int i = 0;
3487 int cea_revision = 0;
3488 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
3489
3490 audio_info->manufacture_id = edid_caps->manufacturer_id;
3491 audio_info->product_id = edid_caps->product_id;
3492
3493 cea_revision = drm_connector->display_info.cea_rev;
3494
3495 strscpy(audio_info->display_name,
3496 edid_caps->display_name,
3497 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
3498
3499 if (cea_revision >= 3) {
3500 audio_info->mode_count = edid_caps->audio_mode_count;
3501
3502 for (i = 0; i < audio_info->mode_count; ++i) {
3503 audio_info->modes[i].format_code =
3504 (enum audio_format_code)
3505 (edid_caps->audio_modes[i].format_code);
3506 audio_info->modes[i].channel_count =
3507 edid_caps->audio_modes[i].channel_count;
3508 audio_info->modes[i].sample_rates.all =
3509 edid_caps->audio_modes[i].sample_rate;
3510 audio_info->modes[i].sample_size =
3511 edid_caps->audio_modes[i].sample_size;
3512 }
3513 }
3514
3515 audio_info->flags.all = edid_caps->speaker_flags;
3516
	/* TODO: only the first (progressive-mode) latency values are used */
3518 if (drm_connector->latency_present[0]) {
3519 audio_info->video_latency = drm_connector->video_latency[0];
3520 audio_info->audio_latency = drm_connector->audio_latency[0];
3521 }
3522
3523
3524
3525}
3526
3527static void
3528copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
3529 struct drm_display_mode *dst_mode)
3530{
3531 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
3532 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
3533 dst_mode->crtc_clock = src_mode->crtc_clock;
3534 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
3535 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
3536 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
3537 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
3538 dst_mode->crtc_htotal = src_mode->crtc_htotal;
3539 dst_mode->crtc_hskew = src_mode->crtc_hskew;
3540 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
3541 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
3542 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
3543 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
3544 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
3545}
3546
3547static void
3548decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
3549 const struct drm_display_mode *native_mode,
3550 bool scale_enabled)
3551{
3552 if (scale_enabled) {
3553 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
3554 } else if (native_mode->clock == drm_mode->clock &&
3555 native_mode->htotal == drm_mode->htotal &&
3556 native_mode->vtotal == drm_mode->vtotal) {
3557 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
3558 } else {
		/* No scaling and no amdgpu-inserted mode: nothing to patch. */
3560 }
3561}
3562
3563static struct dc_sink *
3564create_fake_sink(struct amdgpu_dm_connector *aconnector)
3565{
3566 struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
3569 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
3570
3571 sink = dc_sink_create(&sink_init_data);
3572 if (!sink) {
3573 DRM_ERROR("Failed to create sink!\n");
3574 return NULL;
3575 }
3576 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
3577
3578 return sink;
3579}
3580
3581static void set_multisync_trigger_params(
3582 struct dc_stream_state *stream)
3583{
3584 if (stream->triggered_crtc_reset.enabled) {
3585 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
3586 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
3587 }
3588}
3589
3590static void set_master_stream(struct dc_stream_state *stream_set[],
3591 int stream_count)
3592{
3593 int j, highest_rfr = 0, master_stream = 0;
3594
3595 for (j = 0; j < stream_count; j++) {
3596 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
3597 int refresh_rate = 0;
3598
3599 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
3600 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
3601 if (refresh_rate > highest_rfr) {
3602 highest_rfr = refresh_rate;
3603 master_stream = j;
3604 }
3605 }
3606 }
3607 for (j = 0; j < stream_count; j++) {
3608 if (stream_set[j])
3609 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
3610 }
3611}
3612
3613static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
3614{
3615 int i = 0;
3616
3617 if (context->stream_count < 2)
3618 return;
3619 for (i = 0; i < context->stream_count ; i++) {
3620 if (!context->streams[i])
3621 continue;

		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag. For now it is
		 * left disabled.
		 */
3627 set_multisync_trigger_params(context->streams[i]);
3628 }
3629 set_master_stream(context->streams, context->stream_count);
3630}
3631
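/*
 * Create and fill a dc_stream_state for the given connector and mode.
 * Uses a fake sink when the connector has none, inherits timing details
 * from old_stream when only scaling changed, and configures DSC, audio,
 * scaling and PSR/VSC infopackets as needed.
 */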
3632static struct dc_stream_state *
3633create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
3634 const struct drm_display_mode *drm_mode,
3635 const struct dm_connector_state *dm_state,
3636 const struct dc_stream_state *old_stream)
3637{
3638 struct drm_display_mode *preferred_mode = NULL;
3639 struct drm_connector *drm_connector;
3640 const struct drm_connector_state *con_state =
3641 dm_state ? &dm_state->base : NULL;
3642 struct dc_stream_state *stream = NULL;
3643 struct drm_display_mode mode = *drm_mode;
3644 bool native_mode_found = false;
3645 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
3646 int mode_refresh;
3647 int preferred_refresh = 0;
3648#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
3649 struct dsc_dec_dpcd_caps dsc_caps;
3650 uint32_t link_bandwidth_kbps;
3651#endif
3652
	struct dc_sink *sink = NULL;

	if (!aconnector) {
3655 DRM_ERROR("aconnector is NULL!\n");
3656 return stream;
3657 }
3658
3659 drm_connector = &aconnector->base;
3660
3661 if (!aconnector->dc_sink) {
3662 sink = create_fake_sink(aconnector);
3663 if (!sink)
3664 return stream;
3665 } else {
3666 sink = aconnector->dc_sink;
3667 dc_sink_retain(sink);
3668 }
3669
3670 stream = dc_create_stream_for_sink(sink);
3671
3672 if (stream == NULL) {
3673 DRM_ERROR("Failed to create stream for sink!\n");
3674 goto finish;
3675 }
3676
3677 stream->dm_stream_context = aconnector;
3678
3679 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
3680 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
3681
3682 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for the preferred mode */
3684 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
3685 native_mode_found = true;
3686 break;
3687 }
3688 }
3689 if (!native_mode_found)
3690 preferred_mode = list_first_entry_or_null(
3691 &aconnector->base.modes,
3692 struct drm_display_mode,
3693 head);
3694
3695 mode_refresh = drm_mode_vrefresh(&mode);
3696
3697 if (preferred_mode == NULL) {
		/*
		 * This may not be an error: the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous
		 * mode and the modelist may not be filled in in time.
		 */
3704 DRM_DEBUG_DRIVER("No preferred mode found\n");
3705 } else {
3706 decide_crtc_timing_for_drm_display_mode(
3707 &mode, preferred_mode,
3708 dm_state ? (dm_state->scaling != RMX_OFF) : false);
3709 preferred_refresh = drm_mode_vrefresh(preferred_mode);
3710 }
3711
3712 if (!dm_state)
3713 drm_mode_set_crtcinfo(&mode, 0);
3714
	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * copy the specific timing info from the old stream; otherwise
	 * derive it from the requested mode.
	 */
3719 if (!scale || mode_refresh != preferred_refresh)
3720 fill_stream_properties_from_drm_display_mode(stream,
3721 &mode, &aconnector->base, con_state, NULL);
3722 else
3723 fill_stream_properties_from_drm_display_mode(stream,
3724 &mode, &aconnector->base, con_state, old_stream);
3725
3726#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
3727 stream->timing.flags.DSC = 0;
3728
3729 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
3730 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
3731 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
3732 &dsc_caps);
3733 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
3734 dc_link_get_link_cap(aconnector->dc_link));
3735
3736 if (dsc_caps.is_dsc_supported)
3737 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
3738 &dsc_caps,
3739 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
3740 link_bandwidth_kbps,
3741 &stream->timing,
3742 &stream->timing.dsc_cfg))
3743 stream->timing.flags.DSC = 1;
3744 }
3745#endif
3746
3747 update_stream_scaling_settings(&mode, dm_state, stream);
3748
3749 fill_audio_info(
3750 &stream->audio_info,
3751 drm_connector,
3752 sink);
3753
3754 update_stream_signal(stream, sink);
3755
3756 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3757 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
3758 if (stream->link->psr_feature_enabled) {
3759 struct dc *core_dc = stream->link->ctx->dc;
3760
3761 if (dc_is_dmcu_initialized(core_dc)) {
3762 struct dmcu *dmcu = core_dc->res_pool->dmcu;
3763
3764 stream->psr_version = dmcu->dmcu_version.psr_version;
3765 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
3766 }
3767 }
3768finish:
3769 dc_sink_release(sink);
3770
3771 return stream;
3772}
3773
3774static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
3775{
3776 drm_crtc_cleanup(crtc);
3777 kfree(crtc);
3778}
3779
3780static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3781 struct drm_crtc_state *state)
3782{
3783 struct dm_crtc_state *cur = to_dm_crtc_state(state);
3784
	/* Release our reference to the stream, if any. */
3786 if (cur->stream)
3787 dc_stream_release(cur->stream);
3788
3789
3790 __drm_atomic_helper_crtc_destroy_state(state);
3791
3792
3793 kfree(state);
3794}
3795
3796static void dm_crtc_reset_state(struct drm_crtc *crtc)
3797{
3798 struct dm_crtc_state *state;
3799
3800 if (crtc->state)
3801 dm_crtc_destroy_state(crtc, crtc->state);
3802
3803 state = kzalloc(sizeof(*state), GFP_KERNEL);
3804 if (WARN_ON(!state))
3805 return;
3806
3807 crtc->state = &state->base;
3808 crtc->state->crtc = crtc;
3809
3810}
3811
3812static struct drm_crtc_state *
3813dm_crtc_duplicate_state(struct drm_crtc *crtc)
3814{
3815 struct dm_crtc_state *state, *cur;
3816
	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
3821
3822 state = kzalloc(sizeof(*state), GFP_KERNEL);
3823 if (!state)
3824 return NULL;
3825
3826 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
3827
3828 if (cur->stream) {
3829 state->stream = cur->stream;
3830 dc_stream_retain(state->stream);
3831 }
3832
3833 state->active_planes = cur->active_planes;
3834 state->interrupts_enabled = cur->interrupts_enabled;
3835 state->vrr_params = cur->vrr_params;
3836 state->vrr_infopacket = cur->vrr_infopacket;
3837 state->abm_level = cur->abm_level;
3838 state->vrr_supported = cur->vrr_supported;
3839 state->freesync_config = cur->freesync_config;
3840 state->crc_src = cur->crc_src;
3841 state->cm_has_degamma = cur->cm_has_degamma;
3842 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
3843
3844
3845
3846 return &state->base;
3847}
3848
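/*
 * Enable or disable the VUPDATE interrupt for this CRTC's output timing
 * generator instance through the DC interrupt service.
 */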
3849static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
3850{
3851 enum dc_irq_source irq_source;
3852 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3853 struct amdgpu_device *adev = crtc->dev->dev_private;
3854 int rc;
3855
3856 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
3857
3858 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
3859
3860 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
3861 acrtc->crtc_id, enable ? "en" : "dis", rc);
3862 return rc;
3863}
3864
3865static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
3866{
3867 enum dc_irq_source irq_source;
3868 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3869 struct amdgpu_device *adev = crtc->dev->dev_private;
3870 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
3871 int rc = 0;
3872
3873 if (enable) {
		/* vblank irq on -> Only need vupdate irq in vrr mode */
3875 if (amdgpu_dm_vrr_active(acrtc_state))
3876 rc = dm_set_vupdate_irq(crtc, true);
3877 } else {
		/* vblank irq off -> vupdate irq off */
3879 rc = dm_set_vupdate_irq(crtc, false);
3880 }
3881
3882 if (rc)
3883 return rc;
3884
3885 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
3886 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
3887}
3888
3889static int dm_enable_vblank(struct drm_crtc *crtc)
3890{
3891 return dm_set_vblank(crtc, true);
3892}
3893
3894static void dm_disable_vblank(struct drm_crtc *crtc)
3895{
3896 dm_set_vblank(crtc, false);
3897}
3898
/* Implemented only the options currently available for the driver */
3900static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
3901 .reset = dm_crtc_reset_state,
3902 .destroy = amdgpu_dm_crtc_destroy,
3903 .gamma_set = drm_atomic_helper_legacy_gamma_set,
3904 .set_config = drm_atomic_helper_set_config,
3905 .page_flip = drm_atomic_helper_page_flip,
3906 .atomic_duplicate_state = dm_crtc_duplicate_state,
3907 .atomic_destroy_state = dm_crtc_destroy_state,
3908 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3909 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
3910 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
3911 .enable_vblank = dm_enable_vblank,
3912 .disable_vblank = dm_disable_vblank,
3913};
3914
3915static enum drm_connector_status
3916amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
3917{
3918 bool connected;
3919 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3920
	/*
	 * Notes:
	 * 1. This interface is NOT called in the context of the HPD irq.
	 * 2. This interface *is called* in the context of a user-mode ioctl,
	 *    which makes it a bad place for *any* MST-related activity.
	 */
3928 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
3929 !aconnector->fake_enable)
3930 connected = (aconnector->dc_sink != NULL);
3931 else
3932 connected = (aconnector->base.force == DRM_FORCE_ON);
3933
3934 return (connected ? connector_status_connected :
3935 connector_status_disconnected);
3936}
3937
3938int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
3939 struct drm_connector_state *connector_state,
3940 struct drm_property *property,
3941 uint64_t val)
3942{
3943 struct drm_device *dev = connector->dev;
3944 struct amdgpu_device *adev = dev->dev_private;
3945 struct dm_connector_state *dm_old_state =
3946 to_dm_connector_state(connector->state);
3947 struct dm_connector_state *dm_new_state =
3948 to_dm_connector_state(connector_state);
3949
3950 int ret = -EINVAL;
3951
3952 if (property == dev->mode_config.scaling_mode_property) {
3953 enum amdgpu_rmx_type rmx_type;
3954
3955 switch (val) {
3956 case DRM_MODE_SCALE_CENTER:
3957 rmx_type = RMX_CENTER;
3958 break;
3959 case DRM_MODE_SCALE_ASPECT:
3960 rmx_type = RMX_ASPECT;
3961 break;
3962 case DRM_MODE_SCALE_FULLSCREEN:
3963 rmx_type = RMX_FULL;
3964 break;
3965 case DRM_MODE_SCALE_NONE:
3966 default:
3967 rmx_type = RMX_OFF;
3968 break;
3969 }
3970
3971 if (dm_old_state->scaling == rmx_type)
3972 return 0;
3973
3974 dm_new_state->scaling = rmx_type;
3975 ret = 0;
3976 } else if (property == adev->mode_info.underscan_hborder_property) {
3977 dm_new_state->underscan_hborder = val;
3978 ret = 0;
3979 } else if (property == adev->mode_info.underscan_vborder_property) {
3980 dm_new_state->underscan_vborder = val;
3981 ret = 0;
3982 } else if (property == adev->mode_info.underscan_property) {
3983 dm_new_state->underscan_enable = val;
3984 ret = 0;
3985 } else if (property == adev->mode_info.abm_level_property) {
3986 dm_new_state->abm_level = val;
3987 ret = 0;
3988 }
3989
3990 return ret;
3991}
3992
3993int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
3994 const struct drm_connector_state *state,
3995 struct drm_property *property,
3996 uint64_t *val)
3997{
3998 struct drm_device *dev = connector->dev;
3999 struct amdgpu_device *adev = dev->dev_private;
4000 struct dm_connector_state *dm_state =
4001 to_dm_connector_state(state);
4002 int ret = -EINVAL;
4003
4004 if (property == dev->mode_config.scaling_mode_property) {
4005 switch (dm_state->scaling) {
4006 case RMX_CENTER:
4007 *val = DRM_MODE_SCALE_CENTER;
4008 break;
4009 case RMX_ASPECT:
4010 *val = DRM_MODE_SCALE_ASPECT;
4011 break;
4012 case RMX_FULL:
4013 *val = DRM_MODE_SCALE_FULLSCREEN;
4014 break;
4015 case RMX_OFF:
4016 default:
4017 *val = DRM_MODE_SCALE_NONE;
4018 break;
4019 }
4020 ret = 0;
4021 } else if (property == adev->mode_info.underscan_hborder_property) {
4022 *val = dm_state->underscan_hborder;
4023 ret = 0;
4024 } else if (property == adev->mode_info.underscan_vborder_property) {
4025 *val = dm_state->underscan_vborder;
4026 ret = 0;
4027 } else if (property == adev->mode_info.underscan_property) {
4028 *val = dm_state->underscan_enable;
4029 ret = 0;
4030 } else if (property == adev->mode_info.abm_level_property) {
4031 *val = dm_state->abm_level;
4032 ret = 0;
4033 }
4034
4035 return ret;
4036}
4037
4038static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4039{
4040 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4041
4042 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4043}
4044
4045static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4046{
4047 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4048 const struct dc_link *link = aconnector->dc_link;
4049 struct amdgpu_device *adev = connector->dev->dev_private;
4050 struct amdgpu_display_manager *dm = &adev->dm;
4051
4052#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4053 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4054
4055 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4056 link->type != dc_connection_none &&
4057 dm->backlight_dev) {
4058 backlight_device_unregister(dm->backlight_dev);
4059 dm->backlight_dev = NULL;
4060 }
4061#endif
4062
4063 if (aconnector->dc_em_sink)
4064 dc_sink_release(aconnector->dc_em_sink);
4065 aconnector->dc_em_sink = NULL;
4066 if (aconnector->dc_sink)
4067 dc_sink_release(aconnector->dc_sink);
4068 aconnector->dc_sink = NULL;
4069
4070 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4071 drm_connector_unregister(connector);
4072 drm_connector_cleanup(connector);
4073 if (aconnector->i2c) {
4074 i2c_del_adapter(&aconnector->i2c->base);
4075 kfree(aconnector->i2c);
4076 }
4077
4078 kfree(connector);
4079}
4080
4081void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4082{
4083 struct dm_connector_state *state =
4084 to_dm_connector_state(connector->state);
4085
4086 if (connector->state)
4087 __drm_atomic_helper_connector_destroy_state(connector->state);
4088
4089 kfree(state);
4090
4091 state = kzalloc(sizeof(*state), GFP_KERNEL);
4092
4093 if (state) {
4094 state->scaling = RMX_OFF;
4095 state->underscan_enable = false;
4096 state->underscan_hborder = 0;
4097 state->underscan_vborder = 0;
4098 state->base.max_requested_bpc = 8;
4099
4100 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4101 state->abm_level = amdgpu_dm_abm_level;
4102
4103 __drm_atomic_helper_connector_reset(connector, &state->base);
4104 }
4105}
4106
4107struct drm_connector_state *
4108amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4109{
4110 struct dm_connector_state *state =
4111 to_dm_connector_state(connector->state);
4112
4113 struct dm_connector_state *new_state =
4114 kmemdup(state, sizeof(*state), GFP_KERNEL);
4115
4116 if (!new_state)
4117 return NULL;
4118
4119 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4120
4121 new_state->freesync_capable = state->freesync_capable;
4122 new_state->abm_level = state->abm_level;
4123 new_state->scaling = state->scaling;
4124 new_state->underscan_enable = state->underscan_enable;
4125 new_state->underscan_hborder = state->underscan_hborder;
4126 new_state->underscan_vborder = state->underscan_vborder;
4127
4128 return &new_state->base;
4129}
4130
4131static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4132 .reset = amdgpu_dm_connector_funcs_reset,
4133 .detect = amdgpu_dm_connector_detect,
4134 .fill_modes = drm_helper_probe_single_connector_modes,
4135 .destroy = amdgpu_dm_connector_destroy,
4136 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4137 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4138 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4139 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4140 .early_unregister = amdgpu_dm_connector_unregister
4141};
4142
4143static int get_modes(struct drm_connector *connector)
4144{
4145 return amdgpu_dm_connector_get_modes(connector);
4146}
4147
4148static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4149{
4150 struct dc_sink_init_data init_params = {
4151 .link = aconnector->dc_link,
4152 .sink_signal = SIGNAL_TYPE_VIRTUAL
4153 };
4154 struct edid *edid;
4155
4156 if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			  aconnector->base.name);
4159
4160 aconnector->base.force = DRM_FORCE_OFF;
4161 aconnector->base.override_edid = false;
4162 return;
4163 }
4164
4165 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4166
4167 aconnector->edid = edid;
4168
4169 aconnector->dc_em_sink = dc_link_add_remote_sink(
4170 aconnector->dc_link,
4171 (uint8_t *)edid,
4172 (edid->extensions + 1) * EDID_LENGTH,
4173 &init_params);
4174
4175 if (aconnector->base.force == DRM_FORCE_ON) {
4176 aconnector->dc_sink = aconnector->dc_link->local_sink ?
4177 aconnector->dc_link->local_sink :
4178 aconnector->dc_em_sink;
4179 dc_sink_retain(aconnector->dc_sink);
4180 }
4181}
4182
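/*
 * Handle a forced connector: assume a sane DP link configuration and build
 * the emulated sink from the overridden EDID.
 */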
4183static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4184{
4185 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
	/*
	 * In case of a headless boot with force-on for a DP managed connector,
	 * these settings have to be != 0 to get an initial modeset.
	 */
4191 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4192 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4193 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4194 }
4195
4196
4197 aconnector->base.override_edid = true;
4198 create_eml_sink(aconnector);
4199}
4200
4201enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4202 struct drm_display_mode *mode)
4203{
4204 int result = MODE_ERROR;
4205 struct dc_sink *dc_sink;
4206 struct amdgpu_device *adev = connector->dev->dev_private;
4207
4208 struct dc_stream_state *stream;
4209 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4210 enum dc_status dc_result = DC_OK;
4211
4212 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4213 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
4214 return result;
4215
	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt.
	 */
4220 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4221 !aconnector->dc_em_sink)
4222 handle_edid_mgmt(aconnector);
4223
4224 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
4225
4226 if (dc_sink == NULL) {
4227 DRM_ERROR("dc_sink is NULL!\n");
4228 goto fail;
4229 }
4230
4231 stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
4232 if (stream == NULL) {
4233 DRM_ERROR("Failed to create stream for sink!\n");
4234 goto fail;
4235 }
4236
4237 dc_result = dc_validate_stream(adev->dm.dc, stream);
4238
4239 if (dc_result == DC_OK)
4240 result = MODE_OK;
4241 else
4242 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4243 mode->hdisplay,
4244 mode->vdisplay,
4245 mode->clock,
4246 dc_result);
4247
4248 dc_stream_release(stream);
4249
4250fail:
4251
4252 return result;
4253}
4254
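/*
 * Pack the static HDR metadata from the connector state into a DC info
 * packet, using the HDMI DRM infoframe layout or the DP SDP layout.
 */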
4255static int fill_hdr_info_packet(const struct drm_connector_state *state,
4256 struct dc_info_packet *out)
4257{
4258 struct hdmi_drm_infoframe frame;
4259 unsigned char buf[30];
4260 ssize_t len;
4261 int ret, i;
4262
4263 memset(out, 0, sizeof(*out));
4264
4265 if (!state->hdr_output_metadata)
4266 return 0;
4267
4268 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4269 if (ret)
4270 return ret;
4271
4272 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4273 if (len < 0)
4274 return (int)len;
4275
	/* Static metadata is a fixed 26 bytes + 4 byte header. */
4277 if (len != 30)
4278 return -EINVAL;
4279
	/* Prepare the infopacket for DC. */
4281 switch (state->connector->connector_type) {
4282 case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
4288 break;
4289
4290 case DRM_MODE_CONNECTOR_DisplayPort:
4291 case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
4299 break;
4300
4301 default:
4302 return -EINVAL;
4303 }
4304
4305 memcpy(&out->sb[i], &buf[4], 26);
4306 out->valid = true;
4307
4308 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4309 sizeof(out->sb), false);
4310
4311 return 0;
4312}
4313
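/* Return true if the HDR output metadata blob differs between the two states. */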
4314static bool
4315is_hdr_metadata_different(const struct drm_connector_state *old_state,
4316 const struct drm_connector_state *new_state)
4317{
4318 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4319 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4320
4321 if (old_blob != new_blob) {
4322 if (old_blob && new_blob &&
4323 old_blob->length == new_blob->length)
4324 return memcmp(old_blob->data, new_blob->data,
4325 old_blob->length);
4326
4327 return true;
4328 }
4329
4330 return false;
4331}
4332
4333static int
4334amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4335 struct drm_atomic_state *state)
4336{
4337 struct drm_connector_state *new_con_state =
4338 drm_atomic_get_new_connector_state(state, conn);
4339 struct drm_connector_state *old_con_state =
4340 drm_atomic_get_old_connector_state(state, conn);
4341 struct drm_crtc *crtc = new_con_state->crtc;
4342 struct drm_crtc_state *new_crtc_state;
4343 int ret;
4344
4345 if (!crtc)
4346 return 0;
4347
4348 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4349 struct dc_info_packet hdr_infopacket;
4350
4351 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4352 if (ret)
4353 return ret;
4354
4355 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4356 if (IS_ERR(new_crtc_state))
4357 return PTR_ERR(new_crtc_state);
4358
		/*
		 * DC considers the stream backends changed if the
		 * static metadata changes. Forcing the modeset also
		 * gives a simple way for userspace to switch from
		 * 8bpc to 10bpc when setting the metadata to enter
		 * or exit HDR.
		 *
		 * Changing the static metadata after it's been
		 * set is permissible, however. So only force a
		 * modeset if we're entering or exiting HDR.
		 */
4370 new_crtc_state->mode_changed =
4371 !old_con_state->hdr_output_metadata ||
4372 !new_con_state->hdr_output_metadata;
4373 }
4374
4375 return 0;
4376}
4377
4378static const struct drm_connector_helper_funcs
4379amdgpu_dm_connector_helper_funcs = {
	/*
	 * If a second, larger display is hotplugged in fbcon mode, its larger
	 * modes are filtered out by drm_mode_validate_size(). Renew the mode
	 * list in the get_modes callback instead of just returning the count.
	 */
4386 .get_modes = get_modes,
4387 .mode_valid = amdgpu_dm_connector_mode_valid,
4388 .atomic_check = amdgpu_dm_connector_atomic_check,
4389};
4390
4391static void dm_crtc_helper_disable(struct drm_crtc *crtc)
4392{
4393}
4394
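/* Check whether a cursor plane is currently attached to this CRTC state. */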
4395static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
4396{
4397 struct drm_device *dev = new_crtc_state->crtc->dev;
4398 struct drm_plane *plane;
4399
4400 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
4401 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4402 return true;
4403 }
4404
4405 return false;
4406}
4407
4408static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
4409{
4410 struct drm_atomic_state *state = new_crtc_state->state;
4411 struct drm_plane *plane;
4412 int num_active = 0;
4413
4414 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
4415 struct drm_plane_state *new_plane_state;
4416
		/* Cursor planes are "fake". */
4418 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4419 continue;
4420
4421 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
4422
4423 if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it cannot be disabled in this
			 * commit, unless it's disabled somewhere else (e.g. by
			 * another commit).
			 */
4429 num_active += 1;
4430 continue;
4431 }
4432
		/* A plane counts as active when it has a framebuffer attached. */
4434 num_active += (new_plane_state->fb != NULL);
4435 }
4436
4437 return num_active;
4438}
4439
/*
 * Sets whether interrupts should be enabled on a specific CRTC.
 * We require that the stream be enabled and that there exist active
 * DC planes on the stream.
 */
4445static void
4446dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
4447 struct drm_crtc_state *new_crtc_state)
4448{
4449 struct dm_crtc_state *dm_new_crtc_state =
4450 to_dm_crtc_state(new_crtc_state);
4451
4452 dm_new_crtc_state->active_planes = 0;
4453 dm_new_crtc_state->interrupts_enabled = false;
4454
4455 if (!dm_new_crtc_state->stream)
4456 return;
4457
4458 dm_new_crtc_state->active_planes =
4459 count_crtc_active_planes(new_crtc_state);
4460
4461 dm_new_crtc_state->interrupts_enabled =
4462 dm_new_crtc_state->active_planes > 0;
4463}
4464
4465static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
4466 struct drm_crtc_state *state)
4467{
4468 struct amdgpu_device *adev = crtc->dev->dev_private;
4469 struct dc *dc = adev->dm.dc;
4470 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
4471 int ret = -EINVAL;
4472
	/*
	 * Update interrupt state for the CRTC. This needs to happen whenever
	 * the CRTC has changed or whenever any of its planes have changed.
	 * Atomic check satisfies both of these requirements since the CRTC
	 * is added to the state by DRM during drm_atomic_helper_check_planes.
	 */
4479 dm_update_crtc_interrupt_state(crtc, state);
4480
4481 if (unlikely(!dm_crtc_state->stream &&
4482 modeset_required(state, NULL, dm_crtc_state->stream))) {
4483 WARN_ON(1);
4484 return ret;
4485 }
4486
	/* In some use cases, like reset, no stream is attached. */
4488 if (!dm_crtc_state->stream)
4489 return 0;
4490
	/*
	 * We want at least one hardware plane enabled to use
	 * the stream with a cursor enabled.
	 */
4495 if (state->enable && state->active &&
4496 does_crtc_have_active_cursor(state) &&
4497 dm_crtc_state->active_planes == 0)
4498 return -EINVAL;
4499
4500 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
4501 return 0;
4502
4503 return ret;
4504}
4505
4506static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
4507 const struct drm_display_mode *mode,
4508 struct drm_display_mode *adjusted_mode)
4509{
4510 return true;
4511}
4512
4513static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
4514 .disable = dm_crtc_helper_disable,
4515 .atomic_check = dm_crtc_helper_atomic_check,
4516 .mode_fixup = dm_crtc_helper_mode_fixup
4517};
4518
4519static void dm_encoder_helper_disable(struct drm_encoder *encoder)
4520{
4521
4522}
4523
4524static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
4525 struct drm_crtc_state *crtc_state,
4526 struct drm_connector_state *conn_state)
4527{
4528 return 0;
4529}
4530
4531const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
4532 .disable = dm_encoder_helper_disable,
4533 .atomic_check = dm_encoder_helper_atomic_check
4534};
4535
4536static void dm_drm_plane_reset(struct drm_plane *plane)
4537{
4538 struct dm_plane_state *amdgpu_state = NULL;
4539
4540 if (plane->state)
4541 plane->funcs->atomic_destroy_state(plane, plane->state);
4542
4543 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
4544 WARN_ON(amdgpu_state == NULL);
4545
4546 if (amdgpu_state)
4547 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
4548}
4549
4550static struct drm_plane_state *
4551dm_drm_plane_duplicate_state(struct drm_plane *plane)
4552{
4553 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
4554
4555 old_dm_plane_state = to_dm_plane_state(plane->state);
4556 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
4557 if (!dm_plane_state)
4558 return NULL;
4559
4560 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
4561
4562 if (old_dm_plane_state->dc_state) {
4563 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
4564 dc_plane_state_retain(dm_plane_state->dc_state);
4565 }
4566
4567 return &dm_plane_state->base;
4568}
4569
4570void dm_drm_plane_destroy_state(struct drm_plane *plane,
4571 struct drm_plane_state *state)
4572{
4573 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
4574
4575 if (dm_plane_state->dc_state)
4576 dc_plane_state_release(dm_plane_state->dc_state);
4577
4578 drm_atomic_helper_plane_destroy_state(plane, state);
4579}
4580
4581static const struct drm_plane_funcs dm_plane_funcs = {
4582 .update_plane = drm_atomic_helper_update_plane,
4583 .disable_plane = drm_atomic_helper_disable_plane,
4584 .destroy = drm_primary_helper_destroy,
4585 .reset = dm_drm_plane_reset,
4586 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
4587 .atomic_destroy_state = dm_drm_plane_destroy_state,
4588};
4589
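/*
 * Pin the framebuffer BO into a displayable domain and record its GPU address
 * and tiling info before the commit programs the hardware.
 */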
4590static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
4591 struct drm_plane_state *new_state)
4592{
4593 struct amdgpu_framebuffer *afb;
4594 struct drm_gem_object *obj;
4595 struct amdgpu_device *adev;
4596 struct amdgpu_bo *rbo;
4597 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
4598 struct list_head list;
4599 struct ttm_validate_buffer tv;
4600 struct ww_acquire_ctx ticket;
4601 uint64_t tiling_flags;
4602 uint32_t domain;
4603 int r;
4604
4605 dm_plane_state_old = to_dm_plane_state(plane->state);
4606 dm_plane_state_new = to_dm_plane_state(new_state);
4607
4608 if (!new_state->fb) {
4609 DRM_DEBUG_DRIVER("No FB bound\n");
4610 return 0;
4611 }
4612
4613 afb = to_amdgpu_framebuffer(new_state->fb);
4614 obj = new_state->fb->obj[0];
4615 rbo = gem_to_amdgpu_bo(obj);
4616 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
4617 INIT_LIST_HEAD(&list);
4618
4619 tv.bo = &rbo->tbo;
4620 tv.num_shared = 1;
4621 list_add(&tv.head, &list);
4622
4623 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
4624 if (r) {
4625 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
4626 return r;
4627 }
4628
4629 if (plane->type != DRM_PLANE_TYPE_CURSOR)
4630 domain = amdgpu_display_supported_domains(adev, rbo->flags);
4631 else
4632 domain = AMDGPU_GEM_DOMAIN_VRAM;
4633
4634 r = amdgpu_bo_pin(rbo, domain);
4635 if (unlikely(r != 0)) {
4636 if (r != -ERESTARTSYS)
4637 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
4638 ttm_eu_backoff_reservation(&ticket, &list);
4639 return r;
4640 }
4641
4642 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
4643 if (unlikely(r != 0)) {
4644 amdgpu_bo_unpin(rbo);
4645 ttm_eu_backoff_reservation(&ticket, &list);
4646 DRM_ERROR("%p bind failed\n", rbo);
4647 return r;
4648 }
4649
4650 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
4651
4652 ttm_eu_backoff_reservation(&ticket, &list);
4653
4654 afb->address = amdgpu_bo_gpu_offset(rbo);
4655
4656 amdgpu_bo_ref(rbo);
4657
4658 if (dm_plane_state_new->dc_state &&
4659 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
4660 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
4661
4662 fill_plane_buffer_attributes(
4663 adev, afb, plane_state->format, plane_state->rotation,
4664 tiling_flags, &plane_state->tiling_info,
4665 &plane_state->plane_size, &plane_state->dcc,
4666 &plane_state->address);
4667 }
4668
4669 return 0;
4670}
4671
4672static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
4673 struct drm_plane_state *old_state)
4674{
4675 struct amdgpu_bo *rbo;
4676 int r;
4677
4678 if (!old_state->fb)
4679 return;
4680
4681 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
4682 r = amdgpu_bo_reserve(rbo, false);
4683 if (unlikely(r)) {
4684 DRM_ERROR("failed to reserve rbo before unpin\n");
4685 return;
4686 }
4687
4688 amdgpu_bo_unpin(rbo);
4689 amdgpu_bo_unreserve(rbo);
4690 amdgpu_bo_unref(&rbo);
4691}
4692
4693static int dm_plane_atomic_check(struct drm_plane *plane,
4694 struct drm_plane_state *state)
4695{
4696 struct amdgpu_device *adev = plane->dev->dev_private;
4697 struct dc *dc = adev->dm.dc;
4698 struct dm_plane_state *dm_plane_state;
4699 struct dc_scaling_info scaling_info;
4700 int ret;
4701
4702 dm_plane_state = to_dm_plane_state(state);
4703
4704 if (!dm_plane_state->dc_state)
4705 return 0;
4706
4707 ret = fill_dc_scaling_info(state, &scaling_info);
4708 if (ret)
4709 return ret;
4710
4711 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
4712 return 0;
4713
4714 return -EINVAL;
4715}
4716
4717static int dm_plane_atomic_async_check(struct drm_plane *plane,
4718 struct drm_plane_state *new_plane_state)
4719{
	/* Only support async updates on cursor planes. */
4721 if (plane->type != DRM_PLANE_TYPE_CURSOR)
4722 return -EINVAL;
4723
4724 return 0;
4725}
4726
4727static void dm_plane_atomic_async_update(struct drm_plane *plane,
4728 struct drm_plane_state *new_state)
4729{
4730 struct drm_plane_state *old_state =
4731 drm_atomic_get_old_plane_state(new_state->state, plane);
4732
4733 swap(plane->state->fb, new_state->fb);
4734
4735 plane->state->src_x = new_state->src_x;
4736 plane->state->src_y = new_state->src_y;
4737 plane->state->src_w = new_state->src_w;
4738 plane->state->src_h = new_state->src_h;
4739 plane->state->crtc_x = new_state->crtc_x;
4740 plane->state->crtc_y = new_state->crtc_y;
4741 plane->state->crtc_w = new_state->crtc_w;
4742 plane->state->crtc_h = new_state->crtc_h;
4743
4744 handle_cursor_update(plane, old_state);
4745}
4746
4747static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
4748 .prepare_fb = dm_plane_helper_prepare_fb,
4749 .cleanup_fb = dm_plane_helper_cleanup_fb,
4750 .atomic_check = dm_plane_atomic_check,
4751 .atomic_async_check = dm_plane_atomic_async_check,
4752 .atomic_async_update = dm_plane_atomic_async_update
4753};
4754
/*
 * TODO: these are currently initialized to RGB formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the internal
 * DRM check will succeed, and let DC implement the proper checks.
 */
4761static const uint32_t rgb_formats[] = {
4762 DRM_FORMAT_XRGB8888,
4763 DRM_FORMAT_ARGB8888,
4764 DRM_FORMAT_RGBA8888,
4765 DRM_FORMAT_XRGB2101010,
4766 DRM_FORMAT_XBGR2101010,
4767 DRM_FORMAT_ARGB2101010,
4768 DRM_FORMAT_ABGR2101010,
4769 DRM_FORMAT_XBGR8888,
4770 DRM_FORMAT_ABGR8888,
4771 DRM_FORMAT_RGB565,
4772};
4773
4774static const uint32_t overlay_formats[] = {
4775 DRM_FORMAT_XRGB8888,
4776 DRM_FORMAT_ARGB8888,
4777 DRM_FORMAT_RGBA8888,
4778 DRM_FORMAT_XBGR8888,
4779 DRM_FORMAT_ABGR8888,
4780 DRM_FORMAT_RGB565
4781};
4782
4783static const u32 cursor_formats[] = {
4784 DRM_FORMAT_ARGB8888
4785};
4786
4787static int get_plane_formats(const struct drm_plane *plane,
4788 const struct dc_plane_cap *plane_cap,
4789 uint32_t *formats, int max_formats)
4790{
4791 int i, num_formats = 0;
4792
	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * caps list.
	 */
4798
4799 switch (plane->type) {
4800 case DRM_PLANE_TYPE_PRIMARY:
4801 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
4802 if (num_formats >= max_formats)
4803 break;
4804
4805 formats[num_formats++] = rgb_formats[i];
4806 }
4807
4808 if (plane_cap && plane_cap->pixel_format_support.nv12)
4809 formats[num_formats++] = DRM_FORMAT_NV12;
4810 break;
4811
4812 case DRM_PLANE_TYPE_OVERLAY:
4813 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
4814 if (num_formats >= max_formats)
4815 break;
4816
4817 formats[num_formats++] = overlay_formats[i];
4818 }
4819 break;
4820
4821 case DRM_PLANE_TYPE_CURSOR:
4822 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
4823 if (num_formats >= max_formats)
4824 break;
4825
4826 formats[num_formats++] = cursor_formats[i];
4827 }
4828 break;
4829 }
4830
4831 return num_formats;
4832}
4833
4834static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
4835 struct drm_plane *plane,
4836 unsigned long possible_crtcs,
4837 const struct dc_plane_cap *plane_cap)
4838{
4839 uint32_t formats[32];
4840 int num_formats;
4841 int res = -EPERM;
4842
4843 num_formats = get_plane_formats(plane, plane_cap, formats,
4844 ARRAY_SIZE(formats));
4845
4846 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
4847 &dm_plane_funcs, formats, num_formats,
4848 NULL, plane->type, NULL);
4849 if (res)
4850 return res;
4851
4852 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
4853 plane_cap && plane_cap->per_pixel_alpha) {
4854 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
4855 BIT(DRM_MODE_BLEND_PREMULTI);
4856
4857 drm_plane_create_alpha_property(plane);
4858 drm_plane_create_blend_mode_property(plane, blend_caps);
4859 }
4860
4861 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
4862 plane_cap && plane_cap->pixel_format_support.nv12) {
		/* This only affects YUV formats. */
4864 drm_plane_create_color_properties(
4865 plane,
4866 BIT(DRM_COLOR_YCBCR_BT601) |
4867 BIT(DRM_COLOR_YCBCR_BT709),
4868 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
4869 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
4870 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
4871 }
4872
4873 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
4874
	/* Create (reset) the plane state */
4876 if (plane->funcs->reset)
4877 plane->funcs->reset(plane);
4878
4879 return 0;
4880}
4881
4882static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
4883 struct drm_plane *plane,
4884 uint32_t crtc_index)
4885{
4886 struct amdgpu_crtc *acrtc = NULL;
4887 struct drm_plane *cursor_plane;
4888
4889 int res = -ENOMEM;
4890
4891 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
4892 if (!cursor_plane)
4893 goto fail;
4894
	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
4897
4898 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
4899 if (!acrtc)
4900 goto fail;
4901
4902 res = drm_crtc_init_with_planes(
4903 dm->ddev,
4904 &acrtc->base,
4905 plane,
4906 cursor_plane,
4907 &amdgpu_dm_crtc_funcs, NULL);
4908
4909 if (res)
4910 goto fail;
4911
4912 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
4913
	/* Create (reset) the CRTC state */
4915 if (acrtc->base.funcs->reset)
4916 acrtc->base.funcs->reset(&acrtc->base);
4917
4918 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
4919 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
4920
4921 acrtc->crtc_id = crtc_index;
4922 acrtc->base.enabled = false;
4923 acrtc->otg_inst = -1;
4924
4925 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
4926 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
4927 true, MAX_COLOR_LUT_ENTRIES);
4928 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
4929
4930 return 0;
4931
4932fail:
4933 kfree(acrtc);
4934 kfree(cursor_plane);
4935 return res;
4936}
4937
4938
4939static int to_drm_connector_type(enum signal_type st)
4940{
4941 switch (st) {
4942 case SIGNAL_TYPE_HDMI_TYPE_A:
4943 return DRM_MODE_CONNECTOR_HDMIA;
4944 case SIGNAL_TYPE_EDP:
4945 return DRM_MODE_CONNECTOR_eDP;
4946 case SIGNAL_TYPE_LVDS:
4947 return DRM_MODE_CONNECTOR_LVDS;
4948 case SIGNAL_TYPE_RGB:
4949 return DRM_MODE_CONNECTOR_VGA;
4950 case SIGNAL_TYPE_DISPLAY_PORT:
4951 case SIGNAL_TYPE_DISPLAY_PORT_MST:
4952 return DRM_MODE_CONNECTOR_DisplayPort;
4953 case SIGNAL_TYPE_DVI_DUAL_LINK:
4954 case SIGNAL_TYPE_DVI_SINGLE_LINK:
4955 return DRM_MODE_CONNECTOR_DVID;
4956 case SIGNAL_TYPE_VIRTUAL:
4957 return DRM_MODE_CONNECTOR_VIRTUAL;
4958
4959 default:
4960 return DRM_MODE_CONNECTOR_Unknown;
4961 }
4962}
4963
4964static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
4965{
4966 struct drm_encoder *encoder;
4967
	/* There is only one encoder per connector */
4969 drm_connector_for_each_possible_encoder(connector, encoder)
4970 return encoder;
4971
4972 return NULL;
4973}
4974
4975static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
4976{
4977 struct drm_encoder *encoder;
4978 struct amdgpu_encoder *amdgpu_encoder;
4979
4980 encoder = amdgpu_dm_connector_to_encoder(connector);
4981
4982 if (encoder == NULL)
4983 return;
4984
4985 amdgpu_encoder = to_amdgpu_encoder(encoder);
4986
4987 amdgpu_encoder->native_mode.clock = 0;
4988
4989 if (!list_empty(&connector->probed_modes)) {
4990 struct drm_display_mode *preferred_mode = NULL;
4991
4992 list_for_each_entry(preferred_mode,
4993 &connector->probed_modes,
4994 head) {
4995 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
4996 amdgpu_encoder->native_mode = *preferred_mode;
4997
4998 break;
4999 }
5000
5001 }
5002}
5003
5004static struct drm_display_mode *
5005amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5006 char *name,
5007 int hdisplay, int vdisplay)
5008{
5009 struct drm_device *dev = encoder->dev;
5010 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5011 struct drm_display_mode *mode = NULL;
5012 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5013
5014 mode = drm_mode_duplicate(dev, native_mode);
5015
5016 if (mode == NULL)
5017 return NULL;
5018
5019 mode->hdisplay = hdisplay;
5020 mode->vdisplay = vdisplay;
5021 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5022 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5023
5024 return mode;
5025
5026}
5027
5028static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5029 struct drm_connector *connector)
5030{
5031 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5032 struct drm_display_mode *mode = NULL;
5033 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5034 struct amdgpu_dm_connector *amdgpu_dm_connector =
5035 to_amdgpu_dm_connector(connector);
5036 int i;
5037 int n;
5038 struct mode_size {
5039 char name[DRM_DISPLAY_MODE_LEN];
5040 int w;
5041 int h;
5042 } common_modes[] = {
5043 { "640x480", 640, 480},
5044 { "800x600", 800, 600},
5045 { "1024x768", 1024, 768},
5046 { "1280x720", 1280, 720},
5047 { "1280x800", 1280, 800},
5048 {"1280x1024", 1280, 1024},
5049 { "1440x900", 1440, 900},
5050 {"1680x1050", 1680, 1050},
5051 {"1600x1200", 1600, 1200},
5052 {"1920x1080", 1920, 1080},
5053 {"1920x1200", 1920, 1200}
5054 };
5055
5056 n = ARRAY_SIZE(common_modes);
5057
5058 for (i = 0; i < n; i++) {
5059 struct drm_display_mode *curmode = NULL;
5060 bool mode_existed = false;
5061
5062 if (common_modes[i].w > native_mode->hdisplay ||
5063 common_modes[i].h > native_mode->vdisplay ||
5064 (common_modes[i].w == native_mode->hdisplay &&
5065 common_modes[i].h == native_mode->vdisplay))
5066 continue;
5067
5068 list_for_each_entry(curmode, &connector->probed_modes, head) {
5069 if (common_modes[i].w == curmode->hdisplay &&
5070 common_modes[i].h == curmode->vdisplay) {
5071 mode_existed = true;
5072 break;
5073 }
5074 }
5075
5076 if (mode_existed)
5077 continue;
5078
		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		if (!mode)
			continue;

		drm_mode_probed_add(connector, mode);
5083 amdgpu_dm_connector->num_modes++;
5084 }
5085}
5086
5087static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5088 struct edid *edid)
5089{
5090 struct amdgpu_dm_connector *amdgpu_dm_connector =
5091 to_amdgpu_dm_connector(connector);
5092
5093 if (edid) {
		/* empty probed_modes */
5095 INIT_LIST_HEAD(&connector->probed_modes);
5096 amdgpu_dm_connector->num_modes =
5097 drm_add_edid_modes(connector, edid);
5098
		/*
		 * Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(): the lookup below only
		 * inspects the head of the list, so the preferred mode
		 * must be sorted to the front first.
		 */
5107 drm_mode_sort(&connector->probed_modes);
5108 amdgpu_dm_get_native_mode(connector);
5109 } else {
5110 amdgpu_dm_connector->num_modes = 0;
5111 }
5112}
5113
5114static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5115{
5116 struct amdgpu_dm_connector *amdgpu_dm_connector =
5117 to_amdgpu_dm_connector(connector);
5118 struct drm_encoder *encoder;
5119 struct edid *edid = amdgpu_dm_connector->edid;
5120
5121 encoder = amdgpu_dm_connector_to_encoder(connector);
5122
5123 if (!edid || !drm_edid_is_valid(edid)) {
5124 amdgpu_dm_connector->num_modes =
5125 drm_add_modes_noedid(connector, 640, 480);
5126 } else {
5127 amdgpu_dm_connector_ddc_get_modes(connector, edid);
5128 amdgpu_dm_connector_add_common_modes(encoder, connector);
5129 }
5130 amdgpu_dm_fbc_init(connector);
5131
5132 return amdgpu_dm_connector->num_modes;
5133}
5134
5135void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5136 struct amdgpu_dm_connector *aconnector,
5137 int connector_type,
5138 struct dc_link *link,
5139 int link_index)
5140{
5141 struct amdgpu_device *adev = dm->ddev->dev_private;
5142
	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
5147 if (aconnector->base.funcs->reset)
5148 aconnector->base.funcs->reset(&aconnector->base);
5149
5150 aconnector->connector_id = link_index;
5151 aconnector->dc_link = link;
5152 aconnector->base.interlace_allowed = false;
5153 aconnector->base.doublescan_allowed = false;
5154 aconnector->base.stereo_allowed = false;
5155 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5156 aconnector->hpd.hpd = AMDGPU_HPD_NONE;
5157 aconnector->audio_inst = -1;
5158 mutex_init(&aconnector->hpd_lock);
5159
	/*
	 * Configure HPD support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
5164 switch (connector_type) {
5165 case DRM_MODE_CONNECTOR_HDMIA:
5166 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported;
5169 break;
5170 case DRM_MODE_CONNECTOR_DisplayPort:
5171 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported;
5174 break;
5175 case DRM_MODE_CONNECTOR_DVID:
5176 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5177 break;
5178 default:
5179 break;
5180 }
5181
5182 drm_object_attach_property(&aconnector->base.base,
5183 dm->ddev->mode_config.scaling_mode_property,
5184 DRM_MODE_SCALE_NONE);
5185
5186 drm_object_attach_property(&aconnector->base.base,
5187 adev->mode_info.underscan_property,
5188 UNDERSCAN_OFF);
5189 drm_object_attach_property(&aconnector->base.base,
5190 adev->mode_info.underscan_hborder_property,
5191 0);
5192 drm_object_attach_property(&aconnector->base.base,
5193 adev->mode_info.underscan_vborder_property,
5194 0);
5195
5196 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5197
	/* This defaults to the max in the range, but we want 8bpc. */
5199 aconnector->base.state->max_bpc = 8;
5200 aconnector->base.state->max_requested_bpc = 8;
5201
5202 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5203 dc_is_dmcu_initialized(adev->dm.dc)) {
5204 drm_object_attach_property(&aconnector->base.base,
5205 adev->mode_info.abm_level_property, 0);
5206 }
5207
5208 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5209 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5210 connector_type == DRM_MODE_CONNECTOR_eDP) {
5211 drm_object_attach_property(
5212 &aconnector->base.base,
5213 dm->ddev->mode_config.hdr_output_metadata_property, 0);
5214
5215 drm_connector_attach_vrr_capable_property(
5216 &aconnector->base);
5217#ifdef CONFIG_DRM_AMD_DC_HDCP
5218 if (adev->asic_type >= CHIP_RAVEN)
5219 drm_connector_attach_content_protection_property(&aconnector->base, false);
5220#endif
5221 }
5222}
5223
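/* Forward i2c transfers from the DRM i2c adapter to DC's DDC service. */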
5224static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5225 struct i2c_msg *msgs, int num)
5226{
5227 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5228 struct ddc_service *ddc_service = i2c->ddc_service;
5229 struct i2c_command cmd;
5230 int i;
5231 int result = -EIO;
5232
5233 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
5234
5235 if (!cmd.payloads)
5236 return result;
5237
5238 cmd.number_of_payloads = num;
5239 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
5240 cmd.speed = 100;
5241
5242 for (i = 0; i < num; i++) {
5243 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
5244 cmd.payloads[i].address = msgs[i].addr;
5245 cmd.payloads[i].length = msgs[i].len;
5246 cmd.payloads[i].data = msgs[i].buf;
5247 }
5248
5249 if (dc_submit_i2c(
5250 ddc_service->ctx->dc,
5251 ddc_service->ddc_pin->hw_info.ddc_channel,
5252 &cmd))
5253 result = num;
5254
5255 kfree(cmd.payloads);
5256 return result;
5257}
5258
5259static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
5260{
5261 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
5262}
5263
5264static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
5265 .master_xfer = amdgpu_dm_i2c_xfer,
5266 .functionality = amdgpu_dm_i2c_func,
5267};
5268
5269static struct amdgpu_i2c_adapter *
5270create_i2c(struct ddc_service *ddc_service,
5271 int link_index,
5272 int *res)
5273{
5274 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
5275 struct amdgpu_i2c_adapter *i2c;
5276
5277 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
5278 if (!i2c)
5279 return NULL;
5280 i2c->base.owner = THIS_MODULE;
5281 i2c->base.class = I2C_CLASS_DDC;
5282 i2c->base.dev.parent = &adev->pdev->dev;
5283 i2c->base.algo = &amdgpu_dm_i2c_algo;
5284 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
5285 i2c_set_adapdata(&i2c->base, i2c);
5286 i2c->ddc_service = ddc_service;
5287 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
5288
5289 return i2c;
5290}
5291
/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
5297static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
5298 struct amdgpu_dm_connector *aconnector,
5299 uint32_t link_index,
5300 struct amdgpu_encoder *aencoder)
5301{
5302 int res = 0;
5303 int connector_type;
5304 struct dc *dc = dm->dc;
5305 struct dc_link *link = dc_get_link_at_index(dc, link_index);
5306 struct amdgpu_i2c_adapter *i2c;
5307
5308 link->priv = aconnector;
5309
5310 DRM_DEBUG_DRIVER("%s()\n", __func__);
5311
5312 i2c = create_i2c(link->ddc, link->link_index, &res);
5313 if (!i2c) {
5314 DRM_ERROR("Failed to create i2c adapter data\n");
5315 return -ENOMEM;
5316 }
5317
5318 aconnector->i2c = i2c;
5319 res = i2c_add_adapter(&i2c->base);
5320
5321 if (res) {
5322 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
5323 goto out_free;
5324 }
5325
5326 connector_type = to_drm_connector_type(link->connector_signal);
5327
5328 res = drm_connector_init(
5329 dm->ddev,
5330 &aconnector->base,
5331 &amdgpu_dm_connector_funcs,
5332 connector_type);
5333
5334 if (res) {
5335 DRM_ERROR("connector_init failed\n");
5336 aconnector->connector_id = -1;
5337 goto out_free;
5338 }
5339
5340 drm_connector_helper_add(
5341 &aconnector->base,
5342 &amdgpu_dm_connector_helper_funcs);
5343
5344 amdgpu_dm_connector_init_helper(
5345 dm,
5346 aconnector,
5347 connector_type,
5348 link,
5349 link_index);
5350
5351 drm_connector_attach_encoder(
5352 &aconnector->base, &aencoder->base);
5353
5354 drm_connector_register(&aconnector->base);
5355#if defined(CONFIG_DEBUG_FS)
5356 connector_debugfs_init(aconnector);
5357 aconnector->debugfs_dpcd_address = 0;
5358 aconnector->debugfs_dpcd_size = 0;
5359#endif
5360
5361 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
5362 || connector_type == DRM_MODE_CONNECTOR_eDP)
5363 amdgpu_dm_initialize_dp_connector(dm, aconnector);
5364
5365out_free:
5366 if (res) {
5367 kfree(i2c);
5368 aconnector->i2c = NULL;
5369 }
5370 return res;
5371}
5372
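/* Every encoder can drive any CRTC, so build a mask with one bit per CRTC. */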
5373int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
5374{
5375 switch (adev->mode_info.num_crtc) {
5376 case 1:
5377 return 0x1;
5378 case 2:
5379 return 0x3;
5380 case 3:
5381 return 0x7;
5382 case 4:
5383 return 0xf;
5384 case 5:
5385 return 0x1f;
5386 case 6:
5387 default:
5388 return 0x3f;
5389 }
5390}
5391
5392static int amdgpu_dm_encoder_init(struct drm_device *dev,
5393 struct amdgpu_encoder *aencoder,
5394 uint32_t link_index)
5395{
5396 struct amdgpu_device *adev = dev->dev_private;
5397
5398 int res = drm_encoder_init(dev,
5399 &aencoder->base,
5400 &amdgpu_dm_encoder_funcs,
5401 DRM_MODE_ENCODER_TMDS,
5402 NULL);
5403
5404 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
5405
5406 if (!res)
5407 aencoder->encoder_id = link_index;
5408 else
5409 aencoder->encoder_id = -1;
5410
5411 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
5412
5413 return res;
5414}
5415
5416static void manage_dm_interrupts(struct amdgpu_device *adev,
5417 struct amdgpu_crtc *acrtc,
5418 bool enable)
5419{
	/*
	 * This is not a correct translation, but it works as long as the
	 * VBLANK constant is the same as PFLIP.
	 */
5424 int irq_type =
5425 amdgpu_display_crtc_idx_to_irq_type(
5426 adev,
5427 acrtc->crtc_id);
5428
5429 if (enable) {
5430 drm_crtc_vblank_on(&acrtc->base);
5431 amdgpu_irq_get(
5432 adev,
5433 &adev->pageflip_irq,
5434 irq_type);
5435 } else {
5436
5437 amdgpu_irq_put(
5438 adev,
5439 &adev->pageflip_irq,
5440 irq_type);
5441 drm_crtc_vblank_off(&acrtc->base);
5442 }
5443}
5444
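/* Detect scaling or underscan changes that require a stream update. */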
5445static bool
5446is_scaling_state_different(const struct dm_connector_state *dm_state,
5447 const struct dm_connector_state *old_dm_state)
5448{
5449 if (dm_state->scaling != old_dm_state->scaling)
5450 return true;
5451 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
5452 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
5453 return true;
5454 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
5455 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
5456 return true;
5457 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
5458 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
5459 return true;
5460 return false;
5461}
5462
5463#ifdef CONFIG_DRM_AMD_DC_HDCP
5464static bool is_content_protection_different(struct drm_connector_state *state,
5465 const struct drm_connector_state *old_state,
5466 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
5467{
5468 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5469
	/* CP is being re-enabled, ignore this. */
5471 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
5472 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
5473 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
5474 return false;
5475 }
5476
	/*
	 * S3 resume case: the old state will always be 0 (UNDESIRED) and
	 * the restored state will be ENABLED.
	 */
5478 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
5479 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
5480 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
5481
	/*
	 * Check that something is actually connected and enabled; otherwise
	 * we would start HDCP with nothing driving it (hot-plug, headless S3,
	 * DPMS).
	 */
5485 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
5486 aconnector->dc_sink != NULL)
5487 return true;
5488
5489 if (old_state->content_protection == state->content_protection)
5490 return false;
5491
5492 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
5493 return true;
5494
5495 return false;
5496}
5497
5498static void update_content_protection(struct drm_connector_state *state, const struct drm_connector *connector,
5499 struct hdcp_workqueue *hdcp_w)
5500{
5501 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5502
5503 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
5504 hdcp_add_display(hdcp_w, aconnector->dc_link->link_index, aconnector);
5505 else if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
5506 hdcp_remove_display(hdcp_w, aconnector->dc_link->link_index, aconnector->base.index);
5507
5508}
5509#endif
5510static void remove_stream(struct amdgpu_device *adev,
5511 struct amdgpu_crtc *acrtc,
5512 struct dc_stream_state *stream)
5513{
	/* DC tears the stream down; just mark the CRTC as disabled here. */
5515
5516 acrtc->otg_inst = -1;
5517 acrtc->enabled = false;
5518}
5519
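/*
 * Translate the DRM cursor plane position into DC cursor coordinates, using
 * the hotspot to clamp positions that extend past the top-left screen edge.
 */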
5520static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
5521 struct dc_cursor_position *position)
5522{
5523 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
5524 int x, y;
5525 int xorigin = 0, yorigin = 0;
5526
5527 position->enable = false;
5528 position->x = 0;
5529 position->y = 0;
5530
5531 if (!crtc || !plane->state->fb)
5532 return 0;
5533
5534 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
5535 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
5536 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
5537 __func__,
5538 plane->state->crtc_w,
5539 plane->state->crtc_h);
5540 return -EINVAL;
5541 }
5542
5543 x = plane->state->crtc_x;
5544 y = plane->state->crtc_y;
5545
5546 if (x <= -amdgpu_crtc->max_cursor_width ||
5547 y <= -amdgpu_crtc->max_cursor_height)
5548 return 0;
5549
5550 if (crtc->primary->state) {
		/* avivo cursors are offset into the total surface */
5552 x += crtc->primary->state->src_x >> 16;
5553 y += crtc->primary->state->src_y >> 16;
5554 }
5555
5556 if (x < 0) {
5557 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
5558 x = 0;
5559 }
5560 if (y < 0) {
5561 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
5562 y = 0;
5563 }
5564 position->enable = true;
5565 position->x = x;
5566 position->y = y;
5567 position->x_hotspot = xorigin;
5568 position->y_hotspot = yorigin;
5569
5570 return 0;
5571}
5572
5573static void handle_cursor_update(struct drm_plane *plane,
5574 struct drm_plane_state *old_plane_state)
5575{
5576 struct amdgpu_device *adev = plane->dev->dev_private;
5577 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
5578 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
5579 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
5580 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
5581 uint64_t address = afb ? afb->address : 0;
5582 struct dc_cursor_position position;
5583 struct dc_cursor_attributes attributes;
5584 int ret;
5585
5586 if (!plane->state->fb && !old_plane_state->fb)
5587 return;
5588
5589 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
5590 __func__,
5591 amdgpu_crtc->crtc_id,
5592 plane->state->crtc_w,
5593 plane->state->crtc_h);
5594
5595 ret = get_cursor_position(plane, crtc, &position);
5596 if (ret)
5597 return;
5598
5599 if (!position.enable) {
		/* turn off cursor */
5601 if (crtc_state && crtc_state->stream) {
5602 mutex_lock(&adev->dm.dc_lock);
5603 dc_stream_set_cursor_position(crtc_state->stream,
5604 &position);
5605 mutex_unlock(&adev->dm.dc_lock);
5606 }
5607 return;
5608 }
5609
5610 amdgpu_crtc->cursor_width = plane->state->crtc_w;
5611 amdgpu_crtc->cursor_height = plane->state->crtc_h;
5612
5613 memset(&attributes, 0, sizeof(attributes));
5614 attributes.address.high_part = upper_32_bits(address);
5615 attributes.address.low_part = lower_32_bits(address);
5616 attributes.width = plane->state->crtc_w;
5617 attributes.height = plane->state->crtc_h;
5618 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
5619 attributes.rotation_angle = 0;
5620 attributes.attribute_flags.value = 0;
5621
5622 attributes.pitch = attributes.width;
5623
5624 if (crtc_state->stream) {
5625 mutex_lock(&adev->dm.dc_lock);
5626 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
5627 &attributes))
5628 DRM_ERROR("DC failed to set cursor attributes\n");
5629
5630 if (!dc_stream_set_cursor_position(crtc_state->stream,
5631 &position))
5632 DRM_ERROR("DC failed to set cursor position\n");
5633 mutex_unlock(&adev->dm.dc_lock);
5634 }
5635}
5636
5637static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
5638{
5639
5640 assert_spin_locked(&acrtc->base.dev->event_lock);
5641 WARN_ON(acrtc->event);
5642
5643 acrtc->event = acrtc->base.state->event;
5644
	/* Set the flip status */
5646 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
5647
	/* Mark this event as consumed */
5649 acrtc->base.state->event = NULL;
5650
5651 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
5652 acrtc->crtc_id);
5653}
5654
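/*
 * Recompute FreeSync/VRR state for a stream around a pageflip: update the VRR
 * parameters, rebuild the VRR infopacket, and track whether either changed.
 */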
5655static void update_freesync_state_on_stream(
5656 struct amdgpu_display_manager *dm,
5657 struct dm_crtc_state *new_crtc_state,
5658 struct dc_stream_state *new_stream,
5659 struct dc_plane_state *surface,
5660 u32 flip_timestamp_in_us)
5661{
5662 struct mod_vrr_params vrr_params;
5663 struct dc_info_packet vrr_infopacket = {0};
5664 struct amdgpu_device *adev = dm->adev;
5665 unsigned long flags;
5666
5667 if (!new_stream)
5668 return;
5669
	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
5675 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
5676 return;
5677
5678 spin_lock_irqsave(&adev->ddev->event_lock, flags);
5679 vrr_params = new_crtc_state->vrr_params;
5680
5681 if (surface) {
5682 mod_freesync_handle_preflip(
5683 dm->freesync_module,
5684 surface,
5685 new_stream,
5686 flip_timestamp_in_us,
5687 &vrr_params);
5688
5689 if (adev->family < AMDGPU_FAMILY_AI &&
5690 amdgpu_dm_vrr_active(new_crtc_state)) {
5691 mod_freesync_handle_v_update(dm->freesync_module,
5692 new_stream, &vrr_params);
5693
			/* Need to call this before the frame ends. */
5695 dc_stream_adjust_vmin_vmax(dm->dc,
5696 new_crtc_state->stream,
5697 &vrr_params.adjust);
5698 }
5699 }
5700
5701 mod_freesync_build_vrr_infopacket(
5702 dm->freesync_module,
5703 new_stream,
5704 &vrr_params,
5705 PACKET_TYPE_VRR,
5706 TRANSFER_FUNC_UNKNOWN,
5707 &vrr_infopacket);
5708
5709 new_crtc_state->freesync_timing_changed |=
5710 (memcmp(&new_crtc_state->vrr_params.adjust,
5711 &vrr_params.adjust,
5712 sizeof(vrr_params.adjust)) != 0);
5713
5714 new_crtc_state->freesync_vrr_info_changed |=
5715 (memcmp(&new_crtc_state->vrr_infopacket,
5716 &vrr_infopacket,
5717 sizeof(vrr_infopacket)) != 0);
5718
5719 new_crtc_state->vrr_params = vrr_params;
5720 new_crtc_state->vrr_infopacket = vrr_infopacket;
5721
5722 new_stream->adjust = new_crtc_state->vrr_params.adjust;
5723 new_stream->vrr_infopacket = vrr_infopacket;
5724
5725 if (new_crtc_state->freesync_vrr_info_changed)
5726 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
5727 new_crtc_state->base.crtc->base.id,
5728 (int)new_crtc_state->base.vrr_enabled,
5729 (int)vrr_params.state);
5730
5731 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
5732}
5733
5734static void pre_update_freesync_state_on_stream(
5735 struct amdgpu_display_manager *dm,
5736 struct dm_crtc_state *new_crtc_state)
5737{
5738 struct dc_stream_state *new_stream = new_crtc_state->stream;
5739 struct mod_vrr_params vrr_params;
5740 struct mod_freesync_config config = new_crtc_state->freesync_config;
5741 struct amdgpu_device *adev = dm->adev;
5742 unsigned long flags;
5743
5744 if (!new_stream)
5745 return;
5746
	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
5751 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
5752 return;
5753
5754 spin_lock_irqsave(&adev->ddev->event_lock, flags);
5755 vrr_params = new_crtc_state->vrr_params;
5756
5757 if (new_crtc_state->vrr_supported &&
5758 config.min_refresh_in_uhz &&
5759 config.max_refresh_in_uhz) {
5760 config.state = new_crtc_state->base.vrr_enabled ?
5761 VRR_STATE_ACTIVE_VARIABLE :
5762 VRR_STATE_INACTIVE;
5763 } else {
5764 config.state = VRR_STATE_UNSUPPORTED;
5765 }
5766
5767 mod_freesync_build_vrr_params(dm->freesync_module,
5768 new_stream,
5769 &config, &vrr_params);
5770
5771 new_crtc_state->freesync_timing_changed |=
5772 (memcmp(&new_crtc_state->vrr_params.adjust,
5773 &vrr_params.adjust,
5774 sizeof(vrr_params.adjust)) != 0);
5775
5776 new_crtc_state->vrr_params = vrr_params;
5777 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
5778}
5779
5780static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
5781 struct dm_crtc_state *new_state)
5782{
5783 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
5784 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
5785
5786 if (!old_vrr_active && new_vrr_active) {
		/*
		 * Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if it likely happened inside display front-porch.
		 *
		 * We also need vupdate irq for the actual core vblank handling
		 * at end of vblank.
		 */
5795 dm_set_vupdate_irq(new_state->base.crtc, true);
5796 drm_crtc_vblank_get(new_state->base.crtc);
5797 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
5798 __func__, new_state->base.crtc->base.id);
5799 } else if (old_vrr_active && !new_vrr_active) {
		/*
		 * Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
5803 dm_set_vupdate_irq(new_state->base.crtc, false);
5804 drm_crtc_vblank_put(new_state->base.crtc);
5805 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
5806 __func__, new_state->base.crtc->base.id);
5807 }
5808}
5809
5810static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
5811{
5812 struct drm_plane *plane;
5813 struct drm_plane_state *old_plane_state, *new_plane_state;
5814 int i;
5815
	/*
	 * TODO: Make this per-stream so we don't issue redundant updates for
	 * commits with multiple streams.
	 */
5820 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
5821 new_plane_state, i)
5822 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5823 handle_cursor_update(plane, old_plane_state);
5824}
5825
5826static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
5827 struct dc_state *dc_state,
5828 struct drm_device *dev,
5829 struct amdgpu_display_manager *dm,
5830 struct drm_crtc *pcrtc,
5831 bool wait_for_vblank)
5832{
5833 uint32_t i;
5834 uint64_t timestamp_ns;
5835 struct drm_plane *plane;
5836 struct drm_plane_state *old_plane_state, *new_plane_state;
5837 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
5838 struct drm_crtc_state *new_pcrtc_state =
5839 drm_atomic_get_new_crtc_state(state, pcrtc);
5840 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
5841 struct dm_crtc_state *dm_old_crtc_state =
5842 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
5843 int planes_count = 0, vpos, hpos;
5844 long r;
5845 unsigned long flags;
5846 struct amdgpu_bo *abo;
5847 uint64_t tiling_flags;
5848 uint32_t target_vblank, last_flip_vblank;
5849 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
5850 bool pflip_present = false;
5851 bool swizzle = true;
5852 struct {
5853 struct dc_surface_update surface_updates[MAX_SURFACES];
5854 struct dc_plane_info plane_infos[MAX_SURFACES];
5855 struct dc_scaling_info scaling_infos[MAX_SURFACES];
5856 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
5857 struct dc_stream_update stream_update;
5858 } *bundle;
5859
5860 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
5861
5862 if (!bundle) {
5863 dm_error("Failed to allocate update bundle\n");
5864 goto cleanup;
5865 }
5866
	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
5872 if (acrtc_state->active_planes == 0)
5873 amdgpu_dm_commit_cursors(state);
5874
	/* Update planes when needed. */
5876 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
5877 struct drm_crtc *crtc = new_plane_state->crtc;
5878 struct drm_crtc_state *new_crtc_state;
5879 struct drm_framebuffer *fb = new_plane_state->fb;
5880 bool plane_needs_flip;
5881 struct dc_plane_state *dc_plane;
5882 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
5883
		/* Cursor plane is handled after stream updates. */
5885 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5886 continue;
5887
5888 if (!fb || !crtc || pcrtc != crtc)
5889 continue;
5890
5891 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
5892 if (!new_crtc_state->active)
5893 continue;
5894
5895 dc_plane = dm_new_plane_state->dc_state;
5896
5897 if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle)
5898 swizzle = false;
5899
5900 bundle->surface_updates[planes_count].surface = dc_plane;
5901 if (new_pcrtc_state->color_mgmt_changed) {
5902 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
5903 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
5904 }
5905
5906 fill_dc_scaling_info(new_plane_state,
5907 &bundle->scaling_infos[planes_count]);
5908
5909 bundle->surface_updates[planes_count].scaling_info =
5910 &bundle->scaling_infos[planes_count];
5911
5912 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
5913
5914 pflip_present = pflip_present || plane_needs_flip;
5915
5916 if (!plane_needs_flip) {
5917 planes_count += 1;
5918 continue;
5919 }
5920
5921 abo = gem_to_amdgpu_bo(fb->obj[0]);
5922
		/*
		 * Wait for all fences on this FB. Do a limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold the reservation lock for the BO.
		 */
5928 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
5929 false,
5930 msecs_to_jiffies(5000));
5931 if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!\n");
5933
		/*
		 * TODO: This might fail and hence better not used; wait
		 * explicitly on fences instead, and in general this should be
		 * called for blocking commits, as per the framework helpers.
		 */
5940 r = amdgpu_bo_reserve(abo, true);
5941 if (unlikely(r != 0))
5942 DRM_ERROR("failed to reserve buffer before flip\n");
5943
5944 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
5945
5946 amdgpu_bo_unreserve(abo);
5947
5948 fill_dc_plane_info_and_addr(
5949 dm->adev, new_plane_state, tiling_flags,
5950 &bundle->plane_infos[planes_count],
5951 &bundle->flip_addrs[planes_count].address);
5952
5953 bundle->surface_updates[planes_count].plane_info =
5954 &bundle->plane_infos[planes_count];
5955
		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
5960 bundle->flip_addrs[planes_count].flip_immediate =
5961 crtc->state->async_flip &&
5962 acrtc_state->update_type == UPDATE_TYPE_FAST;
5963
5964 timestamp_ns = ktime_get_ns();
5965 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
5966 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
5967 bundle->surface_updates[planes_count].surface = dc_plane;
5968
5969 if (!bundle->surface_updates[planes_count].surface) {
5970 DRM_ERROR("No surface for CRTC: id=%d\n",
5971 acrtc_attach->crtc_id);
5972 continue;
5973 }
5974
5975 if (plane == pcrtc->primary)
5976 update_freesync_state_on_stream(
5977 dm,
5978 acrtc_state,
5979 acrtc_state->stream,
5980 dc_plane,
5981 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
5982
5983 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
5984 __func__,
5985 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
5986 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
5987
5988 planes_count += 1;
5989
5990 }
5991
5992 if (pflip_present) {
5993 if (!vrr_active) {
			/*
			 * Non-VRR mode: throttle the flip against the current
			 * hardware vblank counter, as in the classic pageflip
			 * path.
			 */
6000 last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
		} else {
			/*
			 * VRR mode: throttle against the vblank count saved at
			 * completion of the last flip, sampled under the event
			 * lock, instead of a free-running counter.
			 */
6011 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6012 last_flip_vblank = acrtc_attach->last_flip_vblank;
6013 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6014 }
6015
6016 target_vblank = last_flip_vblank + wait_for_vblank;
6017
		/*
		 * Wait until we're out of the vertical blank period before the
		 * one targeted by the flip.
		 */
6022 while ((acrtc_attach->enabled &&
6023 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6024 0, &vpos, &hpos, NULL,
6025 NULL, &pcrtc->hwmode)
6026 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6027 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6028 (int)(target_vblank -
6029 amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) {
6030 usleep_range(1000, 1100);
6031 }
6032
6033 if (acrtc_attach->base.state->event) {
6034 drm_crtc_vblank_get(pcrtc);
6035
6036 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6037
6038 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6039 prepare_flip_isr(acrtc_attach);
6040
6041 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6042 }
6043
6044 if (acrtc_state->stream) {
6045 if (acrtc_state->freesync_vrr_info_changed)
6046 bundle->stream_update.vrr_infopacket =
6047 &acrtc_state->stream->vrr_infopacket;
6048 }
6049 }
6050
	/* Update the planes if changed or disable if we don't have any. */
6052 if ((planes_count || acrtc_state->active_planes == 0) &&
6053 acrtc_state->stream) {
6054 bundle->stream_update.stream = acrtc_state->stream;
6055 if (new_pcrtc_state->mode_changed) {
6056 bundle->stream_update.src = acrtc_state->stream->src;
6057 bundle->stream_update.dst = acrtc_state->stream->dst;
6058 }
6059
6060 if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
6065 bundle->stream_update.gamut_remap =
6066 &acrtc_state->stream->gamut_remap_matrix;
6067 bundle->stream_update.output_csc_transform =
6068 &acrtc_state->stream->csc_color_matrix;
6069 bundle->stream_update.out_transfer_func =
6070 acrtc_state->stream->out_transfer_func;
6071 }
6072
6073 acrtc_state->stream->abm_level = acrtc_state->abm_level;
6074 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
6075 bundle->stream_update.abm_level = &acrtc_state->abm_level;
6076
		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
6082 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6083 amdgpu_dm_vrr_active(acrtc_state)) {
6084 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6085 dc_stream_adjust_vmin_vmax(
6086 dm->dc, acrtc_state->stream,
6087 &acrtc_state->vrr_params.adjust);
6088 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6089 }
6090 mutex_lock(&dm->dc_lock);
6091 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6092 acrtc_state->stream->link->psr_allow_active)
6093 amdgpu_dm_psr_disable(acrtc_state->stream);
6094
6095 dc_commit_updates_for_stream(dm->dc,
6096 bundle->surface_updates,
6097 planes_count,
6098 acrtc_state->stream,
6099 &bundle->stream_update,
6100 dc_state);
6101
6102 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6103 acrtc_state->stream->psr_version &&
6104 !acrtc_state->stream->link->psr_feature_enabled)
6105 amdgpu_dm_link_setup_psr(acrtc_state->stream);
6106 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
6107 acrtc_state->stream->link->psr_feature_enabled &&
6108 !acrtc_state->stream->link->psr_allow_active &&
6109 swizzle) {
6110 amdgpu_dm_psr_enable(acrtc_state->stream);
6111 }
6112
6113 mutex_unlock(&dm->dc_lock);
6114 }
6115
	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
6121 if (acrtc_state->active_planes)
6122 amdgpu_dm_commit_cursors(state);
6123
6124cleanup:
6125 kfree(bundle);
6126}
6127
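/*
 * Notify the audio component about ELD changes: first for connectors whose
 * stream went away, then for connectors that gained an audio-capable stream.
 */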
6128static void amdgpu_dm_commit_audio(struct drm_device *dev,
6129 struct drm_atomic_state *state)
6130{
6131 struct amdgpu_device *adev = dev->dev_private;
6132 struct amdgpu_dm_connector *aconnector;
6133 struct drm_connector *connector;
6134 struct drm_connector_state *old_con_state, *new_con_state;
6135 struct drm_crtc_state *new_crtc_state;
6136 struct dm_crtc_state *new_dm_crtc_state;
6137 const struct dc_stream_status *status;
6138 int i, inst;
6139
	/* Notify device removals. */
6141 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6142 if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
6144 goto notify;
6145 }
6146
6147 if (!new_con_state->crtc)
6148 continue;
6149
6150 new_crtc_state = drm_atomic_get_new_crtc_state(
6151 state, new_con_state->crtc);
6152
6153 if (!new_crtc_state)
6154 continue;
6155
6156 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6157 continue;
6158
6159 notify:
6160 aconnector = to_amdgpu_dm_connector(connector);
6161
6162 mutex_lock(&adev->dm.audio_lock);
6163 inst = aconnector->audio_inst;
6164 aconnector->audio_inst = -1;
6165 mutex_unlock(&adev->dm.audio_lock);
6166
6167 amdgpu_dm_audio_eld_notify(adev, inst);
6168 }
6169
	/* Notify audio device additions. */
6171 for_each_new_connector_in_state(state, connector, new_con_state, i) {
6172 if (!new_con_state->crtc)
6173 continue;
6174
6175 new_crtc_state = drm_atomic_get_new_crtc_state(
6176 state, new_con_state->crtc);
6177
6178 if (!new_crtc_state)
6179 continue;
6180
6181 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6182 continue;
6183
6184 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6185 if (!new_dm_crtc_state->stream)
6186 continue;
6187
6188 status = dc_stream_get_status(new_dm_crtc_state->stream);
6189 if (!status)
6190 continue;
6191
6192 aconnector = to_amdgpu_dm_connector(connector);
6193
6194 mutex_lock(&adev->dm.audio_lock);
6195 inst = status->audio_inst;
6196 aconnector->audio_inst = inst;
6197 mutex_unlock(&adev->dm.audio_lock);
6198
6199 amdgpu_dm_audio_eld_notify(adev, inst);
6200 }
6201}
6202
/*
 * Enable interrupts on CRTCs that are newly active, have undergone
 * a modeset, or have active planes again.
 *
 * Done in two passes, based on the for_modeset flag:
 * Pass 1: For CRTCs going through modeset
 * Pass 2: For CRTCs going from 0 to n active planes
 *
 * Interrupts can only be enabled after the planes are programmed,
 * so this requires a two-pass approach since we don't want to
 * just defer the interrupts until after commit planes every time.
 */
6215static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6216 struct drm_atomic_state *state,
6217 bool for_modeset)
6218{
6219 struct amdgpu_device *adev = dev->dev_private;
6220 struct drm_crtc *crtc;
6221 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6222 int i;
6223#ifdef CONFIG_DEBUG_FS
6224 enum amdgpu_dm_pipe_crc_source source;
6225#endif
6226
6227 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6228 new_crtc_state, i) {
6229 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6230 struct dm_crtc_state *dm_new_crtc_state =
6231 to_dm_crtc_state(new_crtc_state);
6232 struct dm_crtc_state *dm_old_crtc_state =
6233 to_dm_crtc_state(old_crtc_state);
6234 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
6235 bool run_pass;
6236
6237 run_pass = (for_modeset && modeset) ||
6238 (!for_modeset && !modeset &&
6239 !dm_old_crtc_state->interrupts_enabled);
6240
6241 if (!run_pass)
6242 continue;
6243
6244 if (!dm_new_crtc_state->interrupts_enabled)
6245 continue;
6246
6247 manage_dm_interrupts(adev, acrtc, true);
6248
6249#ifdef CONFIG_DEBUG_FS
		/* The stream has changed, so CRC capture needs to be re-enabled. */
6251 source = dm_new_crtc_state->crc_src;
6252 if (amdgpu_dm_is_valid_crc_source(source)) {
6253 amdgpu_dm_crtc_configure_crc_source(
6254 crtc, dm_new_crtc_state,
6255 dm_new_crtc_state->crc_src);
6256 }
6257#endif
6258 }
6259}
6260
6261
6262
6263
6264
6265
6266
6267
6268
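/*
 * amdgpu_dm_crtc_copy_transient_flags - copy mutable flags from DRM to DC streams
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mutable DRM state from CRTC state to DC stream state.
 */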
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
						struct dc_stream_state *stream_state)
{
	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

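	/*
	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
	 * a modeset, being disabled, or have no active planes.
	 *
	 * It's done in atomic commit rather than commit tail for now since
	 * some of the interrupt handlers access the current CRTC state and
	 * potentially the stream pointer itself.
	 *
	 * Since the atomic state is swapped within atomic commit and not
	 * within commit tail, the new state (that hasn't been committed yet)
	 * would otherwise be accessed from within the handlers.
	 *
	 * TODO: Fix this so we can do this in commit tail and not have to
	 * block in atomic check.
	 */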
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
		struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		if (dm_old_crtc_state->interrupts_enabled &&
		    (!dm_new_crtc_state->interrupts_enabled ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
			manage_dm_interrupts(adev, acrtc, false);
	}

	/*
	 * Add check here for SoCs that support hardware cursor plane, to
	 * unset legacy_cursor_update.
	 */

	return drm_atomic_helper_commit(dev, state, nonblock);

	/* TODO: handle EINTR, re-enable IRQ. */
}

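/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failures here implies a hardware failure,
 * since atomic check should have filtered anything non-kosher.
 */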
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	int crtc_disable_count = 0;

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
	} else {
		/* No state changes, retain the current state. */
		dc_state_temp = dc_create_state(dm->dc);
		ASSERT(dc_state_temp);
		dc_state = dc_state_temp;
		dc_resource_state_copy_construct_current(dm->dc, dc_state);
	}

	/* Update changed items. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Copy all transient state flags into dc state. */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/*
		 * Handles the headless hotplug case, updating new_state and
		 * aconnector as needed.
		 */
		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
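				/*
				 * This could happen because of issues with
				 * userspace notifications delivery. In this
				 * case userspace tries to set mode on a
				 * display which is disconnected in fact.
				 * dc_sink is NULL in this case on aconnector.
				 * We expect reset mode will come soon.
				 *
				 * This can also happen when unplug is done
				 * during the resume sequence.
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state.
				 */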
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						 __func__, acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);

			if (dm_old_crtc_state->stream) {
				if (dm_old_crtc_state->stream->link->psr_allow_active)
					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);

				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
			}
		}
	}

	if (dc_state) {
		dm_enable_per_frame_crtc_master_sync(dc_state);
		mutex_lock(&dm->dc_lock);
		WARN_ON(!dc_commit_state(dm->dc, dc_state));
		mutex_unlock(&dm->dc_lock);
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
				dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);

			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			update_content_protection(new_con_state, connector, adev->dm.hdcp_workqueue);
	}
#endif

	/* Handle connector state updates. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update dummy_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&dummy_updates, 0, sizeof(dummy_updates));
		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets. */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			is_hdr_metadata_different(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
					dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);
		if (WARN_ON(!status))
			continue;
		WARN_ON(!status->plane_count);

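		/*
		 * TODO: DC refuses to perform stream updates without a
		 * dc_surface_update, so create an empty update on each plane.
		 * To fix this, DC should permit updating only stream
		 * properties.
		 */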
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];

		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
					     dummy_updates,
					     status->plane_count,
					     dm_new_crtc_state->stream,
					     &stream_update,
					     dc_state);
		mutex_unlock(&dm->dc_lock);
	}

	/* Count number of newly disabled CRTCs for dropping PM refs later. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* Update freesync active state. */
		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);

		/* Handle vrr on->off / off->on transitions. */
		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
						dm_new_crtc_state);
	}

	/* Enable interrupts for CRTCs going through a modeset. */
	amdgpu_dm_enable_crtc_interrupts(dev, state, true);

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
		if (new_crtc_state->async_flip)
			wait_for_vblank = false;

	/* Update planes when needed, per CRTC. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dc_state, dev,
						dm, crtc, wait_for_vblank);
	}

	/* Enable interrupts for CRTCs going from 0 to n active planes. */
	amdgpu_dm_enable_crtc_interrupts(dev, state, false);

	/* Update audio instances for each connector. */
	amdgpu_dm_commit_audio(dev, state);

	/*
	 * Send vblank events for all CRTCs not handled in the flip path and
	 * mark the events consumed for drm_atomic_helper_commit_hw_done().
	 */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	/* Signal HW programming completion. */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

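	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving
	 * any displays anymore.
	 */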
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);

	if (dc_state_temp)
		dc_release_state(dc_state_temp);
}

static int dm_force_atomic_commit(struct drm_connector *connector)
{
	int ret = 0;
	struct drm_device *ddev = connector->dev;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	struct drm_plane *plane = disconnected_acrtc->base.primary;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore the previous display setting. */

	/* Attach connectors to drm_atomic_state. */
	conn_state = drm_atomic_get_connector_state(state, connector);

	ret = PTR_ERR_OR_ZERO(conn_state);
	if (ret)
		goto err;

	/* Attach crtc to drm_atomic_state. */
	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto err;

	/* Force a restore. */
	crtc_state->mode_changed = true;

	/* Attach plane to drm_atomic_state. */
	plane_state = drm_atomic_get_plane_state(state, plane);

	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto err;

	/* Call commit internally with the state we just constructed. */
	ret = drm_atomic_commit(state);
	if (!ret)
		return 0;

err:
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_put(state);

	return ret;
}

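/*
 * This function handles all cases when a set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the same
 * port and when running without a usermode desktop manager.
 */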
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and different from the
	 * current, we deduce we are in a state where we can not rely on
	 * usermode call to turn on the display, so we do it here.
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}

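/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */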
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx will ensure that when the
	 * framework releases it, the extra locks we are locking here will
	 * get released too.
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
				struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming completed and
		 * page flips are done.
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	return ret < 0 ? ret : 0;
}

static void get_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state,
	struct dm_connector_state *new_con_state)
{
	struct mod_freesync_config config = {0};
	struct amdgpu_dm_connector *aconnector =
			to_amdgpu_dm_connector(new_con_state->base.connector);
	struct drm_display_mode *mode = &new_crtc_state->base.mode;
	int vrefresh = drm_mode_vrefresh(mode);

	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
					vrefresh >= aconnector->min_vfreq &&
					vrefresh <= aconnector->max_vfreq;

	if (new_crtc_state->vrr_supported) {
		new_crtc_state->stream->ignore_msa_timing_param = true;
		config.state = new_crtc_state->base.vrr_enabled ?
				VRR_STATE_ACTIVE_VARIABLE :
				VRR_STATE_INACTIVE;
		config.min_refresh_in_uhz =
				aconnector->min_vfreq * 1000000;
		config.max_refresh_in_uhz =
				aconnector->max_vfreq * 1000000;
		config.vsif_supported = true;
		config.btr = true;
	}

	new_crtc_state->freesync_config = config;
}

static void reset_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_params, 0,
	       sizeof(new_crtc_state->vrr_params));
	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}

static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

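	/*
	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of
	 * dc_validation_set.
	 */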
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO: This hack should go away. */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario. */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
									&aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
									&aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		new_stream = create_stream_for_sink(aconnector,
						    &new_crtc_state->mode,
						    dm_new_conn_state,
						    dm_old_crtc_state->stream);

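		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3; in this case it is not an
		 * error, the OS will be updated after detection and
		 * will do the right thing on next atomic commit.
		 */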
		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					 __func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

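		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are
		 * unchanged; we'd hit the BUG_ON below and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * with all flows.
		 */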
		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}

	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC. */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				 crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else { /* Add stream for any updated/enabled CRTC. */
		/*
		 * Quick fix to prevent NULL pointer on new_stream when added
		 * MST connectors are not found in the existing crtc_state in
		 * chained mode.
		 * TODO: need to dig out the root cause of that.
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			dc_stream_retain(new_stream);

			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
					 crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference. */
	if (new_stream)
		dc_stream_release(new_stream);

	/*
	 * We want to perform the dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->enable &&
	      new_crtc_state->active))
		return 0;

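	/*
	 * Given the above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */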
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings. */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings. */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}

static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

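	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * enough to determine when we need to reset all the planes on
	 * the stream.
	 */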
	if (state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/* CRTC degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

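	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */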
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* TODO: Remove this once we can handle fast format changes. */
		if (old_other_state->fb && new_other_state->fb &&
		    old_other_state->fb->format != new_other_state->fb->format)
			return true;
	}

	return false;
}

static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	/* TODO: Implement a better atomic check for the cursor plane. */
	if (plane->type == DRM_PLANE_TYPE_CURSOR)
		return 0;

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes. */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
			state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context))
			return -EINVAL;

		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes. */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			new_plane_crtc->dev->dev_private,
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

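		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */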
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {

			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}

	return ret;
}

static int
dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
				    struct drm_atomic_state *state,
				    enum surface_update_type *out_type)
{
	struct dc *dc = dm->dc;
	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
	int i, j, num_plane, ret = 0;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_plane *plane;

	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
	struct dc_stream_status *status = NULL;

	struct dc_surface_update *updates;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;

	updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);

	if (!updates) {
		DRM_ERROR("Failed to allocate plane updates\n");
		/* Set type to FULL to avoid crashing in DC. */
		update_type = UPDATE_TYPE_FULL;
		goto cleanup;
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct dc_scaling_info scaling_info;
		struct dc_stream_update stream_update;

		memset(&stream_update, 0, sizeof(stream_update));

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
		num_plane = 0;

		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}

		if (!new_dm_crtc_state->stream)
			continue;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
			const struct amdgpu_framebuffer *amdgpu_fb =
				to_amdgpu_framebuffer(new_plane_state->fb);
			struct dc_plane_info plane_info;
			struct dc_flip_addrs flip_addr;
			uint64_t tiling_flags;

			new_plane_crtc = new_plane_state->crtc;
			old_plane_crtc = old_plane_state->crtc;
			new_dm_plane_state = to_dm_plane_state(new_plane_state);
			old_dm_plane_state = to_dm_plane_state(old_plane_state);

			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
				update_type = UPDATE_TYPE_FULL;
				goto cleanup;
			}

			if (crtc != new_plane_crtc)
				continue;

			updates[num_plane].surface = new_dm_plane_state->dc_state;

			if (new_crtc_state->mode_changed) {
				stream_update.dst = new_dm_crtc_state->stream->dst;
				stream_update.src = new_dm_crtc_state->stream->src;
			}

			if (new_crtc_state->color_mgmt_changed) {
				updates[num_plane].gamma =
					new_dm_plane_state->dc_state->gamma_correction;
				updates[num_plane].in_transfer_func =
					new_dm_plane_state->dc_state->in_transfer_func;
				stream_update.gamut_remap =
					&new_dm_crtc_state->stream->gamut_remap_matrix;
				stream_update.output_csc_transform =
					&new_dm_crtc_state->stream->csc_color_matrix;
				stream_update.out_transfer_func =
					new_dm_crtc_state->stream->out_transfer_func;
			}

			ret = fill_dc_scaling_info(new_plane_state,
						   &scaling_info);
			if (ret)
				goto cleanup;

			updates[num_plane].scaling_info = &scaling_info;

			if (amdgpu_fb) {
				ret = get_fb_info(amdgpu_fb, &tiling_flags);
				if (ret)
					goto cleanup;

				memset(&flip_addr, 0, sizeof(flip_addr));

				ret = fill_dc_plane_info_and_addr(
					dm->adev, new_plane_state, tiling_flags,
					&plane_info,
					&flip_addr.address);
				if (ret)
					goto cleanup;

				updates[num_plane].plane_info = &plane_info;
				updates[num_plane].flip_addr = &flip_addr;
			}

			num_plane++;
		}

		if (num_plane == 0)
			continue;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto cleanup;

		old_dm_state = dm_atomic_get_old_state(state);
		if (!old_dm_state) {
			ret = -EINVAL;
			goto cleanup;
		}

		status = dc_stream_get_status_from_state(old_dm_state->context,
							 new_dm_crtc_state->stream);
		stream_update.stream = new_dm_crtc_state->stream;
		/*
		 * TODO: DC modifies the surface during this call so we need
		 * to lock here - find a way to do this without locking.
		 */
		mutex_lock(&dm->dc_lock);
		update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
								  &stream_update, status);
		mutex_unlock(&dm->dc_lock);

		if (update_type > UPDATE_TYPE_MED) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}
	}

cleanup:
	kfree(updates);

	*out_type = update_type;
	return ret;
}

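/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable.
 * It's important not to modify the existing DC state, otherwise atomic_check
 * may unexpectedly commit hardware state for the current CRTC state.
 *
 * Returns 0 on success, or a negative error code on failure.
 */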
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;
	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;

	int ret, i;

	/*
	 * This bool will be set to true for any modeset/reset or plane
	 * update which implies a non-fast surface update.
	 */
	bool lock_and_validation_needed = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}

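	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */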
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

	/* Remove exiting planes if they are modified. */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes. */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created. */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

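		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calculations and this avoids the
		 * performance penalty of locking the private state object
		 * and allocating a new dc_state.
		 */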
		if (state->async_update)
			return 0;
	}

	/*
	 * Check scaling and underscan changes.
	 * TODO: Removed scaling changes validation due to inability to commit
	 * new stream into context w/o causing full reset. Need to
	 * decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets. */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change. */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		overall_update_type = UPDATE_TYPE_FULL;
		lock_and_validation_needed = true;
	}

	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
	if (ret)
		goto fail;

	if (overall_update_type < update_type)
		overall_update_type = update_type;

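	/*
	 * lock_and_validation_needed was an old way to determine if we
	 * needed to take the global lock. Leaving it in to check if we
	 * broke any corner cases:
	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
	 */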
	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
		WARN(1, "Global lock should be set; overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL\n");

	if (overall_update_type > UPDATE_TYPE_FAST) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}
	} else {
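		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have
		 * their commit work done in parallel with other commits not
		 * touching the same resource. If we have a new DC context as
		 * part of the DM atomic state from validation we need to
		 * free it and retain the existing one instead.
		 */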
		struct dm_atomic_state *new_dm_state, *old_dm_state;

		new_dm_state = dm_atomic_get_new_state(state);
		old_dm_state = dm_atomic_get_old_state(state);

		if (new_dm_state && old_dm_state) {
			if (new_dm_state->context)
				dc_release_state(new_dm_state->context);

			new_dm_state->context = old_dm_state->context;

			if (old_dm_state->context)
				dc_retain_state(old_dm_state->context);
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = (int)overall_update_type;
	}

	/* Must be success. */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}

static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;

	/*
	 * If edid is non-NULL, restrict freesync support to DP and eDP
	 * connectors only.
	 */
	if (edid) {
		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}
	}
	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {

			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;

			/* Check if monitor has continuous frequency mode. */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;

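			/*
			 * Check for flag range limits only: if flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */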
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10)
			freesync_capable = true;
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->psr_feature_enabled = dpcd_data[0] ? true : false;
		DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
	}
}

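/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */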
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	struct dc *dc = NULL;
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;
	dc = link->ctx->dc;

	psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);

	return ret;
}

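/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */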
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	struct dc_static_screen_events triggers = {0};

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	triggers.cursor_update = true;
	triggers.overlay_update = true;
	triggers.surface_update = true;

	dc_stream_set_static_screen_events(link->ctx->dc,
					   &stream, 1,
					   &triggers);

	return dc_link_set_psr_allow_active(link, true, false);
}

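/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */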
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}