27#define CREATE_TRACE_POINTS
28
29#include "dm_services_types.h"
30#include "dc.h"
31#include "dc_link_dp.h"
32#include "dc/inc/core_types.h"
33#include "dal_asic_id.h"
34#include "dmub/dmub_srv.h"
35#include "dc/inc/hw/dmcu.h"
36#include "dc/inc/hw/abm.h"
37#include "dc/dc_dmub_srv.h"
38#include "dc/dc_edid_parser.h"
39#include "dc/dc_stat.h"
40#include "amdgpu_dm_trace.h"
41
42#include "vid.h"
43#include "amdgpu.h"
44#include "amdgpu_display.h"
45#include "amdgpu_ucode.h"
46#include "atom.h"
47#include "amdgpu_dm.h"
48#ifdef CONFIG_DRM_AMD_DC_HDCP
49#include "amdgpu_dm_hdcp.h"
50#include <drm/drm_hdcp.h>
51#endif
52#include "amdgpu_pm.h"
53
54#include "amd_shared.h"
55#include "amdgpu_dm_irq.h"
56#include "dm_helpers.h"
57#include "amdgpu_dm_mst_types.h"
58#if defined(CONFIG_DEBUG_FS)
59#include "amdgpu_dm_debugfs.h"
60#endif
61#include "amdgpu_dm_psr.h"
62
63#include "ivsrcid/ivsrcid_vislands30.h"
64
65#include "i2caux_interface.h"
66#include <linux/module.h>
67#include <linux/moduleparam.h>
68#include <linux/types.h>
69#include <linux/pm_runtime.h>
70#include <linux/pci.h>
71#include <linux/firmware.h>
72#include <linux/component.h>
73
74#include <drm/drm_atomic.h>
75#include <drm/drm_atomic_uapi.h>
76#include <drm/drm_atomic_helper.h>
77#include <drm/drm_dp_mst_helper.h>
78#include <drm/drm_fb_helper.h>
79#include <drm/drm_fourcc.h>
80#include <drm/drm_edid.h>
81#include <drm/drm_vblank.h>
82#include <drm/drm_audio_component.h>
83
84#if defined(CONFIG_DRM_AMD_DC_DCN)
85#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
86
87#include "dcn/dcn_1_0_offset.h"
88#include "dcn/dcn_1_0_sh_mask.h"
89#include "soc15_hw_ip.h"
90#include "vega10_ip_offset.h"
91
92#include "soc15_common.h"
93#endif
94
95#include "modules/inc/mod_freesync.h"
96#include "modules/power/power_helpers.h"
97#include "modules/inc/mod_info_packet.h"
98
99#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
100MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
101#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
102MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
103#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
104MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
105#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
106MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
107#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
108MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
109#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
110MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
111#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
112MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
113#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
114MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
115
116#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
117MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
118
119#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
120MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
121
122
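/* Number of bytes in the PSP header prepended to the firmware image. */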
123#define PSP_HEADER_BYTES 0x100
124
125
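/* Number of bytes in the PSP footer appended to the firmware image. */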
126#define PSP_FOOTER_BYTES 0x100
127
128
129
130
131
132
133
134
135
136
137
138
139static int amdgpu_dm_init(struct amdgpu_device *adev);
140static void amdgpu_dm_fini(struct amdgpu_device *adev);
141static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
142
143static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
144{
145 switch (link->dpcd_caps.dongle_type) {
146 case DISPLAY_DONGLE_NONE:
147 return DRM_MODE_SUBCONNECTOR_Native;
148 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
149 return DRM_MODE_SUBCONNECTOR_VGA;
150 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
151 case DISPLAY_DONGLE_DP_DVI_DONGLE:
152 return DRM_MODE_SUBCONNECTOR_DVID;
153 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
154 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
155 return DRM_MODE_SUBCONNECTOR_HDMIA;
156 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
157 default:
158 return DRM_MODE_SUBCONNECTOR_Unknown;
159 }
160}
161
162static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
163{
164 struct dc_link *link = aconnector->dc_link;
165 struct drm_connector *connector = &aconnector->base;
166 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
167
168 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
169 return;
170
171 if (aconnector->dc_sink)
172 subconnector = get_subconnector_type(link);
173
174 drm_object_property_set_value(&connector->base,
175 connector->dev->mode_config.dp_subconnector_property,
176 subconnector);
177}
178
179
180
181
182
183
184
185
186static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
187
188static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
189
190static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
191 struct drm_plane *plane,
192 unsigned long possible_crtcs,
193 const struct dc_plane_cap *plane_cap);
194static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
195 struct drm_plane *plane,
196 uint32_t link_index);
197static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
198 struct amdgpu_dm_connector *amdgpu_dm_connector,
199 uint32_t link_index,
200 struct amdgpu_encoder *amdgpu_encoder);
201static int amdgpu_dm_encoder_init(struct drm_device *dev,
202 struct amdgpu_encoder *aencoder,
203 uint32_t link_index);
204
205static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
206
207static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
208
209static int amdgpu_dm_atomic_check(struct drm_device *dev,
210 struct drm_atomic_state *state);
211
212static void handle_cursor_update(struct drm_plane *plane,
213 struct drm_plane_state *old_plane_state);
214
215static const struct drm_format_info *
216amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
217
218static bool
219is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
220 struct drm_crtc_state *new_crtc_state);
221
222
223
224
225
226
227
228
229
230
231
232
233
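/*
 * dm_vblank_get_counter() - Return the current hardware vblank count for the
 * given CRTC, or 0 if the index is out of range or no dc_stream is bound yet.
 */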
234static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
235{
236 if (crtc >= adev->mode_info.num_crtc)
237 return 0;
238 else {
239 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
240
241 if (acrtc->dm_irq_params.stream == NULL) {
242 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
243 crtc);
244 return 0;
245 }
246
247 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
248 }
249}
250
251static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
252 u32 *vbl, u32 *position)
253{
254 uint32_t v_blank_start, v_blank_end, h_position, v_position;
255
256 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
257 return -EINVAL;
258 else {
259 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
260
261 if (acrtc->dm_irq_params.stream == NULL) {
262 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
263 crtc);
264 return 0;
265 }
266
267
268
269
270
271 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
272 &v_blank_start,
273 &v_blank_end,
274 &h_position,
275 &v_position);
276
277 *position = v_position | (h_position << 16);
278 *vbl = v_blank_start | (v_blank_end << 16);
279 }
280
281 return 0;
282}
283
284static bool dm_is_idle(void *handle)
285{
286
287 return true;
288}
289
290static int dm_wait_for_idle(void *handle)
291{
292
293 return 0;
294}
295
296static bool dm_check_soft_reset(void *handle)
297{
298 return false;
299}
300
301static int dm_soft_reset(void *handle)
302{
303
304 return 0;
305}
306
307static struct amdgpu_crtc *
308get_crtc_by_otg_inst(struct amdgpu_device *adev,
309 int otg_inst)
310{
311 struct drm_device *dev = adev_to_drm(adev);
312 struct drm_crtc *crtc;
313 struct amdgpu_crtc *amdgpu_crtc;
314
315 if (WARN_ON(otg_inst == -1))
316 return adev->mode_info.crtcs[0];
317
318 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
319 amdgpu_crtc = to_amdgpu_crtc(crtc);
320
321 if (amdgpu_crtc->otg_inst == otg_inst)
322 return amdgpu_crtc;
323 }
324
325 return NULL;
326}
327
328static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
329{
330 return acrtc->dm_irq_params.freesync_config.state ==
331 VRR_STATE_ACTIVE_VARIABLE ||
332 acrtc->dm_irq_params.freesync_config.state ==
333 VRR_STATE_ACTIVE_FIXED;
334}
335
336static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
337{
338 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
339 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
340}
341
342static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
343 struct dm_crtc_state *new_state)
344{
345 if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
346 return true;
347 else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
348 return true;
349 else
350 return false;
351}
352
353
354
355
356
357
358
359
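/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */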
360static void dm_pflip_high_irq(void *interrupt_params)
361{
362 struct amdgpu_crtc *amdgpu_crtc;
363 struct common_irq_params *irq_params = interrupt_params;
364 struct amdgpu_device *adev = irq_params->adev;
365 unsigned long flags;
366 struct drm_pending_vblank_event *e;
367 uint32_t vpos, hpos, v_blank_start, v_blank_end;
368 bool vrr_active;
369
370 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
371
372
373
374 if (amdgpu_crtc == NULL) {
375 DC_LOG_PFLIP("CRTC is null, returning.\n");
376 return;
377 }
378
379 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
380
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
382 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
383 amdgpu_crtc->pflip_status,
384 AMDGPU_FLIP_SUBMITTED,
385 amdgpu_crtc->crtc_id,
386 amdgpu_crtc);
387 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
388 return;
389 }
390
391
392 e = amdgpu_crtc->event;
393 amdgpu_crtc->event = NULL;
394
395 WARN_ON(!e);
396
397 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
398
399
400 if (!vrr_active ||
401 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
402 &v_blank_end, &hpos, &vpos) ||
403 (vpos < v_blank_start)) {
404
405
406
407
408 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
409
410
411
412
413 if (e) {
414 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
415
416
417 drm_crtc_vblank_put(&amdgpu_crtc->base);
418 }
419 } else if (e) {
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
435 e->pipe = amdgpu_crtc->crtc_id;
436
437 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
438 e = NULL;
439 }
440
441
442
443
444
445
446 amdgpu_crtc->dm_irq_params.last_flip_vblank =
447 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
448
449 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
450 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
451
452 DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
453 amdgpu_crtc->crtc_id, amdgpu_crtc,
454 vrr_active, (int) !e);
455}
456
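/*
 * dm_vupdate_high_irq() - Handle the VUPDATE interrupt. Logs the frame
 * duration for refresh-rate tracing and, when VRR is active, performs core
 * vblank handling after the end of front-porch; vmin/vmax (BTR) updates are
 * done here only for pre-AMDGPU_FAMILY_AI parts.
 */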
457static void dm_vupdate_high_irq(void *interrupt_params)
458{
459 struct common_irq_params *irq_params = interrupt_params;
460 struct amdgpu_device *adev = irq_params->adev;
461 struct amdgpu_crtc *acrtc;
462 struct drm_device *drm_dev;
463 struct drm_vblank_crtc *vblank;
464 ktime_t frame_duration_ns, previous_timestamp;
465 unsigned long flags;
466 int vrr_active;
467
468 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
469
470 if (acrtc) {
471 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
472 drm_dev = acrtc->base.dev;
473 vblank = &drm_dev->vblank[acrtc->base.index];
474 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
475 frame_duration_ns = vblank->time - previous_timestamp;
476
477 if (frame_duration_ns > 0) {
478 trace_amdgpu_refresh_rate_track(acrtc->base.index,
479 frame_duration_ns,
480 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
481 atomic64_set(&irq_params->previous_timestamp, vblank->time);
482 }
483
484 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
485 acrtc->crtc_id,
486 vrr_active);
487
488
489
490
491
492
493
494 if (vrr_active) {
495 drm_crtc_handle_vblank(&acrtc->base);
496
497
498 if (acrtc->dm_irq_params.stream &&
499 adev->family < AMDGPU_FAMILY_AI) {
500 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
501 mod_freesync_handle_v_update(
502 adev->dm.freesync_module,
503 acrtc->dm_irq_params.stream,
504 &acrtc->dm_irq_params.vrr_params);
505
506 dc_stream_adjust_vmin_vmax(
507 adev->dm.dc,
508 acrtc->dm_irq_params.stream,
509 &acrtc->dm_irq_params.vrr_params.adjust);
510 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
511 }
512 }
513 }
514}
515
516
517
518
519
520
521
522
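/**
 * dm_crtc_high_irq() - Handles the CRTC/VSYNC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Notifies DRM's vblank event handler (when VRR is inactive), feeds the CRC
 * machinery, and on DCN handles freesync vmin/vmax updates as well as
 * deferred pageflip completion for commits with no planes enabled.
 */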
523static void dm_crtc_high_irq(void *interrupt_params)
524{
525 struct common_irq_params *irq_params = interrupt_params;
526 struct amdgpu_device *adev = irq_params->adev;
527 struct amdgpu_crtc *acrtc;
528 unsigned long flags;
529 int vrr_active;
530
531 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
532 if (!acrtc)
533 return;
534
535 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
536
537 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
538 vrr_active, acrtc->dm_irq_params.active_planes);
539
540
541
542
543
544
545
546 if (!vrr_active)
547 drm_crtc_handle_vblank(&acrtc->base);
548
549
550
551
552
553 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
554
555
556 if (adev->family < AMDGPU_FAMILY_AI)
557 return;
558
559 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
560
561 if (acrtc->dm_irq_params.stream &&
562 acrtc->dm_irq_params.vrr_params.supported &&
563 acrtc->dm_irq_params.freesync_config.state ==
564 VRR_STATE_ACTIVE_VARIABLE) {
565 mod_freesync_handle_v_update(adev->dm.freesync_module,
566 acrtc->dm_irq_params.stream,
567 &acrtc->dm_irq_params.vrr_params);
568
569 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
570 &acrtc->dm_irq_params.vrr_params.adjust);
571 }
572
573
574
575
576
577
578
579
580
581
582
583 if (adev->family >= AMDGPU_FAMILY_RV &&
584 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
585 acrtc->dm_irq_params.active_planes == 0) {
586 if (acrtc->event) {
587 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
588 acrtc->event = NULL;
589 drm_crtc_vblank_put(&acrtc->base);
590 }
591 acrtc->pflip_status = AMDGPU_FLIP_NONE;
592 }
593
594 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
595}
596
597#if defined(CONFIG_DRM_AMD_DC_DCN)
598#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
599
600
601
602
603
604
605
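/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles the OTG vertical
 * interrupt 0 on DCN ASICs; used to read out the CRC for the configured
 * secure-display CRC window.
 * @interrupt_params: interrupt parameters
 */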
606static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
607{
608 struct common_irq_params *irq_params = interrupt_params;
609 struct amdgpu_device *adev = irq_params->adev;
610 struct amdgpu_crtc *acrtc;
611
612 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
613
614 if (!acrtc)
615 return;
616
617 amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
618}
619#endif
620
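/* Maximum number of DMUB trace buffer entries drained per outbox interrupt. */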
621#define DMUB_TRACE_MAX_READ 64
622
623
624
625
626
627
628
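/**
 * dm_dmub_outbox1_low_irq() - Handles the DMUB outbox interrupt: copies any
 * pending DMUB notification (e.g. an AUX reply) to the driver and drains the
 * DMUB trace buffer.
 * @interrupt_params: used for determining the outbox instance
 */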
629static void dm_dmub_outbox1_low_irq(void *interrupt_params)
630{
631 struct dmub_notification notify;
632 struct common_irq_params *irq_params = interrupt_params;
633 struct amdgpu_device *adev = irq_params->adev;
634 struct amdgpu_display_manager *dm = &adev->dm;
635 struct dmcub_trace_buf_entry entry = { 0 };
636 uint32_t count = 0;
637
638 if (dc_enable_dmub_notifications(adev->dm.dc)) {
639 if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
			do {
				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			} while (notify.pending_notification);

			if (adev->dm.dmub_notify)
				memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
			if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
				complete(&adev->dm.dmub_aux_transfer_done);

		} else {
			DRM_ERROR("DM: Failed to receive correct outbox IRQ!\n");
		}
653 }
654
655
656 do {
657 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
658 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
659 entry.param0, entry.param1);
660
661 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
662 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
663 } else
664 break;
665
666 count++;
667
668 } while (count <= DMUB_TRACE_MAX_READ);
669
670 ASSERT(count <= DMUB_TRACE_MAX_READ);
671}
672#endif
673
674static int dm_set_clockgating_state(void *handle,
675 enum amd_clockgating_state state)
676{
677 return 0;
678}
679
680static int dm_set_powergating_state(void *handle,
681 enum amd_powergating_state state)
682{
683 return 0;
684}
685
686
static int dm_early_init(void *handle);
688
689
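/*
 * amdgpu_dm_fbc_init() - Allocate a GTT buffer for frame buffer compression
 * on the eDP panel, sized for the largest mode exposed by the connector.
 */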
690static void amdgpu_dm_fbc_init(struct drm_connector *connector)
691{
692 struct drm_device *dev = connector->dev;
693 struct amdgpu_device *adev = drm_to_adev(dev);
694 struct dm_compressor_info *compressor = &adev->dm.compressor;
695 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
696 struct drm_display_mode *mode;
697 unsigned long max_size = 0;
698
699 if (adev->dm.dc->fbc_compressor == NULL)
700 return;
701
702 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
703 return;
704
705 if (compressor->bo_ptr)
706 return;
707
708
709 list_for_each_entry(mode, &connector->modes, head) {
710 if (max_size < mode->htotal * mode->vtotal)
711 max_size = mode->htotal * mode->vtotal;
712 }
713
714 if (max_size) {
715 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
716 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
717 &compressor->gpu_addr, &compressor->cpu_addr);
718
719 if (r)
720 DRM_ERROR("DM: Failed to initialize FBC\n");
721 else {
722 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
723 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
724 }
725
726 }
727
728}
729
730static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
731 int pipe, bool *enabled,
732 unsigned char *buf, int max_bytes)
733{
734 struct drm_device *dev = dev_get_drvdata(kdev);
735 struct amdgpu_device *adev = drm_to_adev(dev);
736 struct drm_connector *connector;
737 struct drm_connector_list_iter conn_iter;
738 struct amdgpu_dm_connector *aconnector;
739 int ret = 0;
740
741 *enabled = false;
742
743 mutex_lock(&adev->dm.audio_lock);
744
745 drm_connector_list_iter_begin(dev, &conn_iter);
746 drm_for_each_connector_iter(connector, &conn_iter) {
747 aconnector = to_amdgpu_dm_connector(connector);
748 if (aconnector->audio_inst != port)
749 continue;
750
751 *enabled = true;
752 ret = drm_eld_size(connector->eld);
753 memcpy(buf, connector->eld, min(max_bytes, ret));
754
755 break;
756 }
757 drm_connector_list_iter_end(&conn_iter);
758
759 mutex_unlock(&adev->dm.audio_lock);
760
761 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
762
763 return ret;
764}
765
766static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
767 .get_eld = amdgpu_dm_audio_component_get_eld,
768};
769
770static int amdgpu_dm_audio_component_bind(struct device *kdev,
771 struct device *hda_kdev, void *data)
772{
773 struct drm_device *dev = dev_get_drvdata(kdev);
774 struct amdgpu_device *adev = drm_to_adev(dev);
775 struct drm_audio_component *acomp = data;
776
777 acomp->ops = &amdgpu_dm_audio_component_ops;
778 acomp->dev = kdev;
779 adev->dm.audio_component = acomp;
780
781 return 0;
782}
783
784static void amdgpu_dm_audio_component_unbind(struct device *kdev,
785 struct device *hda_kdev, void *data)
786{
787 struct drm_device *dev = dev_get_drvdata(kdev);
788 struct amdgpu_device *adev = drm_to_adev(dev);
789 struct drm_audio_component *acomp = data;
790
791 acomp->ops = NULL;
792 acomp->dev = NULL;
793 adev->dm.audio_component = NULL;
794}
795
796static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
797 .bind = amdgpu_dm_audio_component_bind,
798 .unbind = amdgpu_dm_audio_component_unbind,
799};
800
801static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
802{
803 int i, ret;
804
805 if (!amdgpu_audio)
806 return 0;
807
808 adev->mode_info.audio.enabled = true;
809
810 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
811
812 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
813 adev->mode_info.audio.pin[i].channels = -1;
814 adev->mode_info.audio.pin[i].rate = -1;
815 adev->mode_info.audio.pin[i].bits_per_sample = -1;
816 adev->mode_info.audio.pin[i].status_bits = 0;
817 adev->mode_info.audio.pin[i].category_code = 0;
818 adev->mode_info.audio.pin[i].connected = false;
819 adev->mode_info.audio.pin[i].id =
820 adev->dm.dc->res_pool->audios[i]->inst;
821 adev->mode_info.audio.pin[i].offset = 0;
822 }
823
824 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
825 if (ret < 0)
826 return ret;
827
828 adev->dm.audio_registered = true;
829
830 return 0;
831}
832
833static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
834{
835 if (!amdgpu_audio)
836 return;
837
838 if (!adev->mode_info.audio.enabled)
839 return;
840
841 if (adev->dm.audio_registered) {
842 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
843 adev->dm.audio_registered = false;
844 }
845
846
847
848 adev->mode_info.audio.enabled = false;
849}
850
851static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
852{
853 struct drm_audio_component *acomp = adev->dm.audio_component;
854
855 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
856 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
857
858 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
859 pin, -1);
860 }
861}
862
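/*
 * dm_dmub_hw_init() - Copy the DMUB firmware and VBIOS into the reserved
 * framebuffer regions, program the hardware parameters, bring up the DMUB
 * service and wait for firmware auto-load to complete.
 */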
863static int dm_dmub_hw_init(struct amdgpu_device *adev)
864{
865 const struct dmcub_firmware_header_v1_0 *hdr;
866 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
867 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
868 const struct firmware *dmub_fw = adev->dm.dmub_fw;
869 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
870 struct abm *abm = adev->dm.dc->res_pool->abm;
871 struct dmub_srv_hw_params hw_params;
872 enum dmub_status status;
873 const unsigned char *fw_inst_const, *fw_bss_data;
874 uint32_t i, fw_inst_const_size, fw_bss_data_size;
875 bool has_hw_support;
876
877 if (!dmub_srv)
878
879 return 0;
880
881 if (!fb_info) {
882 DRM_ERROR("No framebuffer info for DMUB service.\n");
883 return -EINVAL;
884 }
885
886 if (!dmub_fw) {
887
888 DRM_ERROR("No firmware provided for DMUB.\n");
889 return -EINVAL;
890 }
891
892 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
893 if (status != DMUB_STATUS_OK) {
894 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
895 return -EINVAL;
896 }
897
898 if (!has_hw_support) {
899 DRM_INFO("DMUB unsupported on ASIC\n");
900 return 0;
901 }
902
903 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
904
905 fw_inst_const = dmub_fw->data +
906 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
907 PSP_HEADER_BYTES;
908
909 fw_bss_data = dmub_fw->data +
910 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
911 le32_to_cpu(hdr->inst_const_bytes);
912
913
914 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
915 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
916
917 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
918
919
920
921
922
923
924 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
925 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
926 fw_inst_const_size);
927 }
928
929 if (fw_bss_data_size)
930 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
931 fw_bss_data, fw_bss_data_size);
932
933
934 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
935 adev->bios_size);
936
937
938 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
939 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
940
941 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
942 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
943
944 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
945 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
946
947
948 memset(&hw_params, 0, sizeof(hw_params));
949 hw_params.fb_base = adev->gmc.fb_start;
950 hw_params.fb_offset = adev->gmc.aper_base;
951
952
953 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
954 hw_params.load_inst_const = true;
955
956 if (dmcu)
957 hw_params.psp_version = dmcu->psp_version;
958
959 for (i = 0; i < fb_info->num_fb; ++i)
960 hw_params.fb[i] = &fb_info->fb[i];
961
962 status = dmub_srv_hw_init(dmub_srv, &hw_params);
963 if (status != DMUB_STATUS_OK) {
964 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
965 return -EINVAL;
966 }
967
968
969 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
970 if (status != DMUB_STATUS_OK)
971 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
972
973
974 if (dmcu && abm) {
975 dmcu->funcs->dmcu_init(dmcu);
976 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
977 }
978
979 if (!adev->dm.dc->ctx->dmub_srv)
980 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
981 if (!adev->dm.dc->ctx->dmub_srv) {
982 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
983 return -ENOMEM;
984 }
985
986 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
987 adev->dm.dmcub_fw_version);
988
989 return 0;
990}
991
992#if defined(CONFIG_DRM_AMD_DC_DCN)
993static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
994{
995 uint64_t pt_base;
996 uint32_t logical_addr_low;
997 uint32_t logical_addr_high;
998 uint32_t agp_base, agp_bot, agp_top;
999 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1000
1001 memset(pa_config, 0, sizeof(*pa_config));
1002
1003 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1004 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1005
1006 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
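		/*
		 * Raven2 cannot use VRAM beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
		 * so bump the system aperture high address by one to avoid a VM
		 * fault and hardware hang at the very end of FB.
		 */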
1007
1008
1009
1010
1011
1012
1013 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1014 else
1015 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1016
1017 agp_base = 0;
1018 agp_bot = adev->gmc.agp_start >> 24;
1019 agp_top = adev->gmc.agp_end >> 24;
1020
1021
1022 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1023 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1024 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1025 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1026 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1027 page_table_base.low_part = lower_32_bits(pt_base);
1028
1029 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1030 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1031
	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1033 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1034 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1035
1036 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1037 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1038 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1039
1040 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1041 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1042 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1043
1044 pa_config->is_hvm_enabled = 0;
1045
1046}
1047#endif
1048#if defined(CONFIG_DRM_AMD_DC_DCN)
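/*
 * vblank_control_worker() - Deferred work that reference-counts active vblank
 * interrupts, allows DC idle optimizations (MALL) only when no vblank IRQ is
 * in use, and toggles PSR around vblank enable/disable.
 */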
1049static void vblank_control_worker(struct work_struct *work)
1050{
1051 struct vblank_control_work *vblank_work =
1052 container_of(work, struct vblank_control_work, work);
1053 struct amdgpu_display_manager *dm = vblank_work->dm;
1054
1055 mutex_lock(&dm->dc_lock);
1056
1057 if (vblank_work->enable)
1058 dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
1060 dm->active_vblank_irq_count--;
1061
1062 dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1063
1064 DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1065
1066
1067 if (vblank_work->stream && vblank_work->stream->link) {
1068 if (vblank_work->enable) {
1069 if (vblank_work->stream->link->psr_settings.psr_allow_active)
1070 amdgpu_dm_psr_disable(vblank_work->stream);
1071 } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1072 !vblank_work->stream->link->psr_settings.psr_allow_active &&
1073 vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1074 amdgpu_dm_psr_enable(vblank_work->stream);
1075 }
1076 }
1077
1078 mutex_unlock(&dm->dc_lock);
1079
1080 dc_stream_release(vblank_work->stream);
1081
1082 kfree(vblank_work);
1083}
1084
1085#endif
1086static int amdgpu_dm_init(struct amdgpu_device *adev)
1087{
1088 struct dc_init_data init_data;
1089#ifdef CONFIG_DRM_AMD_DC_HDCP
1090 struct dc_callback_init init_params;
1091#endif
1092 int r;
1093
1094 adev->dm.ddev = adev_to_drm(adev);
1095 adev->dm.adev = adev;
1096
1097
1098 memset(&init_data, 0, sizeof(init_data));
1099#ifdef CONFIG_DRM_AMD_DC_HDCP
1100 memset(&init_params, 0, sizeof(init_params));
1101#endif
1102
1103 mutex_init(&adev->dm.dc_lock);
1104 mutex_init(&adev->dm.audio_lock);
1105#if defined(CONFIG_DRM_AMD_DC_DCN)
1106 spin_lock_init(&adev->dm.vblank_lock);
1107#endif
1108
	if (amdgpu_dm_irq_init(adev)) {
1110 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1111 goto error;
1112 }
1113
1114 init_data.asic_id.chip_family = adev->family;
1115
1116 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1117 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1118 init_data.asic_id.chip_id = adev->pdev->device;
1119
1120 init_data.asic_id.vram_width = adev->gmc.vram_width;
1121
1122 init_data.asic_id.atombios_base_address =
1123 adev->mode_info.atom_context->bios;
1124
1125 init_data.driver = adev;
1126
1127 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1128
1129 if (!adev->dm.cgs_device) {
1130 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1131 goto error;
1132 }
1133
1134 init_data.cgs_device = adev->dm.cgs_device;
1135
1136 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1137
1138 switch (adev->asic_type) {
1139 case CHIP_CARRIZO:
1140 case CHIP_STONEY:
1141 case CHIP_RAVEN:
1142 case CHIP_RENOIR:
1143 init_data.flags.gpu_vm_support = true;
1144 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1145 init_data.flags.disable_dmcu = true;
1146 break;
1147 case CHIP_VANGOGH:
1148 case CHIP_YELLOW_CARP:
1149 init_data.flags.gpu_vm_support = true;
1150 break;
1151 default:
1152 break;
1153 }
1154
1155 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1156 init_data.flags.fbc_support = true;
1157
1158 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1159 init_data.flags.multi_mon_pp_mclk_switch = true;
1160
1161 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1162 init_data.flags.disable_fractional_pwm = true;
1163
1164 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1165 init_data.flags.edp_no_power_sequencing = true;
1166
1167 init_data.flags.power_down_display_on_boot = true;
1168
1169 INIT_LIST_HEAD(&adev->dm.da_list);
1170
1171 adev->dm.dc = dc_create(&init_data);
1172
1173 if (adev->dm.dc) {
1174 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1175 } else {
1176 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1177 goto error;
1178 }
1179
1180 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1181 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1182 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1183 }
1184
1185 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1186 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1187
1188 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1189 adev->dm.dc->debug.disable_stutter = true;
1190
1191 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1192 adev->dm.dc->debug.disable_dsc = true;
1193
1194 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1195 adev->dm.dc->debug.disable_clock_gate = true;
1196
1197 r = dm_dmub_hw_init(adev);
1198 if (r) {
1199 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1200 goto error;
1201 }
1202
1203 dc_hardware_init(adev->dm.dc);
1204
1205#if defined(CONFIG_DRM_AMD_DC_DCN)
1206 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1207 struct dc_phy_addr_space_config pa_config;
1208
1209 mmhub_read_system_context(adev, &pa_config);
1210
1211
1212 dc_setup_system_context(adev->dm.dc, &pa_config);
1213 }
1214#endif
1215
1216 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1217 if (!adev->dm.freesync_module) {
1218 DRM_ERROR(
1219 "amdgpu: failed to initialize freesync_module.\n");
1220 } else
1221 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1222 adev->dm.freesync_module);
1223
1224 amdgpu_dm_init_color_mod();
1225
1226#if defined(CONFIG_DRM_AMD_DC_DCN)
1227 if (adev->dm.dc->caps.max_links > 0) {
1228 adev->dm.vblank_control_workqueue =
1229 create_singlethread_workqueue("dm_vblank_control_workqueue");
1230 if (!adev->dm.vblank_control_workqueue)
1231 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1232 }
1233#endif
1234
1235#ifdef CONFIG_DRM_AMD_DC_HDCP
1236 if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1237 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1238
1239 if (!adev->dm.hdcp_workqueue)
1240 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1241 else
1242 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1243
1244 dc_init_callbacks(adev->dm.dc, &init_params);
1245 }
1246#endif
1247#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1248 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1249#endif
1250 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1251 init_completion(&adev->dm.dmub_aux_transfer_done);
1252 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1253 if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1255 goto error;
1256 }
1257 amdgpu_dm_outbox_init(adev);
1258 }
1259
1260 if (amdgpu_dm_initialize_drm_device(adev)) {
1261 DRM_ERROR(
1262 "amdgpu: failed to initialize sw for display support.\n");
1263 goto error;
1264 }
1265
1266
1267 dm_dp_create_fake_mst_encoders(adev);
1268
1269
1270
1271
1272 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1273 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1274
1275 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize vblank support for display.\n");
1278 goto error;
1279 }
1280
1281
1282 DRM_DEBUG_DRIVER("KMS initialized.\n");
1283
1284 return 0;
1285error:
1286 amdgpu_dm_fini(adev);
1287
1288 return -EINVAL;
1289}
1290
1291static int amdgpu_dm_early_fini(void *handle)
1292{
1293 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1294
1295 amdgpu_dm_audio_fini(adev);
1296
1297 return 0;
1298}
1299
1300static void amdgpu_dm_fini(struct amdgpu_device *adev)
1301{
1302 int i;
1303
1304#if defined(CONFIG_DRM_AMD_DC_DCN)
1305 if (adev->dm.vblank_control_workqueue) {
1306 destroy_workqueue(adev->dm.vblank_control_workqueue);
1307 adev->dm.vblank_control_workqueue = NULL;
1308 }
1309#endif
1310
1311 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1312 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1313 }
1314
1315 amdgpu_dm_destroy_drm_device(&adev->dm);
1316
1317#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1318 if (adev->dm.crc_rd_wrk) {
1319 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1320 kfree(adev->dm.crc_rd_wrk);
1321 adev->dm.crc_rd_wrk = NULL;
1322 }
1323#endif
1324#ifdef CONFIG_DRM_AMD_DC_HDCP
1325 if (adev->dm.hdcp_workqueue) {
1326 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1327 adev->dm.hdcp_workqueue = NULL;
1328 }
1329
1330 if (adev->dm.dc)
1331 dc_deinit_callbacks(adev->dm.dc);
1332#endif
1333
1334 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1335
1336 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1337 kfree(adev->dm.dmub_notify);
1338 adev->dm.dmub_notify = NULL;
1339 }
1340
1341 if (adev->dm.dmub_bo)
1342 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1343 &adev->dm.dmub_bo_gpu_addr,
1344 &adev->dm.dmub_bo_cpu_addr);
1345
1346
1347 if (adev->dm.dc)
1348 dc_destroy(&adev->dm.dc);
1349
1350
1351
1352
1353
1354
1355 if (adev->dm.cgs_device) {
1356 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1357 adev->dm.cgs_device = NULL;
1358 }
1359 if (adev->dm.freesync_module) {
1360 mod_freesync_destroy(adev->dm.freesync_module);
1361 adev->dm.freesync_module = NULL;
1362 }
1363
1364 mutex_destroy(&adev->dm.audio_lock);
1365 mutex_destroy(&adev->dm.dc_lock);
1366
1367 return;
1368}
1369
1370static int load_dmcu_fw(struct amdgpu_device *adev)
1371{
1372 const char *fw_name_dmcu = NULL;
1373 int r;
1374 const struct dmcu_firmware_header_v1_0 *hdr;
1375
	switch (adev->asic_type) {
1377#if defined(CONFIG_DRM_AMD_DC_SI)
1378 case CHIP_TAHITI:
1379 case CHIP_PITCAIRN:
1380 case CHIP_VERDE:
1381 case CHIP_OLAND:
1382#endif
1383 case CHIP_BONAIRE:
1384 case CHIP_HAWAII:
1385 case CHIP_KAVERI:
1386 case CHIP_KABINI:
1387 case CHIP_MULLINS:
1388 case CHIP_TONGA:
1389 case CHIP_FIJI:
1390 case CHIP_CARRIZO:
1391 case CHIP_STONEY:
1392 case CHIP_POLARIS11:
1393 case CHIP_POLARIS10:
1394 case CHIP_POLARIS12:
1395 case CHIP_VEGAM:
1396 case CHIP_VEGA10:
1397 case CHIP_VEGA12:
1398 case CHIP_VEGA20:
1399 case CHIP_NAVI10:
1400 case CHIP_NAVI14:
1401 case CHIP_RENOIR:
1402 case CHIP_SIENNA_CICHLID:
1403 case CHIP_NAVY_FLOUNDER:
1404 case CHIP_DIMGREY_CAVEFISH:
1405 case CHIP_BEIGE_GOBY:
1406 case CHIP_VANGOGH:
1407 case CHIP_YELLOW_CARP:
1408 return 0;
1409 case CHIP_NAVI12:
1410 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1411 break;
1412 case CHIP_RAVEN:
1413 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1414 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1415 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1416 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1417 else
1418 return 0;
1419 break;
1420 default:
1421 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1422 return -EINVAL;
1423 }
1424
1425 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1426 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1427 return 0;
1428 }
1429
1430 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1431 if (r == -ENOENT) {
1432
1433 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1434 adev->dm.fw_dmcu = NULL;
1435 return 0;
1436 }
1437 if (r) {
1438 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1439 fw_name_dmcu);
1440 return r;
1441 }
1442
1443 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1444 if (r) {
1445 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1446 fw_name_dmcu);
1447 release_firmware(adev->dm.fw_dmcu);
1448 adev->dm.fw_dmcu = NULL;
1449 return r;
1450 }
1451
1452 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1453 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1454 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1455 adev->firmware.fw_size +=
1456 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1457
1458 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1459 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1460 adev->firmware.fw_size +=
1461 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1462
1463 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1464
1465 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1466
1467 return 0;
1468}
1469
1470static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1471{
1472 struct amdgpu_device *adev = ctx;
1473
1474 return dm_read_reg(adev->dm.dc->ctx, address);
1475}
1476
1477static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1478 uint32_t value)
1479{
1480 struct amdgpu_device *adev = ctx;
1481
1482 return dm_write_reg(adev->dm.dc->ctx, address, value);
1483}
1484
1485static int dm_dmub_sw_init(struct amdgpu_device *adev)
1486{
1487 struct dmub_srv_create_params create_params;
1488 struct dmub_srv_region_params region_params;
1489 struct dmub_srv_region_info region_info;
1490 struct dmub_srv_fb_params fb_params;
1491 struct dmub_srv_fb_info *fb_info;
1492 struct dmub_srv *dmub_srv;
1493 const struct dmcub_firmware_header_v1_0 *hdr;
1494 const char *fw_name_dmub;
1495 enum dmub_asic dmub_asic;
1496 enum dmub_status status;
1497 int r;
1498
1499 switch (adev->asic_type) {
1500 case CHIP_RENOIR:
1501 dmub_asic = DMUB_ASIC_DCN21;
1502 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1503 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1504 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1505 break;
1506 case CHIP_SIENNA_CICHLID:
1507 dmub_asic = DMUB_ASIC_DCN30;
1508 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1509 break;
1510 case CHIP_NAVY_FLOUNDER:
1511 dmub_asic = DMUB_ASIC_DCN30;
1512 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1513 break;
1514 case CHIP_VANGOGH:
1515 dmub_asic = DMUB_ASIC_DCN301;
1516 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1517 break;
1518 case CHIP_DIMGREY_CAVEFISH:
1519 dmub_asic = DMUB_ASIC_DCN302;
1520 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1521 break;
1522 case CHIP_BEIGE_GOBY:
1523 dmub_asic = DMUB_ASIC_DCN303;
1524 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1525 break;
1526 case CHIP_YELLOW_CARP:
1527 dmub_asic = DMUB_ASIC_DCN31;
1528 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1529 break;
1530
1531 default:
1532
1533 return 0;
1534 }
1535
1536 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1537 if (r) {
1538 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1539 return 0;
1540 }
1541
1542 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1543 if (r) {
1544 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1545 return 0;
1546 }
1547
1548 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1549 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1550
1551 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1552 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1553 AMDGPU_UCODE_ID_DMCUB;
1554 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1555 adev->dm.dmub_fw;
1556 adev->firmware.fw_size +=
1557 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1558
1559 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1560 adev->dm.dmcub_fw_version);
1561 }
1562
1563
1564 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1565 dmub_srv = adev->dm.dmub_srv;
1566
1567 if (!dmub_srv) {
1568 DRM_ERROR("Failed to allocate DMUB service!\n");
1569 return -ENOMEM;
1570 }
1571
1572 memset(&create_params, 0, sizeof(create_params));
1573 create_params.user_ctx = adev;
1574 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1575 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1576 create_params.asic = dmub_asic;
1577
1578
1579 status = dmub_srv_create(dmub_srv, &create_params);
1580 if (status != DMUB_STATUS_OK) {
1581 DRM_ERROR("Error creating DMUB service: %d\n", status);
1582 return -EINVAL;
1583 }
1584
1585
	memset(&region_params, 0, sizeof(region_params));
1587
1588 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1589 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1590 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1591 region_params.vbios_size = adev->bios_size;
1592 region_params.fw_bss_data = region_params.bss_data_size ?
1593 adev->dm.dmub_fw->data +
1594 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1595 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1596 region_params.fw_inst_const =
1597 adev->dm.dmub_fw->data +
1598 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1599 PSP_HEADER_BYTES;
1600
	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);
1603
1604 if (status != DMUB_STATUS_OK) {
1605 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1606 return -EINVAL;
1607 }
1608
1609
1610
1611
1612
1613 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1614 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1615 &adev->dm.dmub_bo_gpu_addr,
1616 &adev->dm.dmub_bo_cpu_addr);
1617 if (r)
1618 return r;
1619
1620
1621 memset(&fb_params, 0, sizeof(fb_params));
1622 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1623 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;
1625
1626 adev->dm.dmub_fb_info =
1627 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1628 fb_info = adev->dm.dmub_fb_info;
1629
1630 if (!fb_info) {
1631 DRM_ERROR(
1632 "Failed to allocate framebuffer info for DMUB service!\n");
1633 return -ENOMEM;
1634 }
1635
1636 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1637 if (status != DMUB_STATUS_OK) {
1638 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1639 return -EINVAL;
1640 }
1641
1642 return 0;
1643}
1644
1645static int dm_sw_init(void *handle)
1646{
1647 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1648 int r;
1649
1650 r = dm_dmub_sw_init(adev);
1651 if (r)
1652 return r;
1653
1654 return load_dmcu_fw(adev);
1655}
1656
1657static int dm_sw_fini(void *handle)
1658{
1659 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1660
1661 kfree(adev->dm.dmub_fb_info);
1662 adev->dm.dmub_fb_info = NULL;
1663
1664 if (adev->dm.dmub_srv) {
1665 dmub_srv_destroy(adev->dm.dmub_srv);
1666 adev->dm.dmub_srv = NULL;
1667 }
1668
1669 release_firmware(adev->dm.dmub_fw);
1670 adev->dm.dmub_fw = NULL;
1671
1672 release_firmware(adev->dm.fw_dmcu);
1673 adev->dm.fw_dmcu = NULL;
1674
1675 return 0;
1676}
1677
1678static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1679{
1680 struct amdgpu_dm_connector *aconnector;
1681 struct drm_connector *connector;
1682 struct drm_connector_list_iter iter;
1683 int ret = 0;
1684
1685 drm_connector_list_iter_begin(dev, &iter);
1686 drm_for_each_connector_iter(connector, &iter) {
1687 aconnector = to_amdgpu_dm_connector(connector);
1688 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1689 aconnector->mst_mgr.aux) {
1690 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1691 aconnector,
1692 aconnector->base.base.id);
1693
1694 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1695 if (ret < 0) {
1696 DRM_ERROR("DM_MST: Failed to start MST\n");
1697 aconnector->dc_link->type =
1698 dc_connection_single;
1699 break;
1700 }
1701 }
1702 }
1703 drm_connector_list_iter_end(&iter);
1704
1705 return ret;
1706}
1707
1708static int dm_late_init(void *handle)
1709{
1710 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1711
1712 struct dmcu_iram_parameters params;
1713 unsigned int linear_lut[16];
1714 int i;
1715 struct dmcu *dmcu = NULL;
1716
1717 dmcu = adev->dm.dc->res_pool->dmcu;
1718
1719 for (i = 0; i < 16; i++)
1720 linear_lut[i] = 0xFFFF * i / 15;
1721
1722 params.set = 0;
1723 params.backlight_ramping_override = false;
1724 params.backlight_ramping_start = 0xCCCC;
1725 params.backlight_ramping_reduction = 0xCCCCCCCC;
1726 params.backlight_lut_array_size = 16;
1727 params.backlight_lut_array = linear_lut;
1728
1729
1730
1731
1732 params.min_abm_backlight = 0x28F;
1733
1734
1735
1736
1737 if (dmcu) {
1738 if (!dmcu_load_iram(dmcu, params))
1739 return -EINVAL;
1740 } else if (adev->dm.dc->ctx->dmub_srv) {
1741 struct dc_link *edp_links[MAX_NUM_EDP];
1742 int edp_num;
1743
1744 get_edp_links(adev->dm.dc, edp_links, &edp_num);
1745 for (i = 0; i < edp_num; i++) {
1746 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
1747 return -EINVAL;
1748 }
1749 }
1750
1751 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1752}
1753
1754static void s3_handle_mst(struct drm_device *dev, bool suspend)
1755{
1756 struct amdgpu_dm_connector *aconnector;
1757 struct drm_connector *connector;
1758 struct drm_connector_list_iter iter;
1759 struct drm_dp_mst_topology_mgr *mgr;
1760 int ret;
1761 bool need_hotplug = false;
1762
1763 drm_connector_list_iter_begin(dev, &iter);
1764 drm_for_each_connector_iter(connector, &iter) {
1765 aconnector = to_amdgpu_dm_connector(connector);
1766 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1767 aconnector->mst_port)
1768 continue;
1769
1770 mgr = &aconnector->mst_mgr;
1771
1772 if (suspend) {
1773 drm_dp_mst_topology_mgr_suspend(mgr);
1774 } else {
1775 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1776 if (ret < 0) {
1777 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1778 need_hotplug = true;
1779 }
1780 }
1781 }
1782 drm_connector_list_iter_end(&iter);
1783
1784 if (need_hotplug)
1785 drm_kms_helper_hotplug_event(dev);
1786}
1787
1788static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1789{
1790 struct smu_context *smu = &adev->smu;
1791 int ret = 0;
1792
1793 if (!is_support_sw_smu(adev))
1794 return 0;
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
	switch (adev->asic_type) {
1827 case CHIP_NAVI10:
1828 case CHIP_NAVI14:
1829 case CHIP_NAVI12:
1830 break;
1831 default:
1832 return 0;
1833 }
1834
1835 ret = smu_write_watermarks_table(smu);
1836 if (ret) {
1837 DRM_ERROR("Failed to update WMTABLE!\n");
1838 return ret;
1839 }
1840
1841 return 0;
1842}
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864static int dm_hw_init(void *handle)
1865{
1866 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1867
1868 amdgpu_dm_init(adev);
1869 amdgpu_dm_hpd_init(adev);
1870
1871 return 0;
1872}
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882static int dm_hw_fini(void *handle)
1883{
1884 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1885
1886 amdgpu_dm_hpd_fini(adev);
1887
1888 amdgpu_dm_irq_fini(adev);
1889 amdgpu_dm_fini(adev);
1890 return 0;
1891}
1892
1893
1894static int dm_enable_vblank(struct drm_crtc *crtc);
1895static void dm_disable_vblank(struct drm_crtc *crtc);
1896
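/*
 * dm_gpureset_toggle_interrupts() - Enable or disable pageflip and vblank
 * interrupts for every stream in the given dc_state; used to quiesce and
 * restore IRQs across a GPU reset.
 */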
1897static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1898 struct dc_state *state, bool enable)
1899{
1900 enum dc_irq_source irq_source;
1901 struct amdgpu_crtc *acrtc;
1902 int rc = -EBUSY;
1903 int i = 0;
1904
1905 for (i = 0; i < state->stream_count; i++) {
1906 acrtc = get_crtc_by_otg_inst(
1907 adev, state->stream_status[i].primary_otg_inst);
1908
1909 if (acrtc && state->stream_status[i].plane_count != 0) {
1910 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1911 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
				      acrtc->crtc_id, enable ? "en" : "dis", rc);
1914 if (rc)
1915 DRM_WARN("Failed to %s pflip interrupts\n",
1916 enable ? "enable" : "disable");
1917
1918 if (enable) {
1919 rc = dm_enable_vblank(&acrtc->base);
1920 if (rc)
1921 DRM_WARN("Failed to enable vblank interrupts\n");
1922 } else {
1923 dm_disable_vblank(&acrtc->base);
1924 }
1925
1926 }
1927 }
1928
1929}
1930
1931static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1932{
1933 struct dc_state *context = NULL;
1934 enum dc_status res = DC_ERROR_UNEXPECTED;
1935 int i;
1936 struct dc_stream_state *del_streams[MAX_PIPES];
1937 int del_streams_count = 0;
1938
1939 memset(del_streams, 0, sizeof(del_streams));
1940
1941 context = dc_create_state(dc);
1942 if (context == NULL)
1943 goto context_alloc_fail;
1944
1945 dc_resource_state_copy_construct_current(dc, context);
1946
1947
1948 for (i = 0; i < context->stream_count; i++) {
1949 struct dc_stream_state *stream = context->streams[i];
1950
1951 del_streams[del_streams_count++] = stream;
1952 }
1953
1954
1955 for (i = 0; i < del_streams_count; i++) {
1956 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1957 res = DC_FAIL_DETACH_SURFACES;
1958 goto fail;
1959 }
1960
1961 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1962 if (res != DC_OK)
1963 goto fail;
1964 }
1965
1966
1967 res = dc_validate_global_state(dc, context, false);
1968
1969 if (res != DC_OK) {
1970 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1971 goto fail;
1972 }
1973
1974 res = dc_commit_state(dc, context);
1975
1976fail:
1977 dc_release_state(context);
1978
1979context_alloc_fail:
1980 return res;
1981}
1982
1983static int dm_suspend(void *handle)
1984{
1985 struct amdgpu_device *adev = handle;
1986 struct amdgpu_display_manager *dm = &adev->dm;
1987 int ret = 0;
1988
1989 if (amdgpu_in_reset(adev)) {
1990 mutex_lock(&dm->dc_lock);
1991
1992#if defined(CONFIG_DRM_AMD_DC_DCN)
1993 dc_allow_idle_optimizations(adev->dm.dc, false);
1994#endif
1995
1996 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1997
1998 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1999
2000 amdgpu_dm_commit_zero_streams(dm->dc);
2001
2002 amdgpu_dm_irq_suspend(adev);
2003
2004 return ret;
2005 }
2006
2007 WARN_ON(adev->dm.cached_state);
2008 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2009
2010 s3_handle_mst(adev_to_drm(adev), true);
2011
2012 amdgpu_dm_irq_suspend(adev);
2013
2014 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2015
2016 return 0;
2017}
2018
2019static struct amdgpu_dm_connector *
2020amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2021 struct drm_crtc *crtc)
2022{
2023 uint32_t i;
2024 struct drm_connector_state *new_con_state;
2025 struct drm_connector *connector;
2026 struct drm_crtc *crtc_from_state;
2027
2028 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2029 crtc_from_state = new_con_state->crtc;
2030
2031 if (crtc_from_state == crtc)
2032 return to_amdgpu_dm_connector(connector);
2033 }
2034
2035 return NULL;
2036}
2037
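/*
 * emulated_link_detect() - Build a dc_sink matching the connector's signal
 * type, attach it as the link's local sink and read its EDID, in place of a
 * full dc_link_detect().
 */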
2038static void emulated_link_detect(struct dc_link *link)
2039{
2040 struct dc_sink_init_data sink_init_data = { 0 };
2041 struct display_sink_capability sink_caps = { 0 };
2042 enum dc_edid_status edid_status;
2043 struct dc_context *dc_ctx = link->ctx;
2044 struct dc_sink *sink = NULL;
2045 struct dc_sink *prev_sink = NULL;
2046
2047 link->type = dc_connection_none;
2048 prev_sink = link->local_sink;
2049
2050 if (prev_sink)
2051 dc_sink_release(prev_sink);
2052
2053 switch (link->connector_signal) {
2054 case SIGNAL_TYPE_HDMI_TYPE_A: {
2055 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2056 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2057 break;
2058 }
2059
2060 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2061 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2062 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2063 break;
2064 }
2065
2066 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2067 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2068 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2069 break;
2070 }
2071
2072 case SIGNAL_TYPE_LVDS: {
2073 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2074 sink_caps.signal = SIGNAL_TYPE_LVDS;
2075 break;
2076 }
2077
2078 case SIGNAL_TYPE_EDP: {
2079 sink_caps.transaction_type =
2080 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2081 sink_caps.signal = SIGNAL_TYPE_EDP;
2082 break;
2083 }
2084
2085 case SIGNAL_TYPE_DISPLAY_PORT: {
2086 sink_caps.transaction_type =
2087 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2088 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2089 break;
2090 }
2091
2092 default:
2093 DC_ERROR("Invalid connector type! signal:%d\n",
2094 link->connector_signal);
2095 return;
2096 }
2097
2098 sink_init_data.link = link;
2099 sink_init_data.sink_signal = sink_caps.signal;
2100
2101 sink = dc_sink_create(&sink_init_data);
2102 if (!sink) {
2103 DC_ERROR("Failed to create sink!\n");
2104 return;
2105 }
2106
2107
2108 link->local_sink = sink;
2109
2110 edid_status = dm_helpers_read_local_edid(
2111 link->ctx,
2112 link,
2113 sink);
2114
2115 if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
2117
2118}
2119
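/*
 * dm_gpureset_commit_state() - After a GPU reset, re-commit every cached
 * stream with all of its planes flagged for a full update.
 */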
2120static void dm_gpureset_commit_state(struct dc_state *dc_state,
2121 struct amdgpu_display_manager *dm)
2122{
2123 struct {
2124 struct dc_surface_update surface_updates[MAX_SURFACES];
2125 struct dc_plane_info plane_infos[MAX_SURFACES];
2126 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2127 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2128 struct dc_stream_update stream_update;
	} *bundle;
2130 int k, m;
2131
2132 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2133
2134 if (!bundle) {
2135 dm_error("Failed to allocate update bundle\n");
2136 goto cleanup;
2137 }
2138
2139 for (k = 0; k < dc_state->stream_count; k++) {
2140 bundle->stream_update.stream = dc_state->streams[k];
2141
2142 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2143 bundle->surface_updates[m].surface =
2144 dc_state->stream_status->plane_states[m];
2145 bundle->surface_updates[m].surface->force_full_update =
2146 true;
2147 }
2148 dc_commit_updates_for_stream(
2149 dm->dc, bundle->surface_updates,
2150 dc_state->stream_status->plane_count,
2151 dc_state->streams[k], &bundle->stream_update, dc_state);
2152 }
2153
2154cleanup:
2155 kfree(bundle);
2156
2157 return;
2158}
2159
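/*
 * Turn the pipe feeding @link off by committing a dpms_off stream update,
 * without tearing the stream itself down.
 */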
static void dm_set_dpms_off(struct dc_link *link)
{
	struct dc_stream_state *stream_state;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	struct dc_stream_update stream_update;
	bool dpms_off = true;

	memset(&stream_update, 0, sizeof(stream_update));
	stream_update.dpms_off = &dpms_off;

	mutex_lock(&adev->dm.dc_lock);
	stream_state = dc_stream_find_from_link(link);

	if (stream_state == NULL) {
		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
		mutex_unlock(&adev->dm.dc_lock);
		return;
	}

	stream_update.stream = stream_state;
	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
				     stream_state, &stream_update,
				     stream_state->ctx->dc->current_state);
	mutex_unlock(&adev->dm.dc_lock);
}

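/*
 * Resume hook for the DM IP block: bring DC back to D0, re-run link
 * detection and replay the atomic state cached in dm_suspend(). After a GPU
 * reset the dc_state snapshot taken before the reset is re-committed instead.
 */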
static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;

	if (amdgpu_in_reset(adev)) {
		dc_state = dm->cached_dc_state;

		r = dm_dmub_hw_init(adev);
		if (r)
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
		dc_resume(dm->dc);

		amdgpu_dm_irq_resume_early(adev);

		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			/* Force a full update on every plane of stream i */
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}
#if defined(CONFIG_DRM_AMD_DC_DCN)
		/*
		 * Resource allocation happens for link encoders for newer ASIC in
		 * dc_validate_global_state, so we need to revalidate it.
		 *
		 * This shouldn't fail (it passed once before), so warn if it does.
		 */
		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
#endif

		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		dm_gpureset_commit_state(dm->cached_dc_state, dm);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

		dc_release_state(dm->cached_dc_state);
		dm->cached_dc_state = NULL;

		amdgpu_dm_irq_resume_late(adev);

		mutex_unlock(&dm->dc_lock);

		return 0;
	}
	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	s3_handle_mst(ddev, false);

	/* Do detection */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added
 * to the base driver's device list to be initialized and torn down
 * accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.early_fini = amdgpu_dm_early_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.get_format_info = amd_get_format_info,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

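/*
 * Fill in the eDP backlight caps for the link behind @aconnector: whether
 * brightness is controlled over AUX (OLED ext caps or the amdgpu_backlight
 * module parameter) and the min/max luminance derived from the sink's HDR
 * metadata (max_cll/min_cll).
 */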
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct dc_link *link = NULL;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
	int i;

	if (!aconnector || !aconnector->dc_link)
		return;

	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	conn_base = &aconnector->base;
	adev = drm_to_adev(conn_base->dev);
	dm = &adev->dm;
	for (i = 0; i < dm->num_of_edps; i++) {
		if (link == dm->backlight_link[i])
			break;
	}
	if (i >= dm->num_of_edps)
		return;
	caps = &dm->backlight_caps[i];
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1)
		caps->aux_support = true;

	if (amdgpu_backlight == 0)
		caps->aux_support = false;
	else if (amdgpu_backlight == 1)
		caps->aux_support = true;

	/*
	 * From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Calculating this expression directly would need floating point
	 * precision; to avoid that, use the fact that CV is divided by a
	 * constant. From Euclid's division algorithm, CV can be written as
	 * CV = 32*q + r. Replacing CV in the luminance expression gives
	 * 50*(2**q)*(2**(r/32)), so only the values of 2**(r/32) need to be
	 * pre-computed, which is what the pre_computed_values[] table holds
	 * (generated with: (0...32).each {|cv| puts (50*2**(cv/32.0)).round}).
	 */
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];

	/* min luminance: maxLum * (CV/255)^2 / 100 */
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}

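/*
 * Reconcile the connector with the outcome of the last link detection:
 * swap in the freshly detected dc_sink (or the emulated one for forced
 * connectors), refresh the EDID property, CEC address and freesync caps,
 * and clear everything on disconnect. MST links are handled by the DRM MST
 * framework and skipped here.
 */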
void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and
	 * then the connector sink is set to either fake or physical sink
	 * depending on link status. Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eDP as fake sink,
		 * so can't do it via dc_link_detect
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event, and let userspace tell us what to do
	 */
	if (sink) {
		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			dc_sink_release(aconnector->dc_sink);
		}

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);
			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	update_subconnector_property(aconnector);

	if (sink)
		dc_sink_release(sink);
}

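/*
 * HPD (long pulse) handler, called in low-IRQ context: re-run link
 * detection for the connector and, when the sink changed, restore the DRM
 * connector state and notify userspace with a hotplug event.
 */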
static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
#endif

	if (adev->dm.disable_hpd_irq)
		return;

	/*
	 * In case of failure or MST no need to update connector status or
	 * notify the OS since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
		dm_con_state->update_hdcp = true;
	}
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		if (new_connection_type == dc_connection_none &&
		    aconnector->dc_link->type == dc_connection_none)
			dm_set_dpms_off(aconnector->dc_link);

		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}

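/*
 * Service MST sideband traffic signalled through the DPCD ESI registers:
 * read the interrupt vector, hand it to the MST manager, ack the handled
 * bits and re-read until no further IRQ is reported (bounded by
 * max_process_count to avoid a runaway sink).
 */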
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
	       process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);

		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

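/*
 * HPD-RX (short pulse) handler: drain MST ESI notifications first, then let
 * DC handle link loss, retraining and automated-test requests, and finally
 * re-detect the sink if the downstream port status actually changed.
 */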
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	bool result = false;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
	union hpd_irq_data hpd_irq_data;
	bool lock_flag = false;

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));

	if (adev->dm.disable_hpd_irq)
		return;

	/*
	 * TODO: Temporary add mutex to protect hpd interrupt not have a gpio
	 * conflict, after implement i2c helper, this mutex should be
	 * retired.
	 */
	mutex_lock(&aconnector->hpd_lock);

	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);

	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch)) {
		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
			result = true;
			dm_handle_hpd_rx_irq(aconnector);
			goto out;
		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
			result = false;
			dm_handle_hpd_rx_irq(aconnector);
			goto out;
		}
	}

	/*
	 * TODO: We need the lock to avoid touching DC state while it's being
	 * modified during automated compliance testing, or when link loss
	 * happens. While this should be split into subhandlers and proper
	 * interfaces to avoid having to conditionally lock like this in the
	 * outer layer, we need this workaround temporarily to allow MST
	 * lightup in some scenarios to avoid timeout.
	 */
	if (!amdgpu_in_reset(adev) &&
	    (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
	     hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
		mutex_lock(&adev->dm.dc_lock);
		lock_flag = true;
	}

#ifdef CONFIG_DRM_AMD_DC_HDCP
	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
#else
	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
#endif
	if (!amdgpu_in_reset(adev) && lock_flag)
		mutex_unlock(&adev->dm.dc_lock);

out:
	if (result && !is_mst_root_connector) {
		/* Downstream port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
	}
#endif

	if (dc_link->type != dc_connection_mst_branch)
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);

	mutex_unlock(&aconnector->hpd_lock);
}

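/*
 * Hook every connector's HPD and HPD-RX interrupt sources up to the
 * handlers above; both run in low-IRQ (deferred work) context.
 */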
static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

#if defined(CONFIG_DRM_AMD_DC_SI)
/* Register IRQ sources and initialize IRQ callbacks */
static int dce60_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

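/* Register IRQ sources and initialize IRQ callbacks for DCE 8 through DCE 12 ASICs. */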
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	if (adev->asic_type >= CHIP_VEGA10)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
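/* Register IRQ sources and initialize IRQ callbacks for DCN ASICs. */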
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	static const unsigned int vrtl_int_srcid[] = {
		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
	};
#endif

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(
			adev, &int_params, dm_crtc_high_irq, c_irq_params);
	}

	/* Use otg vertical line interrupt */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
				vrtl_int_srcid[i], &adev->vline0_irq);

		if (r) {
			DRM_ERROR("Failed to add vline0 irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);

		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
			break;
		}

		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
					- DC_IRQ_SOURCE_DC1_VLINE0];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
	}
#endif

	/*
	 * Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
			i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

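/* Register Outbox IRQ sources and initialize IRQ callbacks */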
static int register_outbox_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r, i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
			&adev->dmub_outbox_irq);
	if (r) {
		DRM_ERROR("Failed to add outbox irq id!\n");
		return r;
	}

	if (dc->ctx->dmub_srv) {
		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.dmub_outbox_params[0];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_dmub_outbox1_low_irq, c_irq_params);
	}

	return 0;
}
#endif

/*
 * Acquires the lock for the atomic state object and returns
 * the new atomic state.
 *
 * This should only be called during atomic check.
 */
static int dm_atomic_get_state(struct drm_atomic_state *state,
			       struct dm_atomic_state **dm_state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_state *priv_state;

	if (*dm_state)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	*dm_state = to_dm_atomic_state(priv_state);

	return 0;
}

static struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *new_obj_state;
	int i;

	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}

static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}

static void dm_atomic_destroy_state(struct drm_private_obj *obj,
				    struct drm_private_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}

static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};

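/*
 * Set up the DRM mode config limits and helpers for the device and register
 * the DM atomic private object that carries the global dc_state across
 * atomic checks and commits.
 */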
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev_to_drm(adev)->mode_config.max_width = 16384;
	adev_to_drm(adev)->mode_config.max_height = 16384;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev_to_drm(adev)->mode_config.async_page_flip = true;

	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev_to_drm(adev),
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	r = amdgpu_dm_audio_init(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	return 0;
}

#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
					    int bl_idx)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	memset(&caps, 0, sizeof(caps));

	if (dm->backlight_caps[bl_idx].caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(&caps);
	if (caps.caps_valid) {
		dm->backlight_caps[bl_idx].caps_valid = true;
		if (caps.aux_support)
			return;
		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
	} else {
		dm->backlight_caps[bl_idx].min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps[bl_idx].max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps[bl_idx].aux_support)
		return;

	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}

static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
				unsigned *min, unsigned *max)
{
	if (!caps)
		return 0;

	if (caps->aux_support) {
		/* Firmware limits are in nits, DC API wants millinits. */
		*max = 1000 * caps->aux_max_input_signal;
		*min = 1000 * caps->aux_min_input_signal;
	} else {
		/* Firmware limits are 8-bit, PWM control is 16-bit. */
		*max = 0x101 * caps->max_input_signal;
		*min = 0x101 * caps->min_input_signal;
	}
	return 1;
}

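/*
 * The two helpers below map linearly between the userspace brightness range
 * (0..AMDGPU_MAX_BL_LEVEL, i.e. 0..255) and the firmware range [min, max]
 * from get_brightness_range(). As an illustration (a worked example using
 * the default caps, not a value taken from the code): with min/max input
 * signals of 12 and 255 on the PWM path, min = 0x101 * 12 = 3084 and
 * max = 0x101 * 255 = 65535, so a user brightness of 128 becomes
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34433.
 */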
static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
					uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	/* Rescale 0..255 to min..max */
	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
				       AMDGPU_MAX_BL_LEVEL);
}

static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
				      uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	if (brightness < min)
		return 0;

	/* Rescale min..max to 0..255 */
	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
				 max - min);
}

static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
					 int bl_idx,
					 u32 user_brightness)
{
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link;
	u32 brightness;
	bool rc;

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	caps = dm->backlight_caps[bl_idx];

	dm->brightness[bl_idx] = user_brightness;
	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
	link = (struct dc_link *)dm->backlight_link[bl_idx];

	/* Change brightness based on AUX property */
	if (caps.aux_support) {
		rc = dc_link_set_backlight_level_nits(link, true, brightness,
						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
		if (!rc)
			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
	} else {
		rc = dc_link_set_backlight_level(link, brightness, 0);
		if (!rc)
			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
	}

	return rc ? 0 : 1;
}

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int i;

	for (i = 0; i < dm->num_of_edps; i++) {
		if (bd == dm->backlight_dev[i])
			break;
	}
	if (i >= AMDGPU_DM_MAX_NUM_EDP)
		i = 0;
	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);

	return 0;
}

static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
					 int bl_idx)
{
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	caps = dm->backlight_caps[bl_idx];

	if (caps.aux_support) {
		u32 avg, peak;
		bool rc;

		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
		if (!rc)
			return dm->brightness[bl_idx];
		return convert_brightness_to_user(&caps, avg);
	} else {
		int ret = dc_link_get_backlight_level(link);

		if (ret == DC_ERROR_UNEXPECTED)
			return dm->brightness[bl_idx];
		return convert_brightness_to_user(&caps, ret);
	}
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int i;

	for (i = 0; i < dm->num_of_edps; i++) {
		if (bd == dm->backlight_dev[i])
			break;
	}
	if (i >= AMDGPU_DM_MAX_NUM_EDP)
		i = 0;
	return amdgpu_dm_backlight_get_level(dm, i);
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);

	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
								       adev_to_drm(dm->adev)->dev,
								       dm,
								       &amdgpu_dm_backlight_ops,
								       &props);

	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}
#endif

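/*
 * Allocate and initialize one DRM plane of the given type, wiring it to the
 * CRTC mask it may be used with and storing it in mode_info when a slot is
 * provided.
 */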
static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}

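/*
 * Register a backlight device for @link if it drives an internal panel
 * (eDP/LVDS) that is actually connected, and remember the link so brightness
 * requests can be routed to it.
 */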
static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		if (!dm->backlight_dev[dm->num_of_edps])
			amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev[dm->num_of_edps]) {
			dm->backlight_link[dm->num_of_edps] = link;
			dm->num_of_edps++;
		}
	}
#endif
}

/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Returns 0 on success.
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* Use Outbox interrupt */
	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_YELLOW_CARP:
	case CHIP_RENOIR:
		if (register_outbox_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
	default:
		DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
	}
#endif

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
				amdgpu_dm_set_psr_caps(link);
		}

	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		if (dce60_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_NAVI12:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_VANGOGH:
	case CHIP_YELLOW_CARP:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail;
	}

	return 0;
fail:
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}

static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_atomic_private_obj_fini(&dm->atomic_obj);
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/*
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and link settings.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update,
	.vblank_get_counter = dm_vblank_get_counter,
	.backlight_set_level = NULL,
	.backlight_get_level = NULL,
	.hpd_sense = NULL,
	.hpd_set_polarity = NULL,
	.hpd_get_gpio_reg = NULL,
	.page_flip_get_scanoutpos = dm_crtc_get_scanoutpos,
	.add_encoder = NULL,
	.add_connector = NULL,
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev_to_drm(adev));
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

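/*
 * Early-init hook for the DM IP block: set the per-ASIC CRTC/HPD/DIG counts
 * and install the DM display function table before hardware init runs.
 */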
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_RENOIR:
	case CHIP_VANGOGH:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_YELLOW_CARP:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
	case CHIP_NAVI14:
	case CHIP_DIMGREY_CAVEFISH:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_BEIGE_GOBY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev_to_drm(adev)->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

static void get_min_max_dc_plane_scaling(struct drm_device *dev,
					 struct drm_framebuffer *fb,
					 int *min_downscale, int *max_upscale)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];

	switch (fb->format->format) {
	case DRM_FORMAT_P010:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		*max_upscale = plane_cap->max_upscale_factor.nv12;
		*min_downscale = plane_cap->max_downscale_factor.nv12;
		break;

	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		*max_upscale = plane_cap->max_upscale_factor.fp16;
		*min_downscale = plane_cap->max_downscale_factor.fp16;
		break;

	default:
		*max_upscale = plane_cap->max_upscale_factor.argb8888;
		*min_downscale = plane_cap->max_downscale_factor.argb8888;
		break;
	}

	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
	 * scaling factor of 1.0 == 1000 units.
	 */
	if (*max_upscale == 1)
		*max_upscale = 1000;

	if (*min_downscale == 1)
		*min_downscale = 1000;
}

static int fill_dc_scaling_info(const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h, min_downscale, max_upscale;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	/*
	 * For reasons we don't (yet) fully understand a non-zero
	 * src_y coordinate into an NV12 buffer can cause a
	 * system hang. To avoid hangs (and maybe be overly cautious)
	 * let's reject both non-zero src_x and src_y.
	 *
	 * We currently know of only one use-case to reproduce a
	 * scenario with non-zero src_x and src_y for NV12, which
	 * is to gesture the YouTube Android app into full screen
	 * on ChromeOS.
	 */
	if (state->fb &&
	    state->fb->format->format == DRM_FORMAT_NV12 &&
	    (scaling_info->src_rect.x != 0 ||
	     scaling_info->src_rect.y != 0))
		return -EINVAL;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* Validate scaling per-format with DC plane caps */
	if (state->plane && state->plane->dev && state->fb) {
		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
					     &min_downscale, &max_upscale);
	} else {
		min_downscale = 250;
		max_upscale = 16000;
	}

	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < min_downscale || scale_w > max_upscale)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < min_downscale || scale_h > max_upscale)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */
	return 0;
}

static void
fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
				 uint64_t tiling_flags)
{
	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
}

4287static void
4288fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4289 union dc_tiling_info *tiling_info)
4290{
4291 tiling_info->gfx9.num_pipes =
4292 adev->gfx.config.gb_addr_config_fields.num_pipes;
4293 tiling_info->gfx9.num_banks =
4294 adev->gfx.config.gb_addr_config_fields.num_banks;
4295 tiling_info->gfx9.pipe_interleave =
4296 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4297 tiling_info->gfx9.num_shader_engines =
4298 adev->gfx.config.gb_addr_config_fields.num_se;
4299 tiling_info->gfx9.max_compressed_frags =
4300 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4301 tiling_info->gfx9.num_rb_per_se =
4302 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4303 tiling_info->gfx9.shaderEnable = 1;
4304 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4305 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4306 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4307 adev->asic_type == CHIP_BEIGE_GOBY ||
4308 adev->asic_type == CHIP_YELLOW_CARP ||
4309 adev->asic_type == CHIP_VANGOGH)
4310 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4311}
4312
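/*
 * Check that the requested DCC (delta color compression) configuration is
 * something the display hardware can actually scan out for this format,
 * rotation and surface size; returns -EINVAL if it cannot.
 */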
4313static int
4314validate_dcc(struct amdgpu_device *adev,
4315 const enum surface_pixel_format format,
4316 const enum dc_rotation_angle rotation,
4317 const union dc_tiling_info *tiling_info,
4318 const struct dc_plane_dcc_param *dcc,
4319 const struct dc_plane_address *address,
4320 const struct plane_size *plane_size)
4321{
4322 struct dc *dc = adev->dm.dc;
4323 struct dc_dcc_surface_param input;
4324 struct dc_surface_dcc_cap output;
4325
4326 memset(&input, 0, sizeof(input));
4327 memset(&output, 0, sizeof(output));
4328
4329 if (!dcc->enable)
4330 return 0;
4331
4332 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4333 !dc->cap_funcs.get_dcc_compression_cap)
4334 return -EINVAL;
4335
4336 input.format = format;
4337 input.surface_size.width = plane_size->surface_size.width;
4338 input.surface_size.height = plane_size->surface_size.height;
4339 input.swizzle_mode = tiling_info->gfx9.swizzle;
4340
4341 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4342 input.scan = SCAN_DIRECTION_HORIZONTAL;
4343 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4344 input.scan = SCAN_DIRECTION_VERTICAL;
4345
4346 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4347 return -EINVAL;
4348
4349 if (!output.capable)
4350 return -EINVAL;
4351
4352 if (dcc->independent_64b_blks == 0 &&
4353 output.grph.rgb.independent_64b_blks != 0)
4354 return -EINVAL;
4355
4356 return 0;
4357}
4358
4359static bool
4360modifier_has_dcc(uint64_t modifier)
4361{
4362 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4363}
4364
4365static unsigned
4366modifier_gfx9_swizzle_mode(uint64_t modifier)
4367{
4368 if (modifier == DRM_FORMAT_MOD_LINEAR)
4369 return 0;
4370
4371 return AMD_FMT_MOD_GET(TILE, modifier);
4372}
4373
4374static const struct drm_format_info *
4375amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4376{
4377 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4378}
4379
4380static void
4381fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4382 union dc_tiling_info *tiling_info,
4383 uint64_t modifier)
4384{
4385 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4386 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4387 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4388 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4389
4390 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4391
4392 if (!IS_AMD_FMT_MOD(modifier))
4393 return;
4394
4395 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4396 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4397
4398 if (adev->family >= AMDGPU_FAMILY_NV) {
4399 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4400 } else {
4401 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4402
4403
4404 }
4405}
4406
4407enum dm_micro_swizzle {
4408 MICRO_SWIZZLE_Z = 0,
4409 MICRO_SWIZZLE_S = 1,
4410 MICRO_SWIZZLE_D = 2,
4411 MICRO_SWIZZLE_R = 3
4412};
4413
4414static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4415 uint32_t format,
4416 uint64_t modifier)
4417{
4418 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4419 const struct drm_format_info *info = drm_format_info(format);
4420 int i;
4421
4422 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4423
4424 if (!info)
4425 return false;
4426
4427
4428
4429
4430
4431
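	/*
	 * Linear layouts and the "no modifier" case are always accepted;
	 * everything else must appear in the plane's modifier list.
	 */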
4432 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4433 modifier == DRM_FORMAT_MOD_INVALID) {
4434 return true;
4435 }
4436
4437
4438 for (i = 0; i < plane->modifier_count; i++) {
4439 if (modifier == plane->modifiers[i])
4440 break;
4441 }
4442 if (i == plane->modifier_count)
4443 return false;
4444
4445
4446
4447
4448
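	/*
	 * The D (display) micro-swizzle only works for certain bpp: reject
	 * 32bpp GFX9-tiled D layouts on Navi and newer, and require at least
	 * 64bpp for D-swizzled formats on Raven and newer.
	 */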
4449 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4450 adev->family >= AMDGPU_FAMILY_NV) {
4451 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4452 return false;
4453 }
4454
4455 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4456 info->cpp[0] < 8)
4457 return false;
4458
4459 if (modifier_has_dcc(modifier)) {
4460
4461 if (info->cpp[0] != 4)
4462 return false;
4463
4464
4465 if (info->num_planes > 1)
4466 return false;
4467 }
4468
4469 return true;
4470}
4471
4472static void
4473add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4474{
4475 if (!*mods)
4476 return;
4477
4478 if (*cap - *size < 1) {
4479 uint64_t new_cap = *cap * 2;
4480 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4481
4482 if (!new_mods) {
4483 kfree(*mods);
4484 *mods = NULL;
4485 return;
4486 }
4487
4488 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4489 kfree(*mods);
4490 *mods = new_mods;
4491 *cap = new_cap;
4492 }
4493
4494 (*mods)[*size] = mod;
4495 *size += 1;
4496}
4497
4498static void
4499add_gfx9_modifiers(const struct amdgpu_device *adev,
4500 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4501{
4502 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4503 int pipe_xor_bits = min(8, pipes +
4504 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4505 int bank_xor_bits = min(8 - pipe_xor_bits,
4506 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4507 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4508 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4509
4510
4511 if (adev->family == AMDGPU_FAMILY_RV) {
4512
4513 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4514
4515
4516
4517
4518
4519
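		/*
		 * DCC constant encoding is only advertised when the hardware
		 * supports it: anything newer than Raven, or late enough
		 * Raven revisions.
		 */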
4520 if (has_constant_encode) {
4521 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4522 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4523 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4524 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4525 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4526 AMD_FMT_MOD_SET(DCC, 1) |
4527 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4528 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4529 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4530 }
4531
4532 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4533 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4534 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4535 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4536 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4537 AMD_FMT_MOD_SET(DCC, 1) |
4538 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4539 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4540 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4541
4542 if (has_constant_encode) {
4543 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4544 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4545 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4546 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4547 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4548 AMD_FMT_MOD_SET(DCC, 1) |
4549 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4550 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4551 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4552
4553 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4554 AMD_FMT_MOD_SET(RB, rb) |
4555 AMD_FMT_MOD_SET(PIPE, pipes));
4556 }
4557
4558 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4559 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4560 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4561 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4562 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4563 AMD_FMT_MOD_SET(DCC, 1) |
4564 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4565 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4566 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4567 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4568 AMD_FMT_MOD_SET(RB, rb) |
4569 AMD_FMT_MOD_SET(PIPE, pipes));
4570 }
4571
4572
4573
4574
4575
4576 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4577 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4578 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4579 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4580 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4581
4582 if (adev->family == AMDGPU_FAMILY_RV) {
4583 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4584 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4585 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4586 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4587 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4588 }
4589
4590
4591
4592
4593
4594 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4595 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4596 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4597
4598 if (adev->family == AMDGPU_FAMILY_RV) {
4599 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4600 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4601 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4602 }
4603}
4604
4605static void
4606add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4607 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4608{
4609 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4610
4611 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4612 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4613 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4614 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4615 AMD_FMT_MOD_SET(DCC, 1) |
4616 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4617 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4618 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4619
4620 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4621 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4622 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4623 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4624 AMD_FMT_MOD_SET(DCC, 1) |
4625 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4626 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4627 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4628 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4629
4630 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4631 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4632 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4633 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4634
4635 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4636 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4637 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4638 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4639
4640
4641
4642 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4643 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4644 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4645
4646 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4647 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4648 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4649}
4650
4651static void
4652add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4653 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4654{
4655 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4656 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4657
4658 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4659 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4660 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4661 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4662 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4663 AMD_FMT_MOD_SET(DCC, 1) |
4664 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4665 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4666 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4667 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4668
4669 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4670 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4671 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4672 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4673 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4674 AMD_FMT_MOD_SET(DCC, 1) |
4675 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4676 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4677 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4678 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4679 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4680
4681 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4682 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4683 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4684 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4685 AMD_FMT_MOD_SET(PACKERS, pkrs));
4686
4687 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4688 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4689 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4690 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4691 AMD_FMT_MOD_SET(PACKERS, pkrs));
4692
4693
4694 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4695 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4696 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4697
4698 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4699 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4700 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4701}
4702
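/*
 * Build the array of format modifiers advertised for a plane. Pre-GFX9 parts
 * do not advertise modifiers at all, and cursor planes are limited to LINEAR.
 * LINEAR and the INVALID terminator are always appended at the end.
 */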
4703static int
4704get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4705{
4706 uint64_t size = 0, capacity = 128;
4707 *mods = NULL;
4708
4709
4710 if (adev->family < AMDGPU_FAMILY_AI)
4711 return 0;
4712
4713 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4714
4715 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4716 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4717 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4718 return *mods ? 0 : -ENOMEM;
4719 }
4720
4721 switch (adev->family) {
4722 case AMDGPU_FAMILY_AI:
4723 case AMDGPU_FAMILY_RV:
4724 add_gfx9_modifiers(adev, mods, &size, &capacity);
4725 break;
4726 case AMDGPU_FAMILY_NV:
4727 case AMDGPU_FAMILY_VGH:
4728 case AMDGPU_FAMILY_YC:
4729 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4730 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4731 else
4732 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4733 break;
4734 }
4735
4736 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4737
4738
4739 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4740
4741 if (!*mods)
4742 return -ENOMEM;
4743
4744 return 0;
4745}
4746
4747static int
4748fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4749 const struct amdgpu_framebuffer *afb,
4750 const enum surface_pixel_format format,
4751 const enum dc_rotation_angle rotation,
4752 const struct plane_size *plane_size,
4753 union dc_tiling_info *tiling_info,
4754 struct dc_plane_dcc_param *dcc,
4755 struct dc_plane_address *address,
4756 const bool force_disable_dcc)
4757{
4758 const uint64_t modifier = afb->base.modifier;
4759 int ret = 0;
4760
4761 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4762 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4763
4764 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4765 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4766
4767 dcc->enable = 1;
4768 dcc->meta_pitch = afb->base.pitches[1];
4769 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4770
4771 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4772 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4773 }
4774
4775 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4776 if (ret)
4777 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
4778
4779 return ret;
4780}
4781
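/*
 * Translate an amdgpu framebuffer into the DC plane size, tiling, DCC and
 * address parameters. GFX9 and newer derive tiling from the format modifier;
 * older ASICs use the legacy tiling flags.
 */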
4782static int
4783fill_plane_buffer_attributes(struct amdgpu_device *adev,
4784 const struct amdgpu_framebuffer *afb,
4785 const enum surface_pixel_format format,
4786 const enum dc_rotation_angle rotation,
4787 const uint64_t tiling_flags,
4788 union dc_tiling_info *tiling_info,
4789 struct plane_size *plane_size,
4790 struct dc_plane_dcc_param *dcc,
4791 struct dc_plane_address *address,
4792 bool tmz_surface,
4793 bool force_disable_dcc)
4794{
4795 const struct drm_framebuffer *fb = &afb->base;
4796 int ret;
4797
4798 memset(tiling_info, 0, sizeof(*tiling_info));
4799 memset(plane_size, 0, sizeof(*plane_size));
4800 memset(dcc, 0, sizeof(*dcc));
4801 memset(address, 0, sizeof(*address));
4802
4803 address->tmz_surface = tmz_surface;
4804
4805 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4806 uint64_t addr = afb->address + fb->offsets[0];
4807
4808 plane_size->surface_size.x = 0;
4809 plane_size->surface_size.y = 0;
4810 plane_size->surface_size.width = fb->width;
4811 plane_size->surface_size.height = fb->height;
4812 plane_size->surface_pitch =
4813 fb->pitches[0] / fb->format->cpp[0];
4814
4815 address->type = PLN_ADDR_TYPE_GRAPHICS;
4816 address->grph.addr.low_part = lower_32_bits(addr);
4817 address->grph.addr.high_part = upper_32_bits(addr);
4818 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4819 uint64_t luma_addr = afb->address + fb->offsets[0];
4820 uint64_t chroma_addr = afb->address + fb->offsets[1];
4821
4822 plane_size->surface_size.x = 0;
4823 plane_size->surface_size.y = 0;
4824 plane_size->surface_size.width = fb->width;
4825 plane_size->surface_size.height = fb->height;
4826 plane_size->surface_pitch =
4827 fb->pitches[0] / fb->format->cpp[0];
4828
4829 plane_size->chroma_size.x = 0;
4830 plane_size->chroma_size.y = 0;
4831
4832 plane_size->chroma_size.width = fb->width / 2;
4833 plane_size->chroma_size.height = fb->height / 2;
4834
4835 plane_size->chroma_pitch =
4836 fb->pitches[1] / fb->format->cpp[1];
4837
4838 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4839 address->video_progressive.luma_addr.low_part =
4840 lower_32_bits(luma_addr);
4841 address->video_progressive.luma_addr.high_part =
4842 upper_32_bits(luma_addr);
4843 address->video_progressive.chroma_addr.low_part =
4844 lower_32_bits(chroma_addr);
4845 address->video_progressive.chroma_addr.high_part =
4846 upper_32_bits(chroma_addr);
4847 }
4848
4849 if (adev->family >= AMDGPU_FAMILY_AI) {
4850 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4851 rotation, plane_size,
4852 tiling_info, dcc,
4853 address,
4854 force_disable_dcc);
4855 if (ret)
4856 return ret;
4857 } else {
4858 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4859 }
4860
4861 return 0;
4862}
4863
4864static void
4865fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4866 bool *per_pixel_alpha, bool *global_alpha,
4867 int *global_alpha_value)
4868{
4869 *per_pixel_alpha = false;
4870 *global_alpha = false;
4871 *global_alpha_value = 0xff;
4872
4873 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4874 return;
4875
4876 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4877 static const uint32_t alpha_formats[] = {
4878 DRM_FORMAT_ARGB8888,
4879 DRM_FORMAT_RGBA8888,
4880 DRM_FORMAT_ABGR8888,
4881 };
4882 uint32_t format = plane_state->fb->format->format;
4883 unsigned int i;
4884
4885 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4886 if (format == alpha_formats[i]) {
4887 *per_pixel_alpha = true;
4888 break;
4889 }
4890 }
4891 }
4892
4893 if (plane_state->alpha < 0xffff) {
4894 *global_alpha = true;
4895 *global_alpha_value = plane_state->alpha >> 8;
4896 }
4897}
4898
4899static int
4900fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4901 const enum surface_pixel_format format,
4902 enum dc_color_space *color_space)
4903{
4904 bool full_range;
4905
4906 *color_space = COLOR_SPACE_SRGB;
4907
4908
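	/* The DRM color encoding/range properties only apply to YCbCr formats. */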
4909 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4910 return 0;
4911
4912 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4913
4914 switch (plane_state->color_encoding) {
4915 case DRM_COLOR_YCBCR_BT601:
4916 if (full_range)
4917 *color_space = COLOR_SPACE_YCBCR601;
4918 else
4919 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4920 break;
4921
4922 case DRM_COLOR_YCBCR_BT709:
4923 if (full_range)
4924 *color_space = COLOR_SPACE_YCBCR709;
4925 else
4926 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4927 break;
4928
4929 case DRM_COLOR_YCBCR_BT2020:
4930 if (full_range)
4931 *color_space = COLOR_SPACE_2020_YCBCR;
4932 else
4933 return -EINVAL;
4934 break;
4935
4936 default:
4937 return -EINVAL;
4938 }
4939
4940 return 0;
4941}
4942
4943static int
4944fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4945 const struct drm_plane_state *plane_state,
4946 const uint64_t tiling_flags,
4947 struct dc_plane_info *plane_info,
4948 struct dc_plane_address *address,
4949 bool tmz_surface,
4950 bool force_disable_dcc)
4951{
4952 const struct drm_framebuffer *fb = plane_state->fb;
4953 const struct amdgpu_framebuffer *afb =
4954 to_amdgpu_framebuffer(plane_state->fb);
4955 int ret;
4956
4957 memset(plane_info, 0, sizeof(*plane_info));
4958
4959 switch (fb->format->format) {
4960 case DRM_FORMAT_C8:
4961 plane_info->format =
4962 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4963 break;
4964 case DRM_FORMAT_RGB565:
4965 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4966 break;
4967 case DRM_FORMAT_XRGB8888:
4968 case DRM_FORMAT_ARGB8888:
4969 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4970 break;
4971 case DRM_FORMAT_XRGB2101010:
4972 case DRM_FORMAT_ARGB2101010:
4973 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4974 break;
4975 case DRM_FORMAT_XBGR2101010:
4976 case DRM_FORMAT_ABGR2101010:
4977 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4978 break;
4979 case DRM_FORMAT_XBGR8888:
4980 case DRM_FORMAT_ABGR8888:
4981 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4982 break;
4983 case DRM_FORMAT_NV21:
4984 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4985 break;
4986 case DRM_FORMAT_NV12:
4987 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4988 break;
4989 case DRM_FORMAT_P010:
4990 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4991 break;
4992 case DRM_FORMAT_XRGB16161616F:
4993 case DRM_FORMAT_ARGB16161616F:
4994 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4995 break;
4996 case DRM_FORMAT_XBGR16161616F:
4997 case DRM_FORMAT_ABGR16161616F:
4998 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4999 break;
5000 case DRM_FORMAT_XRGB16161616:
5001 case DRM_FORMAT_ARGB16161616:
5002 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5003 break;
5004 case DRM_FORMAT_XBGR16161616:
5005 case DRM_FORMAT_ABGR16161616:
5006 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5007 break;
5008 default:
5009 DRM_ERROR(
5010 "Unsupported screen format %p4cc\n",
5011 &fb->format->format);
5012 return -EINVAL;
5013 }
5014
5015 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5016 case DRM_MODE_ROTATE_0:
5017 plane_info->rotation = ROTATION_ANGLE_0;
5018 break;
5019 case DRM_MODE_ROTATE_90:
5020 plane_info->rotation = ROTATION_ANGLE_90;
5021 break;
5022 case DRM_MODE_ROTATE_180:
5023 plane_info->rotation = ROTATION_ANGLE_180;
5024 break;
5025 case DRM_MODE_ROTATE_270:
5026 plane_info->rotation = ROTATION_ANGLE_270;
5027 break;
5028 default:
5029 plane_info->rotation = ROTATION_ANGLE_0;
5030 break;
5031 }
5032
5033 plane_info->visible = true;
5034 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5035
5036 plane_info->layer_index = 0;
5037
5038 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5039 &plane_info->color_space);
5040 if (ret)
5041 return ret;
5042
5043 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5044 plane_info->rotation, tiling_flags,
5045 &plane_info->tiling_info,
5046 &plane_info->plane_size,
5047 &plane_info->dcc, address, tmz_surface,
5048 force_disable_dcc);
5049 if (ret)
5050 return ret;
5051
5052 fill_blending_from_plane_state(
5053 plane_state, &plane_info->per_pixel_alpha,
5054 &plane_info->global_alpha, &plane_info->global_alpha_value);
5055
5056 return 0;
5057}
5058
5059static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5060 struct dc_plane_state *dc_plane_state,
5061 struct drm_plane_state *plane_state,
5062 struct drm_crtc_state *crtc_state)
5063{
5064 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5065 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5066 struct dc_scaling_info scaling_info;
5067 struct dc_plane_info plane_info;
5068 int ret;
5069 bool force_disable_dcc = false;
5070
5071 ret = fill_dc_scaling_info(plane_state, &scaling_info);
5072 if (ret)
5073 return ret;
5074
5075 dc_plane_state->src_rect = scaling_info.src_rect;
5076 dc_plane_state->dst_rect = scaling_info.dst_rect;
5077 dc_plane_state->clip_rect = scaling_info.clip_rect;
5078 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5079
5080 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5081 ret = fill_dc_plane_info_and_addr(adev, plane_state,
5082 afb->tiling_flags,
5083 &plane_info,
5084 &dc_plane_state->address,
5085 afb->tmz_surface,
5086 force_disable_dcc);
5087 if (ret)
5088 return ret;
5089
	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
5093 dc_plane_state->plane_size = plane_info.plane_size;
5094 dc_plane_state->rotation = plane_info.rotation;
5095 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5096 dc_plane_state->stereo_format = plane_info.stereo_format;
5097 dc_plane_state->tiling_info = plane_info.tiling_info;
5098 dc_plane_state->visible = plane_info.visible;
5099 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5100 dc_plane_state->global_alpha = plane_info.global_alpha;
5101 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5102 dc_plane_state->dcc = plane_info.dcc;
5103 dc_plane_state->layer_index = plane_info.layer_index;
5104 dc_plane_state->flip_int_enabled = true;
5105
5106
5107
5108
5109
5110 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5111 if (ret)
5112 return ret;
5113
5114 return 0;
5115}
5116
5117static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5118 const struct dm_connector_state *dm_state,
5119 struct dc_stream_state *stream)
5120{
5121 enum amdgpu_rmx_type rmx_type;
5122
5123 struct rect src = { 0 };
5124 struct rect dst = { 0 };
5125
5126
5127 if (!mode)
5128 return;
5129
5130
5131 src.width = mode->hdisplay;
5132 src.height = mode->vdisplay;
5133 dst.width = stream->timing.h_addressable;
5134 dst.height = stream->timing.v_addressable;
5135
5136 if (dm_state) {
5137 rmx_type = dm_state->scaling;
5138 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5139 if (src.width * dst.height <
5140 src.height * dst.width) {
5141
5142 dst.width = src.width *
5143 dst.height / src.height;
5144 } else {
5145
5146 dst.height = src.height *
5147 dst.width / src.width;
5148 }
5149 } else if (rmx_type == RMX_CENTER) {
5150 dst = src;
5151 }
5152
5153 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5154 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5155
5156 if (dm_state->underscan_enable) {
5157 dst.x += dm_state->underscan_hborder / 2;
5158 dst.y += dm_state->underscan_vborder / 2;
5159 dst.width -= dm_state->underscan_hborder;
5160 dst.height -= dm_state->underscan_vborder;
5161 }
5162 }
5163
5164 stream->src = src;
5165 stream->dst = dst;
5166
5167 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5168 dst.x, dst.y, dst.width, dst.height);
5169
5170}
5171
5172static enum dc_color_depth
5173convert_color_depth_from_display_info(const struct drm_connector *connector,
5174 bool is_y420, int requested_bpc)
5175{
5176 uint8_t bpc;
5177
5178 if (is_y420) {
5179 bpc = 8;
5180
5181
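		/*
		 * Start at 8 bpc and raise it according to the sink's
		 * advertised YCbCr 4:2:0 deep colour support.
		 */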
5182 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5183 bpc = 16;
5184 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5185 bpc = 12;
5186 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5187 bpc = 10;
5188 } else {
5189 bpc = (uint8_t)connector->display_info.bpc;
5190
5191 bpc = bpc ? bpc : 8;
5192 }
5193
5194 if (requested_bpc > 0) {
5195
5196
5197
5198
5199
5200
5201
5202
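		/*
		 * Clamp to the max bpc requested for the connector, then round
		 * down to an even value; only even depths map to a DC colour
		 * depth below.
		 */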
5203 bpc = min_t(u8, bpc, requested_bpc);
5204
5205
5206 bpc = bpc - (bpc & 1);
5207 }
5208
5209 switch (bpc) {
5210 case 0:
5211
5212
5213
5214
5215
5216 return COLOR_DEPTH_888;
5217 case 6:
5218 return COLOR_DEPTH_666;
5219 case 8:
5220 return COLOR_DEPTH_888;
5221 case 10:
5222 return COLOR_DEPTH_101010;
5223 case 12:
5224 return COLOR_DEPTH_121212;
5225 case 14:
5226 return COLOR_DEPTH_141414;
5227 case 16:
5228 return COLOR_DEPTH_161616;
5229 default:
5230 return COLOR_DEPTH_UNDEFINED;
5231 }
5232}
5233
5234static enum dc_aspect_ratio
5235get_aspect_ratio(const struct drm_display_mode *mode_in)
5236{
5237
5238 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5239}
5240
5241static enum dc_color_space
5242get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5243{
5244 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5245
5246 switch (dc_crtc_timing->pixel_encoding) {
5247 case PIXEL_ENCODING_YCBCR422:
5248 case PIXEL_ENCODING_YCBCR444:
5249 case PIXEL_ENCODING_YCBCR420:
5250 {
5251
5252
5253
5254
5255
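		/*
		 * Pick BT.709 for pixel clocks above 27.03 MHz (HD timings)
		 * and BT.601 below that (SD timings).
		 */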
5256 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5257 if (dc_crtc_timing->flags.Y_ONLY)
5258 color_space =
5259 COLOR_SPACE_YCBCR709_LIMITED;
5260 else
5261 color_space = COLOR_SPACE_YCBCR709;
5262 } else {
5263 if (dc_crtc_timing->flags.Y_ONLY)
5264 color_space =
5265 COLOR_SPACE_YCBCR601_LIMITED;
5266 else
5267 color_space = COLOR_SPACE_YCBCR601;
5268 }
5269
5270 }
5271 break;
5272 case PIXEL_ENCODING_RGB:
5273 color_space = COLOR_SPACE_SRGB;
5274 break;
5275
5276 default:
5277 WARN_ON(1);
5278 break;
5279 }
5280
5281 return color_space;
5282}
5283
5284static bool adjust_colour_depth_from_display_info(
5285 struct dc_crtc_timing *timing_out,
5286 const struct drm_display_info *info)
5287{
5288 enum dc_color_depth depth = timing_out->display_color_depth;
5289 int normalized_clk;
5290 do {
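		/*
		 * Estimate the required TMDS character rate in kHz: 4:2:0
		 * halves the pixel clock, deep colour scales it by bpc / 24.
		 * Step the depth down until it fits the sink's max TMDS clock.
		 */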
5291 normalized_clk = timing_out->pix_clk_100hz / 10;
5292
5293 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5294 normalized_clk /= 2;
5295
5296 switch (depth) {
5297 case COLOR_DEPTH_888:
5298 break;
5299 case COLOR_DEPTH_101010:
5300 normalized_clk = (normalized_clk * 30) / 24;
5301 break;
5302 case COLOR_DEPTH_121212:
5303 normalized_clk = (normalized_clk * 36) / 24;
5304 break;
5305 case COLOR_DEPTH_161616:
5306 normalized_clk = (normalized_clk * 48) / 24;
5307 break;
5308 default:
5309
5310 return false;
5311 }
5312 if (normalized_clk <= info->max_tmds_clock) {
5313 timing_out->display_color_depth = depth;
5314 return true;
5315 }
5316 } while (--depth > COLOR_DEPTH_666);
5317 return false;
5318}
5319
5320static void fill_stream_properties_from_drm_display_mode(
5321 struct dc_stream_state *stream,
5322 const struct drm_display_mode *mode_in,
5323 const struct drm_connector *connector,
5324 const struct drm_connector_state *connector_state,
5325 const struct dc_stream_state *old_stream,
5326 int requested_bpc)
5327{
5328 struct dc_crtc_timing *timing_out = &stream->timing;
5329 const struct drm_display_info *info = &connector->display_info;
5330 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5331 struct hdmi_vendor_infoframe hv_frame;
5332 struct hdmi_avi_infoframe avi_frame;
5333
5334 memset(&hv_frame, 0, sizeof(hv_frame));
5335 memset(&avi_frame, 0, sizeof(avi_frame));
5336
5337 timing_out->h_border_left = 0;
5338 timing_out->h_border_right = 0;
5339 timing_out->v_border_top = 0;
5340 timing_out->v_border_bottom = 0;
5341
5342 if (drm_mode_is_420_only(info, mode_in)
5343 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5344 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5345 else if (drm_mode_is_420_also(info, mode_in)
5346 && aconnector->force_yuv420_output)
5347 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5348 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5349 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5350 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5351 else
5352 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5353
5354 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5355 timing_out->display_color_depth = convert_color_depth_from_display_info(
5356 connector,
5357 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5358 requested_bpc);
5359 timing_out->scan_type = SCANNING_TYPE_NODATA;
5360 timing_out->hdmi_vic = 0;
5361
	if (old_stream) {
5363 timing_out->vic = old_stream->timing.vic;
5364 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5365 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5366 } else {
5367 timing_out->vic = drm_match_cea_mode(mode_in);
5368 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5369 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5370 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5371 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5372 }
5373
5374 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5375 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5376 timing_out->vic = avi_frame.video_code;
5377 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5378 timing_out->hdmi_vic = hv_frame.vic;
5379 }
5380
5381 if (is_freesync_video_mode(mode_in, aconnector)) {
5382 timing_out->h_addressable = mode_in->hdisplay;
5383 timing_out->h_total = mode_in->htotal;
5384 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5385 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5386 timing_out->v_total = mode_in->vtotal;
5387 timing_out->v_addressable = mode_in->vdisplay;
5388 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5389 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5390 timing_out->pix_clk_100hz = mode_in->clock * 10;
5391 } else {
5392 timing_out->h_addressable = mode_in->crtc_hdisplay;
5393 timing_out->h_total = mode_in->crtc_htotal;
5394 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5395 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5396 timing_out->v_total = mode_in->crtc_vtotal;
5397 timing_out->v_addressable = mode_in->crtc_vdisplay;
5398 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5399 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5400 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5401 }
5402
5403 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5404
5405 stream->output_color_space = get_output_color_space(timing_out);
5406
5407 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5408 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5409 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5410 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5411 drm_mode_is_420_also(info, mode_in) &&
5412 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5413 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5414 adjust_colour_depth_from_display_info(timing_out, info);
5415 }
5416 }
5417}
5418
5419static void fill_audio_info(struct audio_info *audio_info,
5420 const struct drm_connector *drm_connector,
5421 const struct dc_sink *dc_sink)
5422{
5423 int i = 0;
5424 int cea_revision = 0;
5425 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5426
5427 audio_info->manufacture_id = edid_caps->manufacturer_id;
5428 audio_info->product_id = edid_caps->product_id;
5429
5430 cea_revision = drm_connector->display_info.cea_rev;
5431
5432 strscpy(audio_info->display_name,
5433 edid_caps->display_name,
5434 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5435
5436 if (cea_revision >= 3) {
5437 audio_info->mode_count = edid_caps->audio_mode_count;
5438
5439 for (i = 0; i < audio_info->mode_count; ++i) {
5440 audio_info->modes[i].format_code =
5441 (enum audio_format_code)
5442 (edid_caps->audio_modes[i].format_code);
5443 audio_info->modes[i].channel_count =
5444 edid_caps->audio_modes[i].channel_count;
5445 audio_info->modes[i].sample_rates.all =
5446 edid_caps->audio_modes[i].sample_rate;
5447 audio_info->modes[i].sample_size =
5448 edid_caps->audio_modes[i].sample_size;
5449 }
5450 }
5451
5452 audio_info->flags.all = edid_caps->speaker_flags;
5453
5454
5455 if (drm_connector->latency_present[0]) {
5456 audio_info->video_latency = drm_connector->video_latency[0];
5457 audio_info->audio_latency = drm_connector->audio_latency[0];
5458 }
5459
5460
5461
5462}
5463
5464static void
5465copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5466 struct drm_display_mode *dst_mode)
5467{
5468 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5469 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5470 dst_mode->crtc_clock = src_mode->crtc_clock;
5471 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5472 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5473 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5474 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5475 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5476 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5477 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5478 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5479 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5480 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5481 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5482}
5483
5484static void
5485decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5486 const struct drm_display_mode *native_mode,
5487 bool scale_enabled)
5488{
5489 if (scale_enabled) {
5490 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5491 } else if (native_mode->clock == drm_mode->clock &&
5492 native_mode->htotal == drm_mode->htotal &&
5493 native_mode->vtotal == drm_mode->vtotal) {
5494 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5495 } else {
5496
5497 }
5498}
5499
5500static struct dc_sink *
5501create_fake_sink(struct amdgpu_dm_connector *aconnector)
5502{
5503 struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
5506 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5507
5508 sink = dc_sink_create(&sink_init_data);
5509 if (!sink) {
5510 DRM_ERROR("Failed to create sink!\n");
5511 return NULL;
5512 }
5513 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5514
5515 return sink;
5516}
5517
5518static void set_multisync_trigger_params(
5519 struct dc_stream_state *stream)
5520{
5521 struct dc_stream_state *master = NULL;
5522
5523 if (stream->triggered_crtc_reset.enabled) {
5524 master = stream->triggered_crtc_reset.event_source;
5525 stream->triggered_crtc_reset.event =
5526 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5527 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5528 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5529 }
5530}
5531
5532static void set_master_stream(struct dc_stream_state *stream_set[],
5533 int stream_count)
5534{
5535 int j, highest_rfr = 0, master_stream = 0;
5536
5537 for (j = 0; j < stream_count; j++) {
5538 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5539 int refresh_rate = 0;
5540
5541 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5542 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5543 if (refresh_rate > highest_rfr) {
5544 highest_rfr = refresh_rate;
5545 master_stream = j;
5546 }
5547 }
5548 }
5549 for (j = 0; j < stream_count; j++) {
5550 if (stream_set[j])
5551 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5552 }
5553}
5554
5555static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5556{
5557 int i = 0;
5558 struct dc_stream_state *stream;
5559
5560 if (context->stream_count < 2)
5561 return;
	for (i = 0; i < context->stream_count; i++) {
5563 if (!context->streams[i])
5564 continue;
5565
5566
5567
5568
5569
5570 }
5571
5572 set_master_stream(context->streams, context->stream_count);
5573
	for (i = 0; i < context->stream_count; i++) {
5575 stream = context->streams[i];
5576
5577 if (!stream)
5578 continue;
5579
5580 set_multisync_trigger_params(stream);
5581 }
5582}
5583
5584#if defined(CONFIG_DRM_AMD_DC_DCN)
5585static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5586 struct dc_sink *sink, struct dc_stream_state *stream,
5587 struct dsc_dec_dpcd_caps *dsc_caps)
5588{
5589 stream->timing.flags.DSC = 0;
5590
5591 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5592 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5593 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5594 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5595 dsc_caps);
5596 }
5597}
5598
5599static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5600 struct dc_sink *sink, struct dc_stream_state *stream,
5601 struct dsc_dec_dpcd_caps *dsc_caps)
5602{
5603 struct drm_connector *drm_connector = &aconnector->base;
5604 uint32_t link_bandwidth_kbps;
5605
5606 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5607 dc_link_get_link_cap(aconnector->dc_link));
5608
5609 dc_dsc_policy_set_enable_dsc_when_not_needed(
5610 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5611
5612 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5613
5614 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5615 dsc_caps,
5616 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5617 0,
5618 link_bandwidth_kbps,
5619 &stream->timing,
5620 &stream->timing.dsc_cfg)) {
5621 stream->timing.flags.DSC = 1;
5622 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5623 }
5624 }
5625
5626
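	/*
	 * Honour a forced DSC enable (for example set through debugfs) even if
	 * the link bandwidth computation above did not select DSC.
	 */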
5627 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5628 stream->timing.flags.DSC = 1;
5629
5630 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5631 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5632
5633 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5634 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5635
5636 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5637 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5638}
5639#endif
5640
5670
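/*
 * Find the mode with the highest refresh rate at the preferred mode's
 * resolution. This is used as the base timing for freesync video modes and is
 * cached in aconnector->freesync_vid_base after the first lookup.
 */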
5671static struct drm_display_mode *
5672get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5673 bool use_probed_modes)
5674{
5675 struct drm_display_mode *m, *m_pref = NULL;
5676 u16 current_refresh, highest_refresh;
5677 struct list_head *list_head = use_probed_modes ?
5678 &aconnector->base.probed_modes :
5679 &aconnector->base.modes;
5680
5681 if (aconnector->freesync_vid_base.clock != 0)
5682 return &aconnector->freesync_vid_base;
5683
5684
	list_for_each_entry(m, list_head, head) {
5686 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5687 m_pref = m;
5688 break;
5689 }
5690 }
5691
5692 if (!m_pref) {
5693
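		/* Probably an EDID with no preferred mode: fall back to the first mode. */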
5694 m_pref = list_first_entry_or_null(
5695 &aconnector->base.modes, struct drm_display_mode, head);
5696 if (!m_pref) {
5697 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5698 return NULL;
5699 }
5700 }
5701
5702 highest_refresh = drm_mode_vrefresh(m_pref);
5703
5704
5705
5706
5707
5708
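	/*
	 * The preferred mode is not necessarily the fastest one; look for the
	 * highest refresh rate among modes with the same resolution.
	 */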
	list_for_each_entry(m, list_head, head) {
5710 current_refresh = drm_mode_vrefresh(m);
5711
5712 if (m->hdisplay == m_pref->hdisplay &&
5713 m->vdisplay == m_pref->vdisplay &&
5714 highest_refresh < current_refresh) {
5715 highest_refresh = current_refresh;
5716 m_pref = m;
5717 }
5718 }
5719
5720 aconnector->freesync_vid_base = *m_pref;
5721 return m_pref;
5722}
5723
5724static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5725 struct amdgpu_dm_connector *aconnector)
5726{
5727 struct drm_display_mode *high_mode;
5728 int timing_diff;
5729
5730 high_mode = get_highest_refresh_rate_mode(aconnector, false);
5731 if (!high_mode || !mode)
5732 return false;
5733
5734 timing_diff = high_mode->vtotal - mode->vtotal;
5735
5736 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5737 high_mode->hdisplay != mode->hdisplay ||
5738 high_mode->vdisplay != mode->vdisplay ||
5739 high_mode->hsync_start != mode->hsync_start ||
5740 high_mode->hsync_end != mode->hsync_end ||
5741 high_mode->htotal != mode->htotal ||
5742 high_mode->hskew != mode->hskew ||
5743 high_mode->vscan != mode->vscan ||
5744 high_mode->vsync_start - mode->vsync_start != timing_diff ||
5745 high_mode->vsync_end - mode->vsync_end != timing_diff)
5746 return false;
5747 else
5748 return true;
5749}
5750
5751static struct dc_stream_state *
5752create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5753 const struct drm_display_mode *drm_mode,
5754 const struct dm_connector_state *dm_state,
5755 const struct dc_stream_state *old_stream,
5756 int requested_bpc)
5757{
5758 struct drm_display_mode *preferred_mode = NULL;
5759 struct drm_connector *drm_connector;
5760 const struct drm_connector_state *con_state =
5761 dm_state ? &dm_state->base : NULL;
5762 struct dc_stream_state *stream = NULL;
5763 struct drm_display_mode mode = *drm_mode;
5764 struct drm_display_mode saved_mode;
5765 struct drm_display_mode *freesync_mode = NULL;
5766 bool native_mode_found = false;
5767 bool recalculate_timing = false;
5768 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5769 int mode_refresh;
5770 int preferred_refresh = 0;
5771#if defined(CONFIG_DRM_AMD_DC_DCN)
5772 struct dsc_dec_dpcd_caps dsc_caps;
5773#endif
5774 struct dc_sink *sink = NULL;
5775
5776 memset(&saved_mode, 0, sizeof(saved_mode));
5777
5778 if (aconnector == NULL) {
5779 DRM_ERROR("aconnector is NULL!\n");
5780 return stream;
5781 }
5782
5783 drm_connector = &aconnector->base;
5784
5785 if (!aconnector->dc_sink) {
5786 sink = create_fake_sink(aconnector);
5787 if (!sink)
5788 return stream;
5789 } else {
5790 sink = aconnector->dc_sink;
5791 dc_sink_retain(sink);
5792 }
5793
5794 stream = dc_create_stream_for_sink(sink);
5795
5796 if (stream == NULL) {
5797 DRM_ERROR("Failed to create stream for sink!\n");
5798 goto finish;
5799 }
5800
5801 stream->dm_stream_context = aconnector;
5802
5803 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5804 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5805
5806 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5807
5808 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5809 native_mode_found = true;
5810 break;
5811 }
5812 }
5813 if (!native_mode_found)
5814 preferred_mode = list_first_entry_or_null(
5815 &aconnector->base.modes,
5816 struct drm_display_mode,
5817 head);
5818
5819 mode_refresh = drm_mode_vrefresh(&mode);
5820
5821 if (preferred_mode == NULL) {
5822
5823
5824
5825
5826
5827
5828 DRM_DEBUG_DRIVER("No preferred mode found\n");
5829 } else {
5830 recalculate_timing = amdgpu_freesync_vid_mode &&
5831 is_freesync_video_mode(&mode, aconnector);
5832 if (recalculate_timing) {
5833 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5834 saved_mode = mode;
5835 mode = *freesync_mode;
5836 } else {
5837 decide_crtc_timing_for_drm_display_mode(
5838 &mode, preferred_mode, scale);
5839
5840 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5841 }
5842 }
5843
5844 if (recalculate_timing)
5845 drm_mode_set_crtcinfo(&saved_mode, 0);
5846 else if (!dm_state)
5847 drm_mode_set_crtcinfo(&mode, 0);
5848
5849
5850
5851
5852
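	/*
	 * Only inherit the old stream's VIC and sync polarities when scaling is
	 * enabled and the refresh rate did not change; otherwise derive all
	 * timing properties from the requested mode.
	 */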
5853 if (!scale || mode_refresh != preferred_refresh)
5854 fill_stream_properties_from_drm_display_mode(
5855 stream, &mode, &aconnector->base, con_state, NULL,
5856 requested_bpc);
5857 else
5858 fill_stream_properties_from_drm_display_mode(
5859 stream, &mode, &aconnector->base, con_state, old_stream,
5860 requested_bpc);
5861
5862#if defined(CONFIG_DRM_AMD_DC_DCN)
5863
5864 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
5865 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
5866 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
5867#endif
5868
5869 update_stream_scaling_settings(&mode, dm_state, stream);
5870
5871 fill_audio_info(
5872 &stream->audio_info,
5873 drm_connector,
5874 sink);
5875
5876 update_stream_signal(stream, sink);
5877
5878 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5879 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5880
5881 if (stream->link->psr_settings.psr_feature_enabled) {
5882
5883
5884
5885
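		/*
		 * Decide whether colorimetry is carried in the VSC SDP: MST
		 * sinks report it through the sink capabilities, SST sinks
		 * through the DPRX feature list in DPCD.
		 */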
5886 stream->use_vsc_sdp_for_colorimetry = false;
5887 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5888 stream->use_vsc_sdp_for_colorimetry =
5889 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5890 } else {
5891 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5892 stream->use_vsc_sdp_for_colorimetry = true;
5893 }
5894 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5895 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
5896
5897 }
5898finish:
5899 dc_sink_release(sink);
5900
5901 return stream;
5902}
5903
5904static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5905{
5906 drm_crtc_cleanup(crtc);
5907 kfree(crtc);
5908}
5909
5910static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5911 struct drm_crtc_state *state)
5912{
5913 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5914
5915
5916 if (cur->stream)
5917 dc_stream_release(cur->stream);
5918
5919
5920 __drm_atomic_helper_crtc_destroy_state(state);
5921
5922
5923 kfree(state);
5924}
5925
5926static void dm_crtc_reset_state(struct drm_crtc *crtc)
5927{
5928 struct dm_crtc_state *state;
5929
5930 if (crtc->state)
5931 dm_crtc_destroy_state(crtc, crtc->state);
5932
5933 state = kzalloc(sizeof(*state), GFP_KERNEL);
5934 if (WARN_ON(!state))
5935 return;
5936
5937 __drm_atomic_helper_crtc_reset(crtc, &state->base);
5938}
5939
5940static struct drm_crtc_state *
5941dm_crtc_duplicate_state(struct drm_crtc *crtc)
5942{
5943 struct dm_crtc_state *state, *cur;
5944
	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
5949
5950 state = kzalloc(sizeof(*state), GFP_KERNEL);
5951 if (!state)
5952 return NULL;
5953
5954 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5955
5956 if (cur->stream) {
5957 state->stream = cur->stream;
5958 dc_stream_retain(state->stream);
5959 }
5960
5961 state->active_planes = cur->active_planes;
5962 state->vrr_infopacket = cur->vrr_infopacket;
5963 state->abm_level = cur->abm_level;
5964 state->vrr_supported = cur->vrr_supported;
5965 state->freesync_config = cur->freesync_config;
5966 state->cm_has_degamma = cur->cm_has_degamma;
5967 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5968
5969
5970 return &state->base;
5971}
5972
5973#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5974static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5975{
5976 crtc_debugfs_init(crtc);
5977
5978 return 0;
5979}
5980#endif
5981
5982static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5983{
5984 enum dc_irq_source irq_source;
5985 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5986 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5987 int rc;
5988
5989 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5990
5991 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5992
5993 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5994 acrtc->crtc_id, enable ? "en" : "dis", rc);
5995 return rc;
5996}
5997
5998static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5999{
6000 enum dc_irq_source irq_source;
6001 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6002 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6003 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6004#if defined(CONFIG_DRM_AMD_DC_DCN)
6005 struct amdgpu_display_manager *dm = &adev->dm;
6006 struct vblank_control_work *work;
6007#endif
6008 int rc = 0;
6009
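	/*
	 * The VUPDATE interrupt is only needed while VRR is active; enable it
	 * together with vblank in that case and always disable it with vblank.
	 */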
6010 if (enable) {
6011
6012 if (amdgpu_dm_vrr_active(acrtc_state))
6013 rc = dm_set_vupdate_irq(crtc, true);
6014 } else {
6015
6016 rc = dm_set_vupdate_irq(crtc, false);
6017 }
6018
6019 if (rc)
6020 return rc;
6021
6022 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6023
6024 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6025 return -EBUSY;
6026
6027 if (amdgpu_in_reset(adev))
6028 return 0;
6029
6030#if defined(CONFIG_DRM_AMD_DC_DCN)
6031 if (dm->vblank_control_workqueue) {
6032 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6033 if (!work)
6034 return -ENOMEM;
6035
6036 INIT_WORK(&work->work, vblank_control_worker);
6037 work->dm = dm;
6038 work->acrtc = acrtc;
6039 work->enable = enable;
6040
6041 if (acrtc_state->stream) {
6042 dc_stream_retain(acrtc_state->stream);
6043 work->stream = acrtc_state->stream;
6044 }
6045
6046 queue_work(dm->vblank_control_workqueue, &work->work);
6047 }
6048#endif
6049
6050 return 0;
6051}
6052
6053static int dm_enable_vblank(struct drm_crtc *crtc)
6054{
6055 return dm_set_vblank(crtc, true);
6056}
6057
6058static void dm_disable_vblank(struct drm_crtc *crtc)
6059{
6060 dm_set_vblank(crtc, false);
6061}
6062
6063
6064static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6065 .reset = dm_crtc_reset_state,
6066 .destroy = amdgpu_dm_crtc_destroy,
6067 .set_config = drm_atomic_helper_set_config,
6068 .page_flip = drm_atomic_helper_page_flip,
6069 .atomic_duplicate_state = dm_crtc_duplicate_state,
6070 .atomic_destroy_state = dm_crtc_destroy_state,
6071 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6072 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6073 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6074 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6075 .enable_vblank = dm_enable_vblank,
6076 .disable_vblank = dm_disable_vblank,
6077 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6078#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6079 .late_register = amdgpu_dm_crtc_late_register,
6080#endif
6081};

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/*
	 * Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl. Which
	 * makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	update_subconnector_property(aconnector);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}

int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *connector_state,
					    struct drm_property *property,
					    uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		dm_new_state->abm_level = val;
		ret = 0;
	}

	return ret;
}

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_state =
		to_dm_connector_state(state);
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		switch (dm_state->scaling) {
		case RMX_CENTER:
			*val = DRM_MODE_SCALE_CENTER;
			break;
		case RMX_ASPECT:
			*val = DRM_MODE_SCALE_ASPECT;
			break;
		case RMX_FULL:
			*val = DRM_MODE_SCALE_FULLSCREEN;
			break;
		case RMX_OFF:
		default:
			*val = DRM_MODE_SCALE_NONE;
			break;
		}
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		*val = dm_state->underscan_hborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		*val = dm_state->underscan_vborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		*val = dm_state->underscan_enable;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		*val = dm_state->abm_level;
		ret = 0;
	}

	return ret;
}

static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}

static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	int i;

	/*
	 * Only destroy the MST topology manager if it was initialized
	 * before, since that is not done for all connector types.
	 */
	if (aconnector->mst_mgr.dev)
		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
	for (i = 0; i < dm->num_of_edps; i++) {
		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
			backlight_device_unregister(dm->backlight_dev[i]);
			dm->backlight_dev[i] = NULL;
		}
	}
#endif

	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;
		state->base.max_requested_bpc = 8;
		state->vcpi_slots = 0;
		state->pbn = 0;
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			state->abm_level = amdgpu_dm_abm_level;

		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}

struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
		kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (!new_state)
		return NULL;

	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

	new_state->freesync_capable = state->freesync_capable;
	new_state->abm_level = state->abm_level;
	new_state->scaling = state->scaling;
	new_state->underscan_enable = state->underscan_enable;
	new_state->underscan_hborder = state->underscan_hborder;
	new_state->underscan_vborder = state->underscan_vborder;
	new_state->vcpi_slots = state->vcpi_slots;
	new_state->pbn = state->pbn;
	return &new_state->base;
}

static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
		if (r)
			return r;
	}

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};

static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}

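/*
 * Create an emulated (virtual) DC sink from the EDID property attached to
 * the connector. Used when userspace forces a connector on and supplies an
 * override EDID; without an EDID blob the connector is forced off instead.
 */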
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			  aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
			aconnector->dc_link->local_sink :
			aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}

static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * In case of a headless boot with force on for a DP managed
	 * connector, these settings have to be != 0 to get an initial
	 * modeset.
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}

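/*
 * Build a DC stream for the given mode and validate it, retrying with
 * progressively lower colour depth (down to 6 bpc) and finally with
 * YCbCr420 encoding forced before giving up.
 */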
static struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream)
{
	struct drm_connector *connector = &aconnector->base;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct dc_stream_state *stream;
	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
	enum dc_status dc_result = DC_OK;

	do {
		stream = create_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream,
						requested_bpc);
		if (stream == NULL) {
			DRM_ERROR("Failed to create stream for sink!\n");
			break;
		}

		dc_result = dc_validate_stream(adev->dm.dc, stream);

		if (dc_result != DC_OK) {
			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
				      drm_mode->hdisplay,
				      drm_mode->vdisplay,
				      drm_mode->clock,
				      dc_result,
				      dc_status_to_str(dc_result));

			dc_stream_release(stream);
			stream = NULL;
			requested_bpc -= 2; /* lower bpc to retry validation */
		}

	} while (stream == NULL && requested_bpc >= 6);

	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");

		aconnector->force_yuv420_output = true;
		stream = create_validate_stream_for_sink(aconnector, drm_mode,
							 dm_state, old_stream);
		aconnector->force_yuv420_output = false;
	}

	return stream;
}

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
						    struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
	    !aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
	    aconnector->base.force != DRM_FORCE_ON) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
	if (stream) {
		dc_stream_release(stream);
		result = MODE_OK;
	}

fail:
	/* TODO: error handling */
	return result;
}

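/*
 * Pack the connector's HDR output metadata into a Dynamic Range and
 * Mastering infoframe (HDMI) or the equivalent SDP header layout (DP/eDP).
 * The packed frame is a fixed 26-byte static-metadata payload plus a
 * 4-byte header.
 */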
static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30];
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}

static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
				 struct drm_atomic_state *state)
{
	struct drm_connector_state *new_con_state =
		drm_atomic_get_new_connector_state(state, conn);
	struct drm_connector_state *old_con_state =
		drm_atomic_get_old_connector_state(state, conn);
	struct drm_crtc *crtc = new_con_state->crtc;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	trace_amdgpu_dm_connector_atomic_check(new_con_state);

	if (!crtc)
		return 0;

	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
		struct dc_info_packet hdr_infopacket;

		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
		if (ret)
			return ret;

		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(new_crtc_state))
			return PTR_ERR(new_crtc_state);

		/*
		 * DC considers the stream backends changed if the
		 * static metadata changes. Forcing the modeset also
		 * gives a simple way for userspace to switch from
		 * 8bpc to 10bpc when setting the metadata to enter
		 * or exit HDR.
		 *
		 * Changing the static metadata after it's been
		 * set is permissible, however. So only force a
		 * modeset if we're entering or exiting HDR.
		 */
		new_crtc_state->mode_changed =
			!old_con_state->hdr_output_metadata ||
			!new_con_state->hdr_output_metadata;
	}

	return 0;
}

static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second bigger display in FB Con mode, bigger
	 * resolution modes will be filtered by drm_mode_validate_size(),
	 * and those modes are missing after the user starts lightdm. So we
	 * need to renew the modes list in the get_modes callback, not just
	 * return the modes count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};

static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}

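/*
 * Count the non-cursor planes that will be active on the CRTC after this
 * commit; the result is cached in dm_crtc_state and used when validating
 * whether the CRTC can be enabled.
 */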
static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
	struct drm_atomic_state *state = new_crtc_state->state;
	struct drm_plane *plane;
	int num_active = 0;

	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
		struct drm_plane_state *new_plane_state;

		/* Cursor planes are "fake". */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		new_plane_state = drm_atomic_get_new_plane_state(state, plane);

		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
			num_active += 1;
			continue;
		}

		/* We need a framebuffer to be considered enabled. */
		num_active += (new_plane_state->fb != NULL);
	}

	return num_active;
}

static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
					 struct drm_crtc_state *new_crtc_state)
{
	struct dm_crtc_state *dm_new_crtc_state =
		to_dm_crtc_state(new_crtc_state);

	dm_new_crtc_state->active_planes = 0;

	if (!dm_new_crtc_state->stream)
		return;

	dm_new_crtc_state->active_planes =
		count_crtc_active_planes(new_crtc_state);
}

static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	int ret = -EINVAL;

	trace_amdgpu_dm_crtc_atomic_check(crtc_state);

	dm_update_crtc_active_planes(crtc, crtc_state);

	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
			modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
		return ret;
	}

	/*
	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
	 * planes are disabled, which is not supported by the hardware. And there is legacy
	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
	 */
	if (crtc_state->enable &&
	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
		return -EINVAL;
	}

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
	return ret;
}

static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};

static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}

static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}

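/*
 * For MST connectors, compute the payload bandwidth number (PBN) for the
 * adjusted mode and reserve the matching number of VCPI time slots in the
 * atomic state.
 */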
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	if (!aconnector->port || !aconnector->dc_sink)
		return 0;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;

		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
			  aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}
	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
									   mst_mgr,
									   mst_port,
									   dm_new_connector_state->pbn,
									   dm_mst_get_pbn_divider(aconnector->dc_link));
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}

const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};

#if defined(CONFIG_DRM_AMD_DC_DCN)
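/*
 * Walk the new connector states and re-apply the PBN/VCPI allocations
 * computed by the DSC fairness pass, enabling or disabling DSC on each
 * MST port to match the final stream configuration.
 */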
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
					    struct dc_state *dc_state,
					    struct dsc_mst_fairness_vars *vars)
{
	struct dc_stream_state *stream = NULL;
	struct drm_connector *connector;
	struct drm_connector_state *new_con_state;
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	int i, j, clock;
	int vcpi, pbn_div, pbn = 0;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		aconnector = to_amdgpu_dm_connector(connector);

		if (!aconnector->port)
			continue;

		if (!new_con_state || !new_con_state->crtc)
			continue;

		dm_conn_state = to_dm_connector_state(new_con_state);

		for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;

		if (stream->timing.flags.DSC != 1) {
			drm_dp_mst_atomic_enable_dsc(state,
						     aconnector->port,
						     dm_conn_state->pbn,
						     0,
						     false);
			continue;
		}

		pbn_div = dm_mst_get_pbn_divider(stream->link);
		clock = stream->timing.pix_clk_100hz / 10;

		for (j = 0; j < dc_state->stream_count; j++) {
			if (vars[j].aconnector == aconnector) {
				pbn = vars[j].pbn;
				break;
			}
		}

		vcpi = drm_dp_mst_atomic_enable_dsc(state,
						    aconnector->port,
						    pbn, pbn_div,
						    true);
		if (vcpi < 0)
			return vcpi;

		dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}
#endif

static void dm_drm_plane_reset(struct drm_plane *plane)
{
	struct dm_plane_state *amdgpu_state = NULL;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);

	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
	WARN_ON(amdgpu_state == NULL);

	if (amdgpu_state)
		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
}

static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;

	old_dm_plane_state = to_dm_plane_state(plane->state);
	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
	if (!dm_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);

	if (old_dm_plane_state->dc_state) {
		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
		dc_plane_state_retain(dm_plane_state->dc_state);
	}

	return &dm_plane_state->base;
}

static void dm_drm_plane_destroy_state(struct drm_plane *plane,
				       struct drm_plane_state *state)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

	if (dm_plane_state->dc_state)
		dc_plane_state_release(dm_plane_state->dc_state);

	drm_atomic_helper_plane_destroy_state(plane, state);
}

static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_primary_helper_destroy,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
	.format_mod_supported = dm_plane_format_mod_supported,
};

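/*
 * Pin the framebuffer BO into a displayable domain (VRAM, or another
 * supported domain for non-cursor planes) and record its GPU address so
 * DC can program the surface. Runs before the commit touches hardware;
 * the pin is dropped again in cleanup_fb once the flip is done with it.
 */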
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	uint32_t domain;
	int r;

	if (!new_state->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	INIT_LIST_HEAD(&list);

	tv.bo = &rbo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		amdgpu_bo_unpin(rbo);
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	amdgpu_bo_ref(rbo);

	/*
	 * We don't do surface updates on planes that have been newly created,
	 * but we also don't have the afb->address during atomic check.
	 *
	 * Fill in buffer attributes depending on the address here, but only on
	 * newly created planes since they're not being used by DC yet and this
	 * won't modify global state.
	 */
	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (dm_plane_state_new->dc_state &&
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state =
			dm_plane_state_new->dc_state;
		bool force_disable_dcc = !plane_state->dcc.enable;

		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			afb->tiling_flags,
			&plane_state->tiling_info, &plane_state->plane_size,
			&plane_state->dcc, &plane_state->address,
			afb->tmz_surface, force_disable_dcc);
	}

	return 0;
}

static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
				       struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}

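/*
 * Validate plane placement against the CRTC viewport and clamp scaling to
 * what the plane's DC caps allow before handing off to the generic DRM
 * plane-state check.
 */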
static int dm_plane_helper_check_state(struct drm_plane_state *state,
				       struct drm_crtc_state *new_crtc_state)
{
	struct drm_framebuffer *fb = state->fb;
	int min_downscale, max_upscale;
	int min_scale = 0;
	int max_scale = INT_MAX;

	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
	if (fb && state->crtc) {
		/* Validate viewport to cover the case when only the position changes */
		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
			int viewport_width = state->crtc_w;
			int viewport_height = state->crtc_h;

			if (state->crtc_x < 0)
				viewport_width += state->crtc_x;
			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;

			if (state->crtc_y < 0)
				viewport_height += state->crtc_y;
			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;

			if (viewport_width < 0 || viewport_height < 0) {
				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
				return -EINVAL;
			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
				return -EINVAL;
			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
				return -EINVAL;
			}
		}

		/* Get min/max allowed scaling factors from plane caps. */
		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
					     &min_downscale, &max_upscale);
		/*
		 * Convert to drm convention: 16.16 fixed point, instead of dc's
		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
		 * dst/src, so the factors are flipped.
		 */
		min_scale = (1000 << 16) / max_upscale;
		max_scale = (1000 << 16) / min_downscale;
	}

	return drm_atomic_helper_check_plane_state(
		state, new_crtc_state, min_scale, max_scale, true, true);
}

static int dm_plane_atomic_check(struct drm_plane *plane,
				 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_plane_state *dm_plane_state;
	struct dc_scaling_info scaling_info;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	trace_amdgpu_dm_plane_atomic_check(new_plane_state);

	dm_plane_state = to_dm_plane_state(new_plane_state);

	if (!dm_plane_state->dc_state)
		return 0;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state,
					      new_plane_state->crtc);
	if (!new_crtc_state)
		return -EINVAL;

	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
	if (ret)
		return ret;

	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
	if (ret)
		return ret;

	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
		return 0;

	return -EINVAL;
}

static int dm_plane_atomic_async_check(struct drm_plane *plane,
				       struct drm_atomic_state *state)
{
	/* Only support async updates on cursor planes. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		return -EINVAL;

	return 0;
}

static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);

	trace_amdgpu_dm_atomic_update_cursor(new_state);

	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}

static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};

/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so internal drm
 * check will succeed, and let DC implement proper check
 */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XRGB16161616,
	DRM_FORMAT_XBGR16161616,
	DRM_FORMAT_ARGB16161616,
	DRM_FORMAT_ABGR16161616,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565,
};

static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565
};

static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};

static int get_plane_formats(const struct drm_plane *plane,
			     const struct dc_plane_cap *plane_cap,
			     uint32_t *formats, int max_formats)
{
	int i, num_formats = 0;

	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * caps list.
	 */
	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = rgb_formats[i];
		}

		if (plane_cap && plane_cap->pixel_format_support.nv12)
			formats[num_formats++] = DRM_FORMAT_NV12;
		if (plane_cap && plane_cap->pixel_format_support.p010)
			formats[num_formats++] = DRM_FORMAT_P010;
		if (plane_cap && plane_cap->pixel_format_support.fp16) {
			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
		}
		break;

	case DRM_PLANE_TYPE_OVERLAY:
		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = overlay_formats[i];
		}
		break;

	case DRM_PLANE_TYPE_CURSOR:
		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = cursor_formats[i];
		}
		break;
	}

	return num_formats;
}

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res = -EPERM;
	unsigned int supported_rotations;
	uint64_t *modifiers = NULL;

	num_formats = get_plane_formats(plane, plane_cap, formats,
					ARRAY_SIZE(formats));

	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
	if (res)
		return res;

	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       modifiers, plane->type, NULL);
	kfree(modifiers);
	if (res)
		return res;

	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	supported_rotations =
		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;

	if (dm->adev->asic_type >= CHIP_BONAIRE &&
	    plane->type != DRM_PLANE_TYPE_CURSOR)
		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
						   supported_rotations);

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}

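/*
 * Allocate and initialize an amdgpu_crtc together with its dedicated
 * cursor plane, then register colour management (LUT) support for it.
 */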
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;

	int res = -ENOMEM;

	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;

	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);

	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc)
		goto fail;

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			cursor_plane,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	/* Create (reset) the plane state */
	if (acrtc->base.funcs->reset)
		acrtc->base.funcs->reset(&acrtc->base);

	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;
	acrtc->otg_inst = -1;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
				   true, MAX_COLOR_LUT_ENTRIES);
	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

	return 0;

fail:
	kfree(acrtc);
	kfree(cursor_plane);
	return res;
}

static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_LVDS:
		return DRM_MODE_CONNECTOR_LVDS;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;
	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}

static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}

static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}
	}
}

static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
			     char *name,
			     int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;
}

static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
		    common_modes[i].h > native_mode->vdisplay ||
		    (common_modes[i].w == native_mode->hdisplay &&
		     common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
			    common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}

static void amdgpu_set_panel_orientation(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	const struct drm_display_mode *native_mode;

	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
		return;

	encoder = amdgpu_dm_connector_to_encoder(connector);
	if (!encoder)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	native_mode = &amdgpu_encoder->native_mode;
	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
		return;

	drm_connector_set_panel_orientation_with_quirk(connector,
						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
						       native_mode->hdisplay,
						       native_mode->vdisplay);
}

static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/* sorting the probed modes before calling function
		 * amdgpu_dm_get_native_mode() since EDID can have
		 * more than one preferred mode. The modes that are
		 * later in the probed mode list could be of higher
		 * and preferred resolution. For example, 3840x2160
		 * resolution in base EDID preferred timing and 4096x2160
		 * preferred resolution in DID extension block later.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);

		/* Freesync capabilities are reset by calling
		 * drm_add_edid_modes() and need to be
		 * restored here.
		 */
		amdgpu_dm_update_freesync_caps(connector, edid);

		amdgpu_set_panel_orientation(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
			      struct drm_display_mode *mode)
{
	struct drm_display_mode *m;

	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
		if (drm_mode_equal(m, mode))
			return true;
	}

	return false;
}

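/*
 * Synthesize additional fixed-refresh modes for FreeSync panels by
 * stretching the vertical blanking of the highest-refresh probed mode to
 * hit a set of common video rates that fall within the panel's VRR range.
 */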
static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
{
	const struct drm_display_mode *m;
	struct drm_display_mode *new_mode;
	uint i;
	uint32_t new_modes_count = 0;

	/* Standard FPS values
	 *
	 * 23.976   - TV/NTSC
	 * 24       - Cinema
	 * 25       - TV/PAL
	 * 29.97    - TV/NTSC
	 * 30       - TV/NTSC
	 * 48       - Cinema HFR
	 * 50       - TV/PAL
	 * 60       - Commonly used
	 * 72, 96   - Multiples of 24
	 */
	static const uint32_t common_rates[] = {
		23976, 24000, 25000, 29970, 30000,
		48000, 50000, 60000, 72000, 96000
	};

	/*
	 * Use the mode with the highest refresh rate among the probed modes
	 * as the base; the new modes are derived from it by stretching its
	 * vertical blanking to hit the target rates.
	 */
	m = get_highest_refresh_rate_mode(aconnector, true);
	if (!m)
		return 0;

	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
		uint64_t target_vtotal, target_vtotal_diff;
		uint64_t num, den;

		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
			continue;

		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
		    common_rates[i] > aconnector->max_vfreq * 1000)
			continue;

		num = (unsigned long long)m->clock * 1000 * 1000;
		den = common_rates[i] * (unsigned long long)m->htotal;
		target_vtotal = div_u64(num, den);
		target_vtotal_diff = target_vtotal - m->vtotal;

		/* Check for illegal modes */
		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
		    m->vtotal + target_vtotal_diff < m->vsync_end)
			continue;

		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
		if (!new_mode)
			goto out;

		new_mode->vtotal += (u16)target_vtotal_diff;
		new_mode->vsync_start += (u16)target_vtotal_diff;
		new_mode->vsync_end += (u16)target_vtotal_diff;
		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
		new_mode->type |= DRM_MODE_TYPE_DRIVER;

		if (!is_duplicate_mode(aconnector, new_mode)) {
			drm_mode_probed_add(&aconnector->base, new_mode);
			new_modes_count += 1;
		} else {
			drm_mode_destroy(aconnector->base.dev, new_mode);
		}
	}
out:
	return new_modes_count;
}

static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
						   struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);

	if (!(amdgpu_freesync_vid_mode && edid))
		return;

	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
		amdgpu_dm_connector->num_modes +=
			add_fs_modes(amdgpu_dm_connector);
}

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
				drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
		amdgpu_dm_connector_add_freesync_modes(connector, edid);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE;
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}

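/*
 * I2C-over-DDC transfer: translate the Linux i2c_msg array into a DC
 * i2c_command and submit it on the link's DDC channel. Returns the number
 * of messages transferred on success, or -EIO on failure.
 */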
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}

static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};

static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}

/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init_with_ddc(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type,
			&i2c->base);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}
	return res;
}

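/* Encoder possible_crtcs mask: one bit per CRTC, capped at six CRTCs. */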
int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
	switch (adev->mode_info.num_crtc) {
	case 1:
		return 0x1;
	case 2:
		return 0x3;
	case 3:
		return 0x7;
	case 4:
		return 0xf;
	case 5:
		return 0x1f;
	case 6:
	default:
		return 0x3f;
	}
}

static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	int res = drm_encoder_init(dev,
				   &aencoder->base,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
				   NULL);

	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	if (!res)
		aencoder->encoder_id = link_index;
	else
		aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

	return res;
}

static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * This is not a correct translation, but it works as long as the
	 * VBLANK constant is the same as PFLIP.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		amdgpu_irq_get(
			adev,
			&adev->vline0_irq,
			irq_type);
#endif
	} else {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		amdgpu_irq_put(
			adev,
			&adev->vline0_irq,
			irq_type);
#endif
		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}

static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
				      struct amdgpu_crtc *acrtc)
{
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

	/*
	 * This reads the current state for the IRQ and force reapplies
	 * the setting to hardware.
	 */
	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
}

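/*
 * Returns true when the scaling mode or the effective underscan borders
 * differ between the old and new connector state, i.e. when the stream's
 * scaling configuration must be reprogrammed.
 */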
static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}
8068
8069#ifdef CONFIG_DRM_AMD_DC_HDCP
8070static bool is_content_protection_different(struct drm_connector_state *state,
8071 const struct drm_connector_state *old_state,
8072 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8073{
8074 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8075 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8076
8077
8078 if (old_state->hdcp_content_type != state->hdcp_content_type &&
8079 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8080 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8081 return true;
8082 }
8083
8084
8085
8086
8087
8088 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8089 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8090 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8091 return false;
8092 }
8093
8094
8095
8096
8097
8098 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8099 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8100 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8101
8102
8103
8104
8105
8106
8107
8108
8109
8110 if (!(old_state->crtc && old_state->crtc->enabled) &&
8111 state->crtc && state->crtc->enabled &&
8112 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8113 dm_con_state->update_hdcp = false;
8114 return true;
8115 }
8116
8117 /* Hot-plug, headless s3, dpms
8118 *
8119 * Only start HDCP if the display is connected/enabled.
8120 * update_hdcp flag will be set to false until the next
8121 * HPD comes in.
8122 *
8123 * Handles:	DESIRED -> DESIRED (Special case)
8124 */
8125 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8126 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8127 dm_con_state->update_hdcp = false;
8128 return true;
8129 }
8130
8131 /*
8132 * Handles:	UNDESIRED -> UNDESIRED
8133 *		DESIRED -> DESIRED
8134 *		ENABLED -> ENABLED
8135 */
8136 if (old_state->content_protection == state->content_protection)
8137 return false;
8138
8139 /*
8140 * Handles:	UNDESIRED -> DESIRED
8141 *		DESIRED -> UNDESIRED
8142 *		ENABLED -> UNDESIRED
8143 */
8144 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8145 return true;
8146
8147 /*
8148 * Handles:	DESIRED -> ENABLED
8149 */
8150 return false;
8151}
8152
8153#endif
8154static void remove_stream(struct amdgpu_device *adev,
8155 struct amdgpu_crtc *acrtc,
8156 struct dc_stream_state *stream)
8157{
8158 /* this is the update mode case */
8159
8160 acrtc->otg_inst = -1;
8161 acrtc->enabled = false;
8162}
8163
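/*
 * Translate the cursor plane's DRM coordinates into a DC cursor position.
 * Negative on-screen coordinates are clamped to zero and expressed via the
 * hotspot instead. Returns -EINVAL if the cursor exceeds the hardware's
 * maximum cursor size.
 */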
8164static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8165 struct dc_cursor_position *position)
8166{
8167 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8168 int x, y;
8169 int xorigin = 0, yorigin = 0;
8170
8171 if (!crtc || !plane->state->fb)
8172 return 0;
8173
8174 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8175 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8176 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8177 __func__,
8178 plane->state->crtc_w,
8179 plane->state->crtc_h);
8180 return -EINVAL;
8181 }
8182
8183 x = plane->state->crtc_x;
8184 y = plane->state->crtc_y;
8185
8186 if (x <= -amdgpu_crtc->max_cursor_width ||
8187 y <= -amdgpu_crtc->max_cursor_height)
8188 return 0;
8189
8190 if (x < 0) {
8191 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8192 x = 0;
8193 }
8194 if (y < 0) {
8195 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8196 y = 0;
8197 }
8198 position->enable = true;
8199 position->translate_by_source = true;
8200 position->x = x;
8201 position->y = y;
8202 position->x_hotspot = xorigin;
8203 position->y_hotspot = yorigin;
8204
8205 return 0;
8206}
8207
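/*
 * Program the hardware cursor for a cursor plane update: derive position
 * and attributes from the plane state and apply them to the stream under
 * the dc_lock. A non-enabled position simply turns the cursor off.
 */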
8208static void handle_cursor_update(struct drm_plane *plane,
8209 struct drm_plane_state *old_plane_state)
8210{
8211 struct amdgpu_device *adev = drm_to_adev(plane->dev);
8212 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8213 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8214 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8215 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8216 uint64_t address = afb ? afb->address : 0;
8217 struct dc_cursor_position position = {0};
8218 struct dc_cursor_attributes attributes;
8219 int ret;
8220
8221 if (!plane->state->fb && !old_plane_state->fb)
8222 return;
8223
8224 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8225 __func__,
8226 amdgpu_crtc->crtc_id,
8227 plane->state->crtc_w,
8228 plane->state->crtc_h);
8229
8230 ret = get_cursor_position(plane, crtc, &position);
8231 if (ret)
8232 return;
8233
8234 if (!position.enable) {
8235 /* turn off cursor */
8236 if (crtc_state && crtc_state->stream) {
8237 mutex_lock(&adev->dm.dc_lock);
8238 dc_stream_set_cursor_position(crtc_state->stream,
8239 &position);
8240 mutex_unlock(&adev->dm.dc_lock);
8241 }
8242 return;
8243 }
8244
8245 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8246 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8247
8248 memset(&attributes, 0, sizeof(attributes));
8249 attributes.address.high_part = upper_32_bits(address);
8250 attributes.address.low_part = lower_32_bits(address);
8251 attributes.width = plane->state->crtc_w;
8252 attributes.height = plane->state->crtc_h;
8253 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8254 attributes.rotation_angle = 0;
8255 attributes.attribute_flags.value = 0;
8256
8257 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8258
8259 if (crtc_state->stream) {
8260 mutex_lock(&adev->dm.dc_lock);
8261 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8262 &attributes))
8263 DRM_ERROR("DC failed to set cursor attributes\n");
8264
8265 if (!dc_stream_set_cursor_position(crtc_state->stream,
8266 &position))
8267 DRM_ERROR("DC failed to set cursor position\n");
8268 mutex_unlock(&adev->dm.dc_lock);
8269 }
8270}
8271
8272static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8273{
8274
8275 assert_spin_locked(&acrtc->base.dev->event_lock);
8276 WARN_ON(acrtc->event);
8277
8278 acrtc->event = acrtc->base.state->event;
8279
8280 /* Set the flip status */
8281 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8282
8283 /* Mark this event as consumed */
8284 acrtc->base.state->event = NULL;
8285
8286 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8287 acrtc->crtc_id);
8288}
8289
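/*
 * Update the VRR state tracked for a stream at flip time: run the
 * freesync module's pre-flip handling, rebuild the VRR infopacket, and
 * record whether the timing parameters or infopacket changed so the
 * commit can push the updates to DC.
 */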
8290static void update_freesync_state_on_stream(
8291 struct amdgpu_display_manager *dm,
8292 struct dm_crtc_state *new_crtc_state,
8293 struct dc_stream_state *new_stream,
8294 struct dc_plane_state *surface,
8295 u32 flip_timestamp_in_us)
8296{
8297 struct mod_vrr_params vrr_params;
8298 struct dc_info_packet vrr_infopacket = {0};
8299 struct amdgpu_device *adev = dm->adev;
8300 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8301 unsigned long flags;
8302 bool pack_sdp_v1_3 = false;
8303
8304 if (!new_stream)
8305 return;
8306
8307 /*
8308 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8309 * For now it's sufficient to just guard against these conditions.
8310 */
8311
8312 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8313 return;
8314
8315 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8316 vrr_params = acrtc->dm_irq_params.vrr_params;
8317
8318 if (surface) {
8319 mod_freesync_handle_preflip(
8320 dm->freesync_module,
8321 surface,
8322 new_stream,
8323 flip_timestamp_in_us,
8324 &vrr_params);
8325
8326 if (adev->family < AMDGPU_FAMILY_AI &&
8327 amdgpu_dm_vrr_active(new_crtc_state)) {
8328 mod_freesync_handle_v_update(dm->freesync_module,
8329 new_stream, &vrr_params);
8330
8331 /* Need to call this before the frame ends. */
8332 dc_stream_adjust_vmin_vmax(dm->dc,
8333 new_crtc_state->stream,
8334 &vrr_params.adjust);
8335 }
8336 }
8337
8338 mod_freesync_build_vrr_infopacket(
8339 dm->freesync_module,
8340 new_stream,
8341 &vrr_params,
8342 PACKET_TYPE_VRR,
8343 TRANSFER_FUNC_UNKNOWN,
8344 &vrr_infopacket,
8345 pack_sdp_v1_3);
8346
8347 new_crtc_state->freesync_timing_changed |=
8348 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8349 &vrr_params.adjust,
8350 sizeof(vrr_params.adjust)) != 0);
8351
8352 new_crtc_state->freesync_vrr_info_changed |=
8353 (memcmp(&new_crtc_state->vrr_infopacket,
8354 &vrr_infopacket,
8355 sizeof(vrr_infopacket)) != 0);
8356
8357 acrtc->dm_irq_params.vrr_params = vrr_params;
8358 new_crtc_state->vrr_infopacket = vrr_infopacket;
8359
8360 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8361 new_stream->vrr_infopacket = vrr_infopacket;
8362
8363 if (new_crtc_state->freesync_vrr_info_changed)
8364 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8365 new_crtc_state->base.crtc->base.id,
8366 (int)new_crtc_state->base.vrr_enabled,
8367 (int)vrr_params.state);
8368
8369 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8370}
8371
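/*
 * Recompute the freesync/VRR parameters the interrupt handlers use for a
 * CRTC from its new state, and store them in the irq params under the
 * event lock.
 */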
8372static void update_stream_irq_parameters(
8373 struct amdgpu_display_manager *dm,
8374 struct dm_crtc_state *new_crtc_state)
8375{
8376 struct dc_stream_state *new_stream = new_crtc_state->stream;
8377 struct mod_vrr_params vrr_params;
8378 struct mod_freesync_config config = new_crtc_state->freesync_config;
8379 struct amdgpu_device *adev = dm->adev;
8380 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8381 unsigned long flags;
8382
8383 if (!new_stream)
8384 return;
8385
8386 /*
8387 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8388 * For now it's sufficient to just guard against these conditions.
8389 */
8390 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8391 return;
8392
8393 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8394 vrr_params = acrtc->dm_irq_params.vrr_params;
8395
8396 if (new_crtc_state->vrr_supported &&
8397 config.min_refresh_in_uhz &&
8398 config.max_refresh_in_uhz) {
8399 /*
8400 * if freesync compatible mode was set, config.state will be set
8401 * in atomic check
8402 */
8403 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8404 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8405 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8406 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8407 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8408 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8409 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8410 } else {
8411 config.state = new_crtc_state->base.vrr_enabled ?
8412 VRR_STATE_ACTIVE_VARIABLE :
8413 VRR_STATE_INACTIVE;
8414 }
8415 } else {
8416 config.state = VRR_STATE_UNSUPPORTED;
8417 }
8418
8419 mod_freesync_build_vrr_params(dm->freesync_module,
8420 new_stream,
8421 &config, &vrr_params);
8422
8423 new_crtc_state->freesync_timing_changed |=
8424 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8425 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8426
8427 new_crtc_state->freesync_config = config;
8428
8429 acrtc->dm_irq_params.freesync_config = config;
8430 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8431 acrtc->dm_irq_params.vrr_params = vrr_params;
8432 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8433}
8434
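/*
 * Handle VRR enable/disable transitions for a CRTC by switching the
 * vupdate interrupt on or off and taking or dropping the matching vblank
 * reference.
 */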
8435static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8436 struct dm_crtc_state *new_state)
8437{
8438 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8439 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8440
8441 if (!old_vrr_active && new_vrr_active) {
8442 /* Transition VRR inactive -> active:
8443 * While VRR is active the vblank interrupt must stay enabled and
8444 * the core vblank handling is driven from the VUPDATE interrupt,
8445 * which fires at the end of the variable front porch.
8446 *
8447 * Take a vblank reference and enable the vupdate interrupt for
8448 * the duration of VRR operation.
8449 */
8450 dm_set_vupdate_irq(new_state->base.crtc, true);
8451 drm_crtc_vblank_get(new_state->base.crtc);
8452 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8453 __func__, new_state->base.crtc->base.id);
8454 } else if (old_vrr_active && !new_vrr_active) {
8455 /* Transition VRR active -> inactive:
8456 * Allow vblank irq disable again for fixed refresh rate.
8457 */
8458 dm_set_vupdate_irq(new_state->base.crtc, false);
8459 drm_crtc_vblank_put(new_state->base.crtc);
8460 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8461 __func__, new_state->base.crtc->base.id);
8462 }
8463}
8464
8465static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8466{
8467 struct drm_plane *plane;
8468 struct drm_plane_state *old_plane_state;
8469 int i;
8470
8471 /*
8472 * TODO: Make this per-stream so we don't issue redundant updates for
8473 * commits with multiple streams.
8474 */
8475 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8476 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8477 handle_cursor_update(plane, old_plane_state);
8478}
8479
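/*
 * Program all updated planes for a CRTC during an atomic commit: build a
 * dc_surface_update bundle for the changed planes, throttle flips against
 * the target vblank, arm the pageflip event when needed, and hand the
 * bundle to DC together with any stream-level updates.
 */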
8480static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8481 struct dc_state *dc_state,
8482 struct drm_device *dev,
8483 struct amdgpu_display_manager *dm,
8484 struct drm_crtc *pcrtc,
8485 bool wait_for_vblank)
8486{
8487 uint32_t i;
8488 uint64_t timestamp_ns;
8489 struct drm_plane *plane;
8490 struct drm_plane_state *old_plane_state, *new_plane_state;
8491 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8492 struct drm_crtc_state *new_pcrtc_state =
8493 drm_atomic_get_new_crtc_state(state, pcrtc);
8494 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8495 struct dm_crtc_state *dm_old_crtc_state =
8496 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8497 int planes_count = 0, vpos, hpos;
8498 long r;
8499 unsigned long flags;
8500 struct amdgpu_bo *abo;
8501 uint32_t target_vblank, last_flip_vblank;
8502 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8503 bool pflip_present = false;
8504 struct {
8505 struct dc_surface_update surface_updates[MAX_SURFACES];
8506 struct dc_plane_info plane_infos[MAX_SURFACES];
8507 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8508 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8509 struct dc_stream_update stream_update;
8510 } *bundle;
8511
8512 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8513
8514 if (!bundle) {
8515 dm_error("Failed to allocate update bundle\n");
8516 goto cleanup;
8517 }
8518
8519 /*
8520 * Disable the cursor first if we're disabling all the planes.
8521 * It'll remain on the screen after the planes are re-enabled
8522 * if we don't.
8523 */
8524 if (acrtc_state->active_planes == 0)
8525 amdgpu_dm_commit_cursors(state);
8526
8527 /* update planes when needed */
8528 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8529 struct drm_crtc *crtc = new_plane_state->crtc;
8530 struct drm_crtc_state *new_crtc_state;
8531 struct drm_framebuffer *fb = new_plane_state->fb;
8532 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8533 bool plane_needs_flip;
8534 struct dc_plane_state *dc_plane;
8535 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8536
8537 /* Cursor plane is handled after stream updates */
8538 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8539 continue;
8540
8541 if (!fb || !crtc || pcrtc != crtc)
8542 continue;
8543
8544 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8545 if (!new_crtc_state->active)
8546 continue;
8547
8548 dc_plane = dm_new_plane_state->dc_state;
8549
8550 bundle->surface_updates[planes_count].surface = dc_plane;
8551 if (new_pcrtc_state->color_mgmt_changed) {
8552 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8553 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8554 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8555 }
8556
8557 fill_dc_scaling_info(new_plane_state,
8558 &bundle->scaling_infos[planes_count]);
8559
8560 bundle->surface_updates[planes_count].scaling_info =
8561 &bundle->scaling_infos[planes_count];
8562
8563 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8564
8565 pflip_present = pflip_present || plane_needs_flip;
8566
8567 if (!plane_needs_flip) {
8568 planes_count += 1;
8569 continue;
8570 }
8571
8572 abo = gem_to_amdgpu_bo(fb->obj[0]);
8573
8574 /*
8575 * Wait for all fences on this FB. Do limited wait to avoid
8576 * deadlock during GPU reset when this fence will not signal
8577 * but we hold reservation lock for the BO.
8578 */
8579 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
8580 msecs_to_jiffies(5000));
8581 if (unlikely(r <= 0))
8582 DRM_ERROR("Waiting for fences timed out!");
8583
8584 fill_dc_plane_info_and_addr(
8585 dm->adev, new_plane_state,
8586 afb->tiling_flags,
8587 &bundle->plane_infos[planes_count],
8588 &bundle->flip_addrs[planes_count].address,
8589 afb->tmz_surface, false);
8590
8591 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8592 new_plane_state->plane->index,
8593 bundle->plane_infos[planes_count].dcc.enable);
8594
8595 bundle->surface_updates[planes_count].plane_info =
8596 &bundle->plane_infos[planes_count];
8597
8598 /*
8599 * Only allow immediate flips for fast updates that don't
8600 * change FB pitch, DCC state, rotation or mirroring.
8601 */
8602 bundle->flip_addrs[planes_count].flip_immediate =
8603 crtc->state->async_flip &&
8604 acrtc_state->update_type == UPDATE_TYPE_FAST;
8605
8606 timestamp_ns = ktime_get_ns();
8607 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8608 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8609 bundle->surface_updates[planes_count].surface = dc_plane;
8610
8611 if (!bundle->surface_updates[planes_count].surface) {
8612 DRM_ERROR("No surface for CRTC: id=%d\n",
8613 acrtc_attach->crtc_id);
8614 continue;
8615 }
8616
8617 if (plane == pcrtc->primary)
8618 update_freesync_state_on_stream(
8619 dm,
8620 acrtc_state,
8621 acrtc_state->stream,
8622 dc_plane,
8623 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8624
8625 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8626 __func__,
8627 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8628 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8629
8630 planes_count += 1;
8631
8632 }
8633
8634 if (pflip_present) {
8635 if (!vrr_active) {
8636 /* Use old throttling in non-vrr fixed refresh rate mode
8637 * to keep flip scheduling based on target vblank counts
8638 * working. In fixed refresh mode the hardware vblank
8639 * counter is reliable, so it can be read directly and
8640 * used as the baseline for the flip target.
8641 */
8642 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8643 } else {
8644 /* For variable refresh rate mode only:
8645 * Get vblank of last completed flip to avoid > 1 fps
8646 * (same as throttling in fixed refresh mode). The
8647 * hardware vblank counter is unreliable while VRR is
8648 * active, so instead use the vblank count saved by the
8649 * pageflip interrupt handler at the last completed flip.
8650 *
8651 * Read it under the spinlock protecting the irq params.
8652 */
8653 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8654 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8655 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8656 }
8657
8658 target_vblank = last_flip_vblank + wait_for_vblank;
8659
8660 /*
8661 * Wait until we're out of the vertical blank period before the one
8662 * targeted by the flip
8663 */
8664 while ((acrtc_attach->enabled &&
8665 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8666 0, &vpos, &hpos, NULL,
8667 NULL, &pcrtc->hwmode)
8668 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8669 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8670 (int)(target_vblank -
8671 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8672 usleep_range(1000, 1100);
8673 }
8674
8675 /**
8676 * Prepare the flip event for broadcast to userspace.
8677 *
8678 * Only arm the pageflip path here when there is at least one
8679 * active plane on the CRTC; with no active planes the event is
8680 * sent directly from the commit tail once programming has
8681 * completed.
8682 */
8683 if (acrtc_attach->base.state->event &&
8684 acrtc_state->active_planes > 0) {
8685 drm_crtc_vblank_get(pcrtc);
8686
8687 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8688
8689 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8690 prepare_flip_isr(acrtc_attach);
8691
8692 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8693 }
8694
8695 if (acrtc_state->stream) {
8696 if (acrtc_state->freesync_vrr_info_changed)
8697 bundle->stream_update.vrr_infopacket =
8698 &acrtc_state->stream->vrr_infopacket;
8699 }
8700 }
8701
8702 /* Update the planes if changed or disable if we don't have any. */
8703 if ((planes_count || acrtc_state->active_planes == 0) &&
8704 acrtc_state->stream) {
8705#if defined(CONFIG_DRM_AMD_DC_DCN)
8706 /*
8707 * If PSR or idle optimizations are enabled then flush the vblank
8708 * control workqueue so no pending work races with this update.
8709 */
8710 if (dm->vblank_control_workqueue)
8711 flush_workqueue(dm->vblank_control_workqueue);
8712#endif
8713
8714 bundle->stream_update.stream = acrtc_state->stream;
8715 if (new_pcrtc_state->mode_changed) {
8716 bundle->stream_update.src = acrtc_state->stream->src;
8717 bundle->stream_update.dst = acrtc_state->stream->dst;
8718 }
8719
8720 if (new_pcrtc_state->color_mgmt_changed) {
8721 /*
8722 * TODO: This isn't fully correct since we've actually
8723 * already modified the stream in place.
8724 */
8725 bundle->stream_update.gamut_remap =
8726 &acrtc_state->stream->gamut_remap_matrix;
8727 bundle->stream_update.output_csc_transform =
8728 &acrtc_state->stream->csc_color_matrix;
8729 bundle->stream_update.out_transfer_func =
8730 acrtc_state->stream->out_transfer_func;
8731 }
8732
8733 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8734 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8735 bundle->stream_update.abm_level = &acrtc_state->abm_level;
8736
8737 /*
8738 * If FreeSync state on the stream has changed then we need to
8739 * re-adjust the min/max bounds now that DC doesn't handle this
8740 * as part of commit.
8741 */
8742 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8743 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8744 dc_stream_adjust_vmin_vmax(
8745 dm->dc, acrtc_state->stream,
8746 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8747 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8748 }
8749 mutex_lock(&dm->dc_lock);
8750 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8751 acrtc_state->stream->link->psr_settings.psr_allow_active)
8752 amdgpu_dm_psr_disable(acrtc_state->stream);
8753
8754 dc_commit_updates_for_stream(dm->dc,
8755 bundle->surface_updates,
8756 planes_count,
8757 acrtc_state->stream,
8758 &bundle->stream_update,
8759 dc_state);
8760
8761 /**
8762 * Enable or disable the interrupts on the backend.
8763 *
8764 * Most pipes are put into power gating when unused.
8765 *
8766 * When power gating is enabled on a pipe we lose the
8767 * interrupt enablement state when power gating is disabled.
8768 *
8769 * So we need to update the IRQ control state in hardware
8770 * whenever the pipe turns on (since it could be previously
8771 * power gated) or off (since some pipes can't be power gated
8772 * on some ASICs).
8773 */
8774 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8775 dm_update_pflip_irq_state(drm_to_adev(dev),
8776 acrtc_attach);
8777
8778 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8779 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8780 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8781 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8782
8783 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
8784 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
8785 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
8786 struct amdgpu_dm_connector *aconn =
8787 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
8788
8789 if (aconn->psr_skip_count > 0)
8790 aconn->psr_skip_count--;
8791
8792 /* Allow PSR when skip count is 0. */
8793 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
8794 } else {
8795 acrtc_attach->dm_irq_params.allow_psr_entry = false;
8796 }
8797
8798 mutex_unlock(&dm->dc_lock);
8799 }
8800
8801 /*
8802 * Update cursor state *after* programming all the planes.
8803 * This avoids redundant programming in the case where we're going
8804 * to be disabling a single plane - those pipes are being disabled.
8805 */
8806 if (acrtc_state->active_planes)
8807 amdgpu_dm_commit_cursors(state);
8808
8809cleanup:
8810 kfree(bundle);
8811}
8812
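/*
 * Notify the audio component about connector changes made by this commit:
 * first signal removals for connectors whose CRTC changed or went through
 * a modeset, then report the audio instance for each newly enabled stream.
 */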
8813static void amdgpu_dm_commit_audio(struct drm_device *dev,
8814 struct drm_atomic_state *state)
8815{
8816 struct amdgpu_device *adev = drm_to_adev(dev);
8817 struct amdgpu_dm_connector *aconnector;
8818 struct drm_connector *connector;
8819 struct drm_connector_state *old_con_state, *new_con_state;
8820 struct drm_crtc_state *new_crtc_state;
8821 struct dm_crtc_state *new_dm_crtc_state;
8822 const struct dc_stream_status *status;
8823 int i, inst;
8824
8825 /* Notify device removals. */
8826 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8827 if (old_con_state->crtc != new_con_state->crtc) {
8828 /* CRTC changes require notification. */
8829 goto notify;
8830 }
8831
8832 if (!new_con_state->crtc)
8833 continue;
8834
8835 new_crtc_state = drm_atomic_get_new_crtc_state(
8836 state, new_con_state->crtc);
8837
8838 if (!new_crtc_state)
8839 continue;
8840
8841 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8842 continue;
8843
8844 notify:
8845 aconnector = to_amdgpu_dm_connector(connector);
8846
8847 mutex_lock(&adev->dm.audio_lock);
8848 inst = aconnector->audio_inst;
8849 aconnector->audio_inst = -1;
8850 mutex_unlock(&adev->dm.audio_lock);
8851
8852 amdgpu_dm_audio_eld_notify(adev, inst);
8853 }
8854
8855 /* Notify audio device additions. */
8856 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8857 if (!new_con_state->crtc)
8858 continue;
8859
8860 new_crtc_state = drm_atomic_get_new_crtc_state(
8861 state, new_con_state->crtc);
8862
8863 if (!new_crtc_state)
8864 continue;
8865
8866 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8867 continue;
8868
8869 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8870 if (!new_dm_crtc_state->stream)
8871 continue;
8872
8873 status = dc_stream_get_status(new_dm_crtc_state->stream);
8874 if (!status)
8875 continue;
8876
8877 aconnector = to_amdgpu_dm_connector(connector);
8878
8879 mutex_lock(&adev->dm.audio_lock);
8880 inst = status->audio_inst;
8881 aconnector->audio_inst = inst;
8882 mutex_unlock(&adev->dm.audio_lock);
8883
8884 amdgpu_dm_audio_eld_notify(adev, inst);
8885 }
8886}
8887
8888/*
8889 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8890 * @crtc_state: the DRM CRTC state
8891 * @stream_state: the DC stream state.
8892 *
8893 * Copy the mirrored transient state flags from DRM, to the DC
8894 * stream state.
8895 */
8896static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8897 struct dc_stream_state *stream_state)
8898{
8899 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8900}
8901
8902/**
8903 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8904 * @state: The atomic state to commit
8905 *
8906 * This will tell DC to commit the constructed DC state from atomic_check,
8907 * programming the hardware. Any failures here implies a hardware failure, since
8908 * atomic check should have filtered anything non-kosher.
8909 */
8910static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8911{
8912 struct drm_device *dev = state->dev;
8913 struct amdgpu_device *adev = drm_to_adev(dev);
8914 struct amdgpu_display_manager *dm = &adev->dm;
8915 struct dm_atomic_state *dm_state;
8916 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8917 uint32_t i, j;
8918 struct drm_crtc *crtc;
8919 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8920 unsigned long flags;
8921 bool wait_for_vblank = true;
8922 struct drm_connector *connector;
8923 struct drm_connector_state *old_con_state, *new_con_state;
8924 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8925 int crtc_disable_count = 0;
8926 bool mode_set_reset_required = false;
8927
8928 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8929
8930 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8931
8932 dm_state = dm_atomic_get_new_state(state);
8933 if (dm_state && dm_state->context) {
8934 dc_state = dm_state->context;
8935 } else {
8936 /* No state changes, retain current state. */
8937 dc_state_temp = dc_create_state(dm->dc);
8938 ASSERT(dc_state_temp);
8939 dc_state = dc_state_temp;
8940 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8941 }
8942
8943 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8944 new_crtc_state, i) {
8945 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8946
8947 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8948
8949 if (old_crtc_state->active &&
8950 (!new_crtc_state->active ||
8951 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8952 manage_dm_interrupts(adev, acrtc, false);
8953 dc_stream_release(dm_old_crtc_state->stream);
8954 }
8955 }
8956
8957 drm_atomic_helper_calc_timestamping_constants(state);
8958
8959 /* update changed items */
8960 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8961 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8962
8963 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8964 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8965
8966 DRM_DEBUG_ATOMIC(
8967 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8968 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8969 "connectors_changed:%d\n",
8970 acrtc->crtc_id,
8971 new_crtc_state->enable,
8972 new_crtc_state->active,
8973 new_crtc_state->planes_changed,
8974 new_crtc_state->mode_changed,
8975 new_crtc_state->active_changed,
8976 new_crtc_state->connectors_changed);
8977
8978 /* Turn off the HW cursor if the CRTC is being disabled */
8979 if (old_crtc_state->active && !new_crtc_state->active) {
8980 struct dc_cursor_position position;
8981
8982 memset(&position, 0, sizeof(position));
8983 mutex_lock(&dm->dc_lock);
8984 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8985 mutex_unlock(&dm->dc_lock);
8986 }
8987
8988 /* Copy all transient state flags into dc state */
8989 if (dm_new_crtc_state->stream) {
8990 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8991 dm_new_crtc_state->stream);
8992 }
8993
8994 /* handles headless hotplug case, updating new_state and
8995 * aconnector as needed
8996 */
8997
8998 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8999
9000 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9001
9002 if (!dm_new_crtc_state->stream) {
9003 /*
9004 * this could happen because of issues with
9005 * userspace notifications delivery.
9006 * In this case userspace tries to set mode on
9007 * display which is disconnected in fact.
9008 * dc_sink is NULL in this case on aconnector.
9009 * We expect reset mode will come soon.
9010 *
9011 * This can also happen when unplug is done
9012 * during resume sequence ended
9013 *
9014 * In this case, we want to pretend we still
9015 * have a sink to keep the pipe running so that
9016 * hw state is consistent with the sw state
9017 */
9018 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9019 __func__, acrtc->base.base.id);
9020 continue;
9021 }
9022
9023 if (dm_old_crtc_state->stream)
9024 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9025
9026 pm_runtime_get_noresume(dev->dev);
9027
9028 acrtc->enabled = true;
9029 acrtc->hw_mode = new_crtc_state->mode;
9030 crtc->hwmode = new_crtc_state->mode;
9031 mode_set_reset_required = true;
9032 } else if (modereset_required(new_crtc_state)) {
9033 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9034
9035 if (dm_old_crtc_state->stream)
9036 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9037
9038 mode_set_reset_required = true;
9039 }
9040 }
9041
9042 if (dc_state) {
9043 /* If there is a mode set or reset, disable eDP PSR */
9044 if (mode_set_reset_required) {
9045#if defined(CONFIG_DRM_AMD_DC_DCN)
9046 if (dm->vblank_control_workqueue)
9047 flush_workqueue(dm->vblank_control_workqueue);
9048#endif
9049 amdgpu_dm_psr_disable_all(dm);
9050 }
9051
9052 dm_enable_per_frame_crtc_master_sync(dc_state);
9053 mutex_lock(&dm->dc_lock);
9054 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9055#if defined(CONFIG_DRM_AMD_DC_DCN)
9056 /* Allow idle optimization when vblank count is 0 for display off */
9057 if (dm->active_vblank_irq_count == 0)
9058 dc_allow_idle_optimizations(dm->dc, true);
9059#endif
9060 mutex_unlock(&dm->dc_lock);
9061 }
9062
9063 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9064 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9065
9066 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9067
9068 if (dm_new_crtc_state->stream != NULL) {
9069 const struct dc_stream_status *status =
9070 dc_stream_get_status(dm_new_crtc_state->stream);
9071
9072 if (!status)
9073 status = dc_stream_get_status_from_state(dc_state,
9074 dm_new_crtc_state->stream);
9075 if (!status)
9076 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9077 else
9078 acrtc->otg_inst = status->primary_otg_inst;
9079 }
9080 }
9081#ifdef CONFIG_DRM_AMD_DC_HDCP
9082 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9083 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9084 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9085 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9086
9087 new_crtc_state = NULL;
9088
9089 if (acrtc)
9090 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9091
9092 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9093
9094 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9095 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9096 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9097 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9098 dm_new_con_state->update_hdcp = true;
9099 continue;
9100 }
9101
9102 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9103 hdcp_update_display(
9104 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9105 new_con_state->hdcp_content_type,
9106 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9107 }
9108#endif
9109
9110 /* Handle connector state changes */
9111 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9112 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9113 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9114 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9115 struct dc_surface_update dummy_updates[MAX_SURFACES];
9116 struct dc_stream_update stream_update;
9117 struct dc_info_packet hdr_packet;
9118 struct dc_stream_status *status = NULL;
9119 bool abm_changed, hdr_changed, scaling_changed;
9120
9121 memset(&dummy_updates, 0, sizeof(dummy_updates));
9122 memset(&stream_update, 0, sizeof(stream_update));
9123
9124 if (acrtc) {
9125 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9126 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9127 }
9128
9129 /* Skip any modesets/resets */
9130 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9131 continue;
9132
9133 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9134 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9135
9136 scaling_changed = is_scaling_state_different(dm_new_con_state,
9137 dm_old_con_state);
9138
9139 abm_changed = dm_new_crtc_state->abm_level !=
9140 dm_old_crtc_state->abm_level;
9141
9142 hdr_changed =
9143 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9144
9145 if (!scaling_changed && !abm_changed && !hdr_changed)
9146 continue;
9147
9148 stream_update.stream = dm_new_crtc_state->stream;
9149 if (scaling_changed) {
9150 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9151 dm_new_con_state, dm_new_crtc_state->stream);
9152
9153 stream_update.src = dm_new_crtc_state->stream->src;
9154 stream_update.dst = dm_new_crtc_state->stream->dst;
9155 }
9156
9157 if (abm_changed) {
9158 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9159
9160 stream_update.abm_level = &dm_new_crtc_state->abm_level;
9161 }
9162
9163 if (hdr_changed) {
9164 fill_hdr_info_packet(new_con_state, &hdr_packet);
9165 stream_update.hdr_static_metadata = &hdr_packet;
9166 }
9167
9168 status = dc_stream_get_status(dm_new_crtc_state->stream);
9169
9170 if (WARN_ON(!status))
9171 continue;
9172
9173 WARN_ON(!status->plane_count);
9174
9175 /*
9176 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9177 * Here we create an empty update on each plane.
9178 * To fix this, DC should permit updating only stream properties.
9179 */
9180 for (j = 0; j < status->plane_count; j++)
9181 dummy_updates[j].surface = status->plane_states[0];
9182
9183
9184 mutex_lock(&dm->dc_lock);
9185 dc_commit_updates_for_stream(dm->dc,
9186 dummy_updates,
9187 status->plane_count,
9188 dm_new_crtc_state->stream,
9189 &stream_update,
9190 dc_state);
9191 mutex_unlock(&dm->dc_lock);
9192 }
9193
9194 /* Count number of newly disabled CRTCs for dropping PM refs later. */
9195 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9196 new_crtc_state, i) {
9197 if (old_crtc_state->active && !new_crtc_state->active)
9198 crtc_disable_count++;
9199
9200 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9201 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9202
9203 /* For freesync config update on crtc state and params for irq */
9204 update_stream_irq_parameters(dm, dm_new_crtc_state);
9205
9206 /* Handle vrr on->off / off->on transitions */
9207 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9208 dm_new_crtc_state);
9209 }
9210
9211 /**
9212 * Enable interrupts for CRTCs that are newly enabled or went through
9213 * a modeset. It was intentionally deferred until after the front end
9214 * state was modified to wait until the OTG was on and so the IRQ
9215 * handlers didn't access stale fake sinks.
9216 */
9217 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9218 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9219#ifdef CONFIG_DEBUG_FS
9220 bool configure_crc = false;
9221 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9222#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9223 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9224#endif
9225 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9226 cur_crc_src = acrtc->dm_irq_params.crc_src;
9227 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9228#endif
9229 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9230
9231 if (new_crtc_state->active &&
9232 (!old_crtc_state->active ||
9233 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9234 dc_stream_retain(dm_new_crtc_state->stream);
9235 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9236 manage_dm_interrupts(adev, acrtc, true);
9237
9238#ifdef CONFIG_DEBUG_FS
9239 /**
9240 * Frontend may have changed so reapply the CRC capture
9241 * settings for the stream.
9242 */
9243 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9244
9245 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9246 configure_crc = true;
9247#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9248 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9249 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9250 acrtc->dm_irq_params.crc_window.update_win = true;
9251 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9252 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9253 crc_rd_wrk->crtc = crtc;
9254 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9255 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9256 }
9257#endif
9258 }
9259
9260 if (configure_crc)
9261 if (amdgpu_dm_crtc_configure_crc_source(
9262 crtc, dm_new_crtc_state, cur_crc_src))
9263 DRM_DEBUG_DRIVER("Failed to configure crc source");
9264#endif
9265 }
9266 }
9267
9268 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9269 if (new_crtc_state->async_flip)
9270 wait_for_vblank = false;
9271
9272 /* update planes when needed per crtc */
9273 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9274 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9275
9276 if (dm_new_crtc_state->stream)
9277 amdgpu_dm_commit_planes(state, dc_state, dev,
9278 dm, crtc, wait_for_vblank);
9279 }
9280
9281 /* Update audio instances */
9282 amdgpu_dm_commit_audio(dev, state);
9283
9284#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9285 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9286 /* restore the backlight level */
9287 for (i = 0; i < dm->num_of_edps; i++) {
9288 if (dm->backlight_dev[i] &&
9289 (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9290 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9291 }
9292#endif
9293
9294 /* send vblank event on all events not handled in flip and
9295 * mark consumed event for drm_atomic_helper_commit_hw_done
9296 */
9297 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9298 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9299
9300 if (new_crtc_state->event)
9301 drm_send_event_locked(dev, &new_crtc_state->event->base);
9302
9303 new_crtc_state->event = NULL;
9304 }
9305 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9306
9307 /* Signal HW programming completion */
9308 drm_atomic_helper_commit_hw_done(state);
9309
9310 if (wait_for_vblank)
9311 drm_atomic_helper_wait_for_flip_done(dev, state);
9312
9313 drm_atomic_helper_cleanup_planes(dev, state);
9314
9315 /* return the stolen vga memory back to VRAM */
9316 if (!adev->mman.keep_stolen_vga_memory)
9317 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9318 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9319
9320 /*
9321 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9322 * so we can put the GPU into runtime suspend if we're not driving any
9323 * displays anymore
9324 */
9325 for (i = 0; i < crtc_disable_count; i++)
9326 pm_runtime_put_autosuspend(dev->dev);
9327 pm_runtime_mark_last_busy(dev->dev);
9328
9329 if (dc_state_temp)
9330 dc_release_state(dc_state_temp);
9331}
9332
9333
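/*
 * Build and commit a minimal atomic state covering the given connector,
 * its CRTC and primary plane, with mode_changed forced on, so the display
 * path gets fully reprogrammed.
 */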
9334static int dm_force_atomic_commit(struct drm_connector *connector)
9335{
9336 int ret = 0;
9337 struct drm_device *ddev = connector->dev;
9338 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9339 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9340 struct drm_plane *plane = disconnected_acrtc->base.primary;
9341 struct drm_connector_state *conn_state;
9342 struct drm_crtc_state *crtc_state;
9343 struct drm_plane_state *plane_state;
9344
9345 if (!state)
9346 return -ENOMEM;
9347
9348 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9349
9350 /* Construct an atomic state to restore previous display setting */
9351
9352 /*
9353 * Attach connectors to drm_atomic_state
9354 */
9355 conn_state = drm_atomic_get_connector_state(state, connector);
9356
9357 ret = PTR_ERR_OR_ZERO(conn_state);
9358 if (ret)
9359 goto out;
9360
9361 /* Attach crtc to drm_atomic_state */
9362 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9363
9364 ret = PTR_ERR_OR_ZERO(crtc_state);
9365 if (ret)
9366 goto out;
9367
9368 /* force a restore */
9369 crtc_state->mode_changed = true;
9370
9371 /* Attach plane to drm_atomic_state */
9372 plane_state = drm_atomic_get_plane_state(state, plane);
9373
9374 ret = PTR_ERR_OR_ZERO(plane_state);
9375 if (ret)
9376 goto out;
9377
9378 /* Call commit internally with the state we just constructed */
9379 ret = drm_atomic_commit(state);
9380
9381out:
9382 drm_atomic_state_put(state);
9383 if (ret)
9384 DRM_ERROR("Restoring old state failed with %i\n", ret);
9385
9386 return ret;
9387}
9388
9389/*
9390 * This function handles all cases when set mode does not come upon hotplug.
9391 * This includes when a display is unplugged then plugged back into the
9392 * same port and when running without usermode desktop manager support
9393 */
9394void dm_restore_drm_connector_state(struct drm_device *dev,
9395 struct drm_connector *connector)
9396{
9397 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9398 struct amdgpu_crtc *disconnected_acrtc;
9399 struct dm_crtc_state *acrtc_state;
9400
9401 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9402 return;
9403
9404 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9405 if (!disconnected_acrtc)
9406 return;
9407
9408 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9409 if (!acrtc_state->stream)
9410 return;
9411
9412 /*
9413 * If the previous sink is not released and different from the current,
9414 * we deduce we are in a state where we can not rely on usermode call
9415 * to turn on the display, so we do it here
9416 */
9417 if (acrtc_state->stream->sink != aconnector->dc_sink)
9418 dm_force_atomic_commit(&aconnector->base);
9419}
9420
9421/*
9422 * Grabs all modesetting locks to serialize against any blocking commits,
9423 * and waits for completion of all non-blocking commits.
9424 */
9425static int do_aquire_global_lock(struct drm_device *dev,
9426 struct drm_atomic_state *state)
9427{
9428 struct drm_crtc *crtc;
9429 struct drm_crtc_commit *commit;
9430 long ret;
9431
9432 /*
9433 * Adding all modeset locks to acquire_ctx ensures that when the
9434 * framework releases the context, all of the extra locks taken
9435 * here are released as well.
9436 */
9437 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9438 if (ret)
9439 return ret;
9440
9441 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9442 spin_lock(&crtc->commit_lock);
9443 commit = list_first_entry_or_null(&crtc->commit_list,
9444 struct drm_crtc_commit, commit_entry);
9445 if (commit)
9446 drm_crtc_commit_get(commit);
9447 spin_unlock(&crtc->commit_lock);
9448
9449 if (!commit)
9450 continue;
9451
9452 /*
9453 * Make sure all pending HW programming completed and
9454 * page flips done
9455 */
9456 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9457
9458 if (ret > 0)
9459 ret = wait_for_completion_interruptible_timeout(
9460 &commit->flip_done, 10*HZ);
9461
9462 if (ret == 0)
9463 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
9464 crtc->base.id, crtc->name);
9465
9466 drm_crtc_commit_put(commit);
9467 }
9468
9469 return ret < 0 ? ret : 0;
9470}
9471
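/*
 * Derive the freesync configuration for a CRTC from the connector's
 * capabilities and the requested mode: VRR is supported when the mode's
 * refresh rate falls inside the monitor's range, and the state is chosen
 * as fixed, variable or inactive accordingly.
 */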
9472static void get_freesync_config_for_crtc(
9473 struct dm_crtc_state *new_crtc_state,
9474 struct dm_connector_state *new_con_state)
9475{
9476 struct mod_freesync_config config = {0};
9477 struct amdgpu_dm_connector *aconnector =
9478 to_amdgpu_dm_connector(new_con_state->base.connector);
9479 struct drm_display_mode *mode = &new_crtc_state->base.mode;
9480 int vrefresh = drm_mode_vrefresh(mode);
9481 bool fs_vid_mode = false;
9482
9483 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9484 vrefresh >= aconnector->min_vfreq &&
9485 vrefresh <= aconnector->max_vfreq;
9486
9487 if (new_crtc_state->vrr_supported) {
9488 new_crtc_state->stream->ignore_msa_timing_param = true;
9489 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9490
9491 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9492 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9493 config.vsif_supported = true;
9494 config.btr = true;
9495
9496 if (fs_vid_mode) {
9497 config.state = VRR_STATE_ACTIVE_FIXED;
9498 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9499 goto out;
9500 } else if (new_crtc_state->base.vrr_enabled) {
9501 config.state = VRR_STATE_ACTIVE_VARIABLE;
9502 } else {
9503 config.state = VRR_STATE_INACTIVE;
9504 }
9505 }
9506out:
9507 new_crtc_state->freesync_config = config;
9508}
9509
9510static void reset_freesync_config_for_crtc(
9511 struct dm_crtc_state *new_crtc_state)
9512{
9513 new_crtc_state->vrr_supported = false;
9514
9515 memset(&new_crtc_state->vrr_infopacket, 0,
9516 sizeof(new_crtc_state->vrr_infopacket));
9517}
9518
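/*
 * Returns true when the new mode differs from the old one only in the
 * vertical front porch (identical clock, active area and sync width).
 * This is the only timing change that freesync video modes may make
 * without requiring a full modeset.
 */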
9519static bool
9520is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9521 struct drm_crtc_state *new_crtc_state)
9522{
9523 struct drm_display_mode old_mode, new_mode;
9524
9525 if (!old_crtc_state || !new_crtc_state)
9526 return false;
9527
9528 old_mode = old_crtc_state->mode;
9529 new_mode = new_crtc_state->mode;
9530
9531 if (old_mode.clock == new_mode.clock &&
9532 old_mode.hdisplay == new_mode.hdisplay &&
9533 old_mode.vdisplay == new_mode.vdisplay &&
9534 old_mode.htotal == new_mode.htotal &&
9535 old_mode.vtotal != new_mode.vtotal &&
9536 old_mode.hsync_start == new_mode.hsync_start &&
9537 old_mode.vsync_start != new_mode.vsync_start &&
9538 old_mode.hsync_end == new_mode.hsync_end &&
9539 old_mode.vsync_end != new_mode.vsync_end &&
9540 old_mode.hskew == new_mode.hskew &&
9541 old_mode.vscan == new_mode.vscan &&
9542 (old_mode.vsync_end - old_mode.vsync_start) ==
9543 (new_mode.vsync_end - new_mode.vsync_start))
9544 return true;
9545
9546 return false;
9547}
9548
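/*
 * Enter the fixed-refresh VRR state and record the mode's nominal refresh
 * rate in uHz as the fixed target:
 * refresh_uhz = clock_khz * 1000 * 1000000 / (htotal * vtotal)
 */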
9549static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9550 uint64_t num, den, res;
9551 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9552
9553 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9554
9555 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9556 den = (unsigned long long)new_crtc_state->mode.htotal *
9557 (unsigned long long)new_crtc_state->mode.vtotal;
9558
9559 res = div_u64(num, den);
9560 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9561}
9562
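/*
 * Validate and apply CRTC-level changes for one CRTC during atomic check:
 * create and validate a new DC stream for CRTCs being enabled, remove the
 * stream for CRTCs being disabled, and refresh scaling, ABM, color
 * management and freesync settings on the resulting state.
 */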
9563static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9564 struct drm_atomic_state *state,
9565 struct drm_crtc *crtc,
9566 struct drm_crtc_state *old_crtc_state,
9567 struct drm_crtc_state *new_crtc_state,
9568 bool enable,
9569 bool *lock_and_validation_needed)
9570{
9571 struct dm_atomic_state *dm_state = NULL;
9572 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9573 struct dc_stream_state *new_stream;
9574 int ret = 0;
9575
9576 /*
9577 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9578 * update changed items
9579 */
9580 struct amdgpu_crtc *acrtc = NULL;
9581 struct amdgpu_dm_connector *aconnector = NULL;
9582 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9583 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9584
9585 new_stream = NULL;
9586
9587 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9588 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9589 acrtc = to_amdgpu_crtc(crtc);
9590 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9591
9592 /* TODO This hack should go away */
9593 if (aconnector && enable) {
9594 /* Make sure fake sink is created in plug-in scenario */
9595 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9596 &aconnector->base);
9597 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9598 &aconnector->base);
9599
9600 if (IS_ERR(drm_new_conn_state)) {
9601 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9602 goto fail;
9603 }
9604
9605 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9606 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9607
9608 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9609 goto skip_modeset;
9610
9611 new_stream = create_validate_stream_for_sink(aconnector,
9612 &new_crtc_state->mode,
9613 dm_new_conn_state,
9614 dm_old_crtc_state->stream);
9615
9616 /*
9617 * we can have no stream on ACTION_SET if a display
9618 * was disconnected during S3, in this case it is not an
9619 * error, the OS will be updated after detection, and
9620 * will do the right thing on next atomic commit
9621 */
9622
9623 if (!new_stream) {
9624 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9625 __func__, acrtc->base.base.id);
9626 ret = -ENOMEM;
9627 goto fail;
9628 }
9629
9630 /*
9631 * TODO: Check VSDB bits to decide whether this should
9632 * be enabled or not.
9633 */
9634 new_stream->triggered_crtc_reset.enabled =
9635 dm->force_timing_sync;
9636
9637 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9638
9639 ret = fill_hdr_info_packet(drm_new_conn_state,
9640 &new_stream->hdr_static_metadata);
9641 if (ret)
9642 goto fail;
9643
9644 /*
9645 * For freesync video modes, a change that only adjusts the
9646 * front porch keeps the existing stream usable, so skip the
9647 * modeset handling below; the fixed refresh configuration
9648 * for this case is set up in the !enable pass of this
9649 * function.
9650 */
9651
9652
9653 if (amdgpu_freesync_vid_mode &&
9654 dm_new_crtc_state->stream &&
9655 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9656 goto skip_modeset;
9657
9658 if (dm_new_crtc_state->stream &&
9659 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9660 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9661 new_crtc_state->mode_changed = false;
9662 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9663 new_crtc_state->mode_changed);
9664 }
9665 }
9666
9667 /* mode_changed flag may get updated above, need to check again */
9668 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9669 goto skip_modeset;
9670
9671 DRM_DEBUG_ATOMIC(
9672 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9673 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9674 "connectors_changed:%d\n",
9675 acrtc->crtc_id,
9676 new_crtc_state->enable,
9677 new_crtc_state->active,
9678 new_crtc_state->planes_changed,
9679 new_crtc_state->mode_changed,
9680 new_crtc_state->active_changed,
9681 new_crtc_state->connectors_changed);
9682
9683 /* Remove stream for any changed/disabled CRTC */
9684 if (!enable) {
9685
9686 if (!dm_old_crtc_state->stream)
9687 goto skip_modeset;
9688
9689 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9690 is_timing_unchanged_for_freesync(new_crtc_state,
9691 old_crtc_state)) {
9692 new_crtc_state->mode_changed = false;
9693 DRM_DEBUG_DRIVER(
9694 "Mode change not required for front porch change, "
9695 "setting mode_changed to %d",
9696 new_crtc_state->mode_changed);
9697
9698 set_freesync_fixed_config(dm_new_crtc_state);
9699
9700 goto skip_modeset;
9701 } else if (amdgpu_freesync_vid_mode && aconnector &&
9702 is_freesync_video_mode(&new_crtc_state->mode,
9703 aconnector)) {
9704 struct drm_display_mode *high_mode;
9705
9706 high_mode = get_highest_refresh_rate_mode(aconnector, false);
9707 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
9708 set_freesync_fixed_config(dm_new_crtc_state);
9709 }
9710 }
9711
9712 ret = dm_atomic_get_state(state, &dm_state);
9713 if (ret)
9714 goto fail;
9715
9716 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9717 crtc->base.id);
9718
9719 /* i.e. reset mode */
9720 if (dc_remove_stream_from_ctx(
9721 dm->dc,
9722 dm_state->context,
9723 dm_old_crtc_state->stream) != DC_OK) {
9724 ret = -EINVAL;
9725 goto fail;
9726 }
9727
9728 dc_stream_release(dm_old_crtc_state->stream);
9729 dm_new_crtc_state->stream = NULL;
9730
9731 reset_freesync_config_for_crtc(dm_new_crtc_state);
9732
9733 *lock_and_validation_needed = true;
9734
9735 } else { /* Add stream for any updated/enabled CRTC */
9736 /*
9737 * Quick fix to prevent NULL pointer on new_stream when
9738 * added MST connectors not found in existing crtc_state in the chained mode
9739 * TODO: need to dig out the root cause of that
9740 */
9741 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9742 goto skip_modeset;
9743
9744 if (modereset_required(new_crtc_state))
9745 goto skip_modeset;
9746
9747 if (modeset_required(new_crtc_state, new_stream,
9748 dm_old_crtc_state->stream)) {
9749
9750 WARN_ON(dm_new_crtc_state->stream);
9751
9752 ret = dm_atomic_get_state(state, &dm_state);
9753 if (ret)
9754 goto fail;
9755
9756 dm_new_crtc_state->stream = new_stream;
9757
9758 dc_stream_retain(new_stream);
9759
9760 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9761 crtc->base.id);
9762
9763 if (dc_add_stream_to_ctx(
9764 dm->dc,
9765 dm_state->context,
9766 dm_new_crtc_state->stream) != DC_OK) {
9767 ret = -EINVAL;
9768 goto fail;
9769 }
9770
9771 *lock_and_validation_needed = true;
9772 }
9773 }
9774
9775skip_modeset:
9776 /* Release extra reference */
9777 if (new_stream)
9778 dc_stream_release(new_stream);
9779
9780 /*
9781 * We want to do dc stream updates that do not require a
9782 * full modeset below.
9783 */
9784 if (!(enable && aconnector && new_crtc_state->active))
9785 return 0;
9786
9787 /*
9788 * Given above conditions, the dc stream cannot be NULL because:
9789 * 1. We're in the process of enabling CRTCs (just been added
9790 *    to the dc context, or already is on the context),
9791 * 2. it has a valid connector attached, and
9792 * 3. it is currently active and enabled.
9793 */
9794 BUG_ON(dm_new_crtc_state->stream == NULL);
9795
9796 /* Scaling or underscan settings */
9797 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
9798 drm_atomic_crtc_needs_modeset(new_crtc_state))
9799 update_stream_scaling_settings(
9800 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9801
9802 /* ABM settings */
9803 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9804
9805 /*
9806 * Color management settings. We also update color properties
9807 * when a modeset is needed, to ensure it gets reprogrammed.
9808 */
9809 if (dm_new_crtc_state->base.color_mgmt_changed ||
9810 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9811 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9812 if (ret)
9813 goto fail;
9814 }
9815
9816 /* Update Freesync settings. */
9817 get_freesync_config_for_crtc(dm_new_crtc_state,
9818 dm_new_conn_state);
9819
9820 return ret;
9821
9822fail:
9823 if (new_stream)
9824 dc_stream_release(new_stream);
9825 return ret;
9826}
9827
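/*
 * Decide whether a plane update requires removing and recreating all DC
 * planes on the stream. Any change that can affect z-order, pipe
 * acquisition or bandwidth - CRTC assignment, scaling, rotation,
 * blending, format or tiling - forces a reset.
 */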
9828static bool should_reset_plane(struct drm_atomic_state *state,
9829 struct drm_plane *plane,
9830 struct drm_plane_state *old_plane_state,
9831 struct drm_plane_state *new_plane_state)
9832{
9833 struct drm_plane *other;
9834 struct drm_plane_state *old_other_state, *new_other_state;
9835 struct drm_crtc_state *new_crtc_state;
9836 int i;
9837
9838 /*
9839 * TODO: Remove this hack once the checks below are sufficient
9840 * enough to determine when we need to reset all the planes on
9841 * the stream.
9842 */
9843 if (state->allow_modeset)
9844 return true;
9845
9846 /* Exit early if we know that we're adding or removing the plane. */
9847 if (old_plane_state->crtc != new_plane_state->crtc)
9848 return true;
9849
9850 /* old crtc == new_crtc == NULL, plane not in context. */
9851 if (!new_plane_state->crtc)
9852 return false;
9853
9854 new_crtc_state =
9855 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9856
9857 if (!new_crtc_state)
9858 return true;
9859
9860 /* CRTC Degamma changes currently require us to recreate planes. */
9861 if (new_crtc_state->color_mgmt_changed)
9862 return true;
9863
9864 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9865 return true;
9866
9867 /*
9868 * If there are any new primary or overlay planes being added or
9869 * removed then the z-order can potentially change. To ensure
9870 * correct z-order and pipe acquisition the current DC architecture
9871 * requires us to remove and recreate all existing planes.
9872 *
9873 * TODO: Come up with a more elegant solution for this.
9874 */
9875 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9876 struct amdgpu_framebuffer *old_afb, *new_afb;
9877 if (other->type == DRM_PLANE_TYPE_CURSOR)
9878 continue;
9879
9880 if (old_other_state->crtc != new_plane_state->crtc &&
9881 new_other_state->crtc != new_plane_state->crtc)
9882 continue;
9883
9884 if (old_other_state->crtc != new_other_state->crtc)
9885 return true;
9886
9887 /* Src/dst size and scaling updates. */
9888 if (old_other_state->src_w != new_other_state->src_w ||
9889 old_other_state->src_h != new_other_state->src_h ||
9890 old_other_state->crtc_w != new_other_state->crtc_w ||
9891 old_other_state->crtc_h != new_other_state->crtc_h)
9892 return true;
9893
9894 /* Rotation / mirroring updates. */
9895 if (old_other_state->rotation != new_other_state->rotation)
9896 return true;
9897
9898 /* Blending updates. */
9899 if (old_other_state->pixel_blend_mode !=
9900 new_other_state->pixel_blend_mode)
9901 return true;
9902
9903 /* Alpha updates. */
9904 if (old_other_state->alpha != new_other_state->alpha)
9905 return true;
9906
9907 /* Colorspace changes. */
9908 if (old_other_state->color_range != new_other_state->color_range ||
9909 old_other_state->color_encoding != new_other_state->color_encoding)
9910 return true;
9911
9912 /* Framebuffer checks fall at the end. */
9913 if (!old_other_state->fb || !new_other_state->fb)
9914 continue;
9915
9916 /* Pixel format changes can require bandwidth updates. */
9917 if (old_other_state->fb->format != new_other_state->fb->format)
9918 return true;
9919
9920 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9921 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9922
9923 /* Tiling and DCC changes also require bandwidth updates. */
9924 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9925 old_afb->base.modifier != new_afb->base.modifier)
9926 return true;
9927 }
9928
9929 return false;
9930}
9931
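/*
 * Validate a framebuffer attached to the cursor plane: it must fit the
 * hardware cursor limits, must not be cropped or scaled, must use a
 * supported pitch, and must be linear when no explicit modifier is
 * supplied.
 */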
9932static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9933 struct drm_plane_state *new_plane_state,
9934 struct drm_framebuffer *fb)
9935{
9936 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9937 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9938 unsigned int pitch;
9939 bool linear;
9940
9941 if (fb->width > new_acrtc->max_cursor_width ||
9942 fb->height > new_acrtc->max_cursor_height) {
9943 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9944 new_plane_state->fb->width,
9945 new_plane_state->fb->height);
9946 return -EINVAL;
9947 }
9948 if (new_plane_state->src_w != fb->width << 16 ||
9949 new_plane_state->src_h != fb->height << 16) {
9950 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9951 return -EINVAL;
9952 }
9953
9954 /* Pitch in pixels */
9955 pitch = fb->pitches[0] / fb->format->cpp[0];
9956
9957 if (fb->width != pitch) {
9958 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9959 fb->width, pitch);
9960 return -EINVAL;
9961 }
9962
9963 switch (pitch) {
9964 case 64:
9965 case 128:
9966 case 256:
9967 /* FB pitch is supported by cursor plane */
9968 break;
9969 default:
9970 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9971 return -EINVAL;
9972 }
9973
9974 /* Core DRM takes care of checking FB modifiers, so we only need to
9975 * check tiling flags when the FB doesn't have a modifier. */
9976 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9977 if (adev->family < AMDGPU_FAMILY_AI) {
9978 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9979 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9980 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9981 } else {
9982 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9983 }
9984 if (!linear) {
9985 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9986 return -EINVAL;
9987 }
9988 }
9989
9990 return 0;
9991}
9992
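/*
 * Validate and apply changes for one plane during atomic check: remove
 * the DC plane state for planes being disabled, and create, fill and
 * attach a new dc_plane_state for planes being enabled, flagging the
 * commit for full lock-and-validation when the DC context changes.
 */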
9993static int dm_update_plane_state(struct dc *dc,
9994 struct drm_atomic_state *state,
9995 struct drm_plane *plane,
9996 struct drm_plane_state *old_plane_state,
9997 struct drm_plane_state *new_plane_state,
9998 bool enable,
9999 bool *lock_and_validation_needed)
10000{
10001
10002 struct dm_atomic_state *dm_state = NULL;
10003 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10004 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10005 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10006 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10007 struct amdgpu_crtc *new_acrtc;
10008 bool needs_reset;
10009 int ret = 0;
10010
10011
10012 new_plane_crtc = new_plane_state->crtc;
10013 old_plane_crtc = old_plane_state->crtc;
10014 dm_new_plane_state = to_dm_plane_state(new_plane_state);
10015 dm_old_plane_state = to_dm_plane_state(old_plane_state);
10016
10017 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10018 if (!enable || !new_plane_crtc ||
10019 drm_atomic_plane_disabling(plane->state, new_plane_state))
10020 return 0;
10021
10022 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10023
10024 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10025 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10026 return -EINVAL;
10027 }
10028
10029 if (new_plane_state->fb) {
10030 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10031 new_plane_state->fb);
10032 if (ret)
10033 return ret;
10034 }
10035
10036 return 0;
10037 }
10038
10039 needs_reset = should_reset_plane(state, plane, old_plane_state,
10040 new_plane_state);
10041
10042 /* Remove any changed/removed planes */
10043 if (!enable) {
10044 if (!needs_reset)
10045 return 0;
10046
10047 if (!old_plane_crtc)
10048 return 0;
10049
10050 old_crtc_state = drm_atomic_get_old_crtc_state(
10051 state, old_plane_crtc);
10052 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10053
10054 if (!dm_old_crtc_state->stream)
10055 return 0;
10056
10057 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10058 plane->base.id, old_plane_crtc->base.id);
10059
10060 ret = dm_atomic_get_state(state, &dm_state);
10061 if (ret)
10062 return ret;
10063
10064 if (!dc_remove_plane_from_context(
10065 dc,
10066 dm_old_crtc_state->stream,
10067 dm_old_plane_state->dc_state,
10068 dm_state->context)) {
10069
10070 return -EINVAL;
10071 }
10072
10073
10074 dc_plane_state_release(dm_old_plane_state->dc_state);
10075 dm_new_plane_state->dc_state = NULL;
10076
10077 *lock_and_validation_needed = true;
10078
10079 } else { /* Add new planes */
10080 struct dc_plane_state *dc_new_plane_state;
10081
10082 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10083 return 0;
10084
10085 if (!new_plane_crtc)
10086 return 0;
10087
10088 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10089 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10090
10091 if (!dm_new_crtc_state->stream)
10092 return 0;
10093
10094 if (!needs_reset)
10095 return 0;
10096
10097 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10098 if (ret)
10099 return ret;
10100
10101 WARN_ON(dm_new_plane_state->dc_state);
10102
10103 dc_new_plane_state = dc_create_plane_state(dc);
10104 if (!dc_new_plane_state)
10105 return -ENOMEM;
10106
10107 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10108 plane->base.id, new_plane_crtc->base.id);
10109
10110 ret = fill_dc_plane_attributes(
10111 drm_to_adev(new_plane_crtc->dev),
10112 dc_new_plane_state,
10113 new_plane_state,
10114 new_crtc_state);
10115 if (ret) {
10116 dc_plane_state_release(dc_new_plane_state);
10117 return ret;
10118 }
10119
10120 ret = dm_atomic_get_state(state, &dm_state);
10121 if (ret) {
10122 dc_plane_state_release(dc_new_plane_state);
10123 return ret;
10124 }
10125
10126 /*
10127 * Any atomic check errors that occur after this will
10128 * not need a release. The plane state will be attached
10129 * to the stream, and therefore part of the atomic
10130 * state. It'll be released when the atomic state is
10131 * cleaned.
10132 */
10133 if (!dc_add_plane_to_context(
10134 dc,
10135 dm_new_crtc_state->stream,
10136 dc_new_plane_state,
10137 dm_state->context)) {
10138
10139 dc_plane_state_release(dc_new_plane_state);
10140 return -EINVAL;
10141 }
10142
10143 dm_new_plane_state->dc_state = dc_new_plane_state;
10144
10145 /* Tell DC to do a full surface update every time there
10146 * is a plane change. Inefficient, but works for now.
10147 */
10148 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10149
10150 *lock_and_validation_needed = true;
10151 }
10152
10153
10154 return ret;
10155}
10156
10157static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10158 struct drm_crtc *crtc,
10159 struct drm_crtc_state *new_crtc_state)
10160{
10161 struct drm_plane_state *new_cursor_state, *new_primary_state;
10162 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10163
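	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check the cursor plane's
	 * blending properties match the underlying planes'.
	 */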
10169 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10170 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
10171 if (!new_cursor_state || !new_primary_state ||
10172 !new_cursor_state->fb || !new_primary_state->fb) {
10173 return 0;
10174 }
10175
10176 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10177 (new_cursor_state->src_w >> 16);
10178 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10179 (new_cursor_state->src_h >> 16);
10180
10181 primary_scale_w = new_primary_state->crtc_w * 1000 /
10182 (new_primary_state->src_w >> 16);
10183 primary_scale_h = new_primary_state->crtc_h * 1000 /
10184 (new_primary_state->src_h >> 16);
10185
10186 if (cursor_scale_w != primary_scale_w ||
10187 cursor_scale_h != primary_scale_h) {
10188 drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
10189 return -EINVAL;
10190 }
10191
10192 return 0;
10193}
10194
10195#if defined(CONFIG_DRM_AMD_DC_DCN)
10196static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10197{
10198 struct drm_connector *connector;
10199 struct drm_connector_state *conn_state;
10200 struct amdgpu_dm_connector *aconnector = NULL;
	int i;

10202 for_each_new_connector_in_state(state, connector, conn_state, i) {
10203 if (conn_state->crtc != crtc)
10204 continue;
10205
10206 aconnector = to_amdgpu_dm_connector(connector);
10207 if (!aconnector->port || !aconnector->mst_port)
10208 aconnector = NULL;
10209 else
10210 break;
10211 }
10212
10213 if (!aconnector)
10214 return 0;
10215
10216 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10217}
10218#endif
10219
10220static int validate_overlay(struct drm_atomic_state *state)
10221{
10222 int i;
10223 struct drm_plane *plane;
10224 struct drm_plane_state *new_plane_state;
10225 struct drm_plane_state *primary_state, *overlay_state = NULL;
10226
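	/* Check if primary plane is contained inside overlay */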
10228 for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
10229 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10230 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10231 return 0;
10232
10233 overlay_state = new_plane_state;
10234 continue;
10235 }
10236 }
10237
10238
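	/* Check if we're making changes to the overlay plane */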
10239 if (!overlay_state)
10240 return 0;
10241
10242
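	/* Check if the overlay plane is enabled */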
10243 if (!overlay_state->crtc)
10244 return 0;
10245
10246
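	/* Find the primary plane for the CRTC the overlay is enabled on */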
10247 primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10248 if (IS_ERR(primary_state))
10249 return PTR_ERR(primary_state);
10250
10251
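	/* Check if the primary plane is enabled */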
10252 if (!primary_state->crtc)
10253 return 0;
10254
10255
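	/* Perform the bounds check to ensure the overlay plane covers the primary */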
10256 if (primary_state->crtc_x < overlay_state->crtc_x ||
10257 primary_state->crtc_y < overlay_state->crtc_y ||
10258 primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10259 primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10260 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10261 return -EINVAL;
10262 }
10263
10264 return 0;
10265}
10266
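/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 *
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For full updates case which removes/adds/updates streams on one
 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
 * that any such full update commit will wait for completion of any outstanding
 * flip using DRMs synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: -Error code if validation failed.
 */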
10291static int amdgpu_dm_atomic_check(struct drm_device *dev,
10292 struct drm_atomic_state *state)
10293{
10294 struct amdgpu_device *adev = drm_to_adev(dev);
10295 struct dm_atomic_state *dm_state = NULL;
10296 struct dc *dc = adev->dm.dc;
10297 struct drm_connector *connector;
10298 struct drm_connector_state *old_con_state, *new_con_state;
10299 struct drm_crtc *crtc;
10300 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10301 struct drm_plane *plane;
10302 struct drm_plane_state *old_plane_state, *new_plane_state;
10303 enum dc_status status;
10304 int ret, i;
10305 bool lock_and_validation_needed = false;
10306 struct dm_crtc_state *dm_old_crtc_state;
10307#if defined(CONFIG_DRM_AMD_DC_DCN)
10308 struct dsc_mst_fairness_vars vars[MAX_PIPES];
10309#endif
10310
10311 trace_amdgpu_dm_atomic_check_begin(state);
10312
10313 ret = drm_atomic_helper_check_modeset(dev, state);
10314 if (ret)
10315 goto fail;
10316
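	/* Check connector changes */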
10318 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10319 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10320 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10321
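		/* Skip connectors that are disabled or part of a modeset already */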
10323 if (!old_con_state->crtc && !new_con_state->crtc)
10324 continue;
10325
10326 if (!new_con_state->crtc)
10327 continue;
10328
10329 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10330 if (IS_ERR(new_crtc_state)) {
10331 ret = PTR_ERR(new_crtc_state);
10332 goto fail;
10333 }
10334
10335 if (dm_old_con_state->abm_level !=
10336 dm_new_con_state->abm_level)
10337 new_crtc_state->connectors_changed = true;
10338 }
10339
10340#if defined(CONFIG_DRM_AMD_DC_DCN)
10341 if (dc_resource_is_dsc_encoding_supported(dc)) {
10342 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10343 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10344 ret = add_affected_mst_dsc_crtcs(state, crtc);
10345 if (ret)
10346 goto fail;
10347 }
10348 }
10349 }
10350#endif
10351 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10352 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10353
10354 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10355 !new_crtc_state->color_mgmt_changed &&
10356 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10357 dm_old_crtc_state->dsc_force_changed == false)
10358 continue;
10359
10360 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10361 if (ret)
10362 goto fail;
10363
10364 if (!new_crtc_state->enable)
10365 continue;
10366
10367 ret = drm_atomic_add_affected_connectors(state, crtc);
10368 if (ret)
			goto fail;
10370
10371 ret = drm_atomic_add_affected_planes(state, crtc);
10372 if (ret)
10373 goto fail;
10374
10375 if (dm_old_crtc_state->dsc_force_changed)
10376 new_crtc_state->mode_changed = true;
10377 }
10378
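	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */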
10384 drm_for_each_crtc(crtc, dev) {
10385 bool modified = false;
10386
10387 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10388 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10389 continue;
10390
10391 if (new_plane_state->crtc == crtc ||
10392 old_plane_state->crtc == crtc) {
10393 modified = true;
10394 break;
10395 }
10396 }
10397
10398 if (!modified)
10399 continue;
10400
10401 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10402 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10403 continue;
10404
10405 new_plane_state =
10406 drm_atomic_get_plane_state(state, plane);
10407
10408 if (IS_ERR(new_plane_state)) {
10409 ret = PTR_ERR(new_plane_state);
10410 goto fail;
10411 }
10412 }
10413 }
10414
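	/* Remove existing planes if they are modified */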
10416 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10417 ret = dm_update_plane_state(dc, state, plane,
10418 old_plane_state,
10419 new_plane_state,
10420 false,
10421 &lock_and_validation_needed);
10422 if (ret)
10423 goto fail;
10424 }
10425
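	/* Disable all CRTCs which require disable */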
10427 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10428 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10429 old_crtc_state,
10430 new_crtc_state,
10431 false,
10432 &lock_and_validation_needed);
10433 if (ret)
10434 goto fail;
10435 }
10436
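	/* Enable all CRTCs which require enable */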
10438 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10439 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10440 old_crtc_state,
10441 new_crtc_state,
10442 true,
10443 &lock_and_validation_needed);
10444 if (ret)
10445 goto fail;
10446 }
10447
10448 ret = validate_overlay(state);
10449 if (ret)
10450 goto fail;
10451
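	/* Add new/modified planes */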
10453 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10454 ret = dm_update_plane_state(dc, state, plane,
10455 old_plane_state,
10456 new_plane_state,
10457 true,
10458 &lock_and_validation_needed);
10459 if (ret)
10460 goto fail;
10461 }
10462
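	/* Run this here since we want to validate the streams we created */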
10464 ret = drm_atomic_helper_check_planes(dev, state);
10465 if (ret)
10466 goto fail;
10467
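	/* Check cursor plane scaling */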
10469 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10470 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10471 if (ret)
10472 goto fail;
10473 }
10474
10475 if (state->legacy_cursor_update) {
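		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */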
10481 state->async_update =
10482 !drm_atomic_helper_async_check(dev, state);
10483
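
		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */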
10491 if (state->async_update)
10492 return 0;
10493 }
10494
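	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling-change validation was removed due to the inability to
	 * commit a new stream into the context without causing a full reset.
	 * Need to decide how to handle.
	 */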
10500 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10501 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10502 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10503 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10504
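		/* Skip any modesets/resets */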
10506 if (!acrtc || drm_atomic_crtc_needs_modeset(
10507 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10508 continue;
10509
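		/* Skip anything that is not a scaling or underscan change */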
10511 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10512 continue;
10513
10514 lock_and_validation_needed = true;
10515 }
10516
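	/*
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We have to currently stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * if we don't.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */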
10530 if (lock_and_validation_needed) {
10531 ret = dm_atomic_get_state(state, &dm_state);
10532 if (ret)
10533 goto fail;
10534
10535 ret = do_aquire_global_lock(dev, state);
10536 if (ret)
10537 goto fail;
10538
10539#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
			ret = -EINVAL;
			goto fail;
		}
10542
10543 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
10544 if (ret)
10545 goto fail;
10546#endif
10547
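		/*
		 * Perform validation of the MST topology in the state before
		 * asking DC to validate the global state, so that an invalid
		 * MST topology is rejected up front.
		 */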
10554 ret = drm_dp_mst_atomic_check(state);
10555 if (ret)
10556 goto fail;
10557 status = dc_validate_global_state(dc, dm_state->context, false);
10558 if (status != DC_OK) {
10559 drm_dbg_atomic(dev,
10560 "DC global validation failure: %s (%d)",
10561 dc_status_to_str(status), status);
10562 ret = -EINVAL;
10563 goto fail;
10564 }
10565 } else {
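		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid corruption.
		 */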
10580 for (i = 0; i < state->num_private_objs; i++) {
10581 struct drm_private_obj *obj = state->private_objs[i].ptr;
10582
10583 if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;
10585
10586 dm_atomic_destroy_state(obj,
10587 state->private_objs[i].state);
10588
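				/*
				 * If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * to make sure the elements stay packed
				 * correctly.
				 */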
10593 if (i != j)
10594 state->private_objs[i] =
10595 state->private_objs[j];
10596
10597 state->private_objs[j].ptr = NULL;
10598 state->private_objs[j].state = NULL;
10599 state->private_objs[j].old_state = NULL;
10600 state->private_objs[j].new_state = NULL;
10601
10602 state->num_private_objs = j;
10603 break;
10604 }
10605 }
10606 }
10607
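	/* Store the overall update type for all CRTCs */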
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10610 struct dm_crtc_state *dm_new_crtc_state =
10611 to_dm_crtc_state(new_crtc_state);
10612
10613 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10614 UPDATE_TYPE_FULL :
10615 UPDATE_TYPE_FAST;
10616 }
10617
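	/* Must be success */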
10619 WARN_ON(ret);
10620
10621 trace_amdgpu_dm_atomic_check_finish(state, ret);
10622
10623 return ret;
10624
10625fail:
10626 if (ret == -EDEADLK)
10627 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10628 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10629 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10630 else
10631 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
10632
10633 trace_amdgpu_dm_atomic_check_finish(state, ret);
10634
10635 return ret;
10636}
10637
10638static bool is_dp_capable_without_timing_msa(struct dc *dc,
10639 struct amdgpu_dm_connector *amdgpu_dm_connector)
10640{
10641 uint8_t dpcd_data;
10642 bool capable = false;
10643
10644 if (amdgpu_dm_connector->dc_link &&
10645 dm_helpers_dp_read_dpcd(
10646 NULL,
10647 amdgpu_dm_connector->dc_link,
10648 DP_DOWN_STREAM_PORT_COUNT,
10649 &dpcd_data,
10650 sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10652 }
10653
10654 return capable;
10655}
10656
10657static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
10658 unsigned int offset,
10659 unsigned int total_length,
10660 uint8_t *data,
10661 unsigned int length,
10662 struct amdgpu_hdmi_vsdb_info *vsdb)
10663{
10664 bool res;
10665 union dmub_rb_cmd cmd;
10666 struct dmub_cmd_send_edid_cea *input;
10667 struct dmub_cmd_edid_cea_output *output;
10668
10669 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
10670 return false;
10671
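	/* Build a DMUB EDID CEA command carrying one chunk of the extension block */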
10672 memset(&cmd, 0, sizeof(cmd));
10673
10674 input = &cmd.edid_cea.data.input;
10675
10676 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
10677 cmd.edid_cea.header.sub_type = 0;
10678 cmd.edid_cea.header.payload_bytes =
10679 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
10680 input->offset = offset;
10681 input->length = length;
10682 input->total_length = total_length;
10683 memcpy(input->payload, data, length);
10684
10685 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
10686 if (!res) {
10687 DRM_ERROR("EDID CEA parser failed\n");
10688 return false;
10689 }
10690
10691 output = &cmd.edid_cea.data.output;
10692
10693 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
10694 if (!output->ack.success) {
10695 DRM_ERROR("EDID CEA ack failed at offset %d\n",
10696 output->ack.offset);
10697 }
10698 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
10699 if (!output->amd_vsdb.vsdb_found)
10700 return false;
10701
10702 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
10703 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
10704 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
10705 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
10706 } else {
10707 DRM_WARN("Unknown EDID CEA parser results\n");
10708 return false;
10709 }
10710
10711 return true;
10712}
10713
10714static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
10715 uint8_t *edid_ext, int len,
10716 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10717{
10718 int i;
10719
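	/* Send the CEA extension block to the DMCU for parsing */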
10721 for (i = 0; i < len; i += 8) {
10722 bool res;
10723 int offset;
10724
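		/* Send 8 bytes at a time */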
10726 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
10727 return false;
10728
		if (i + 8 == len) {
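			/* EDID block transfer completed, expect the parse result */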
10731 int version, min_rate, max_rate;
10732
10733 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
10734 if (res) {
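				/* AMD VSDB found */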
10736 vsdb_info->freesync_supported = 1;
10737 vsdb_info->amd_vsdb_version = version;
10738 vsdb_info->min_refresh_rate_hz = min_rate;
10739 vsdb_info->max_refresh_rate_hz = max_rate;
10740 return true;
10741 }
10742
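			/* Not an AMD VSDB */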
10743 return false;
10744 }
10745
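		/* Check for the ack */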
10747 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
10748 if (!res)
10749 return false;
10750 }
10751
10752 return false;
10753}
10754
10755static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
10756 uint8_t *edid_ext, int len,
10757 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10758{
10759 int i;
10760
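	/* Send the CEA extension block to the DMUB for parsing */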
10762 for (i = 0; i < len; i += 8) {
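		/* Send 8 bytes at a time */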
10764 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
10765 return false;
10766 }
10767
10768 return vsdb_info->freesync_supported;
10769}
10770
10771static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10772 uint8_t *edid_ext, int len,
10773 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10774{
10775 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10776
10777 if (adev->dm.dmub_srv)
10778 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
10779 else
10780 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
10781}
10782
10783static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10784 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10785{
10786 uint8_t *edid_ext = NULL;
10787 int i;
10788 bool valid_vsdb_found = false;
10789
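	/* No EDID or no EDID extensions */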
10792 if (edid == NULL || edid->extensions == 0)
10793 return -ENODEV;
10794
10795
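	/* Find the CEA extension block */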
10796 for (i = 0; i < edid->extensions; i++) {
10797 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10798 if (edid_ext[0] == CEA_EXT)
10799 break;
10800 }
10801
10802 if (i == edid->extensions)
10803 return -ENODEV;
10804
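	/* Sanity check that the block found really is a CEA extension */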
10806 if (edid_ext[0] != CEA_EXT)
10807 return -ENODEV;
10808
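	/* Parse the CEA extension, looking for an AMD VSDB with FreeSync info */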
10809 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10810
10811 return valid_vsdb_found ? i : -ENODEV;
10812}
10813
10814void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10815 struct edid *edid)
10816{
10817 int i = 0;
10818 struct detailed_timing *timing;
10819 struct detailed_non_pixel *data;
10820 struct detailed_data_monitor_range *range;
10821 struct amdgpu_dm_connector *amdgpu_dm_connector =
10822 to_amdgpu_dm_connector(connector);
10823 struct dm_connector_state *dm_con_state = NULL;
10824
10825 struct drm_device *dev = connector->dev;
10826 struct amdgpu_device *adev = drm_to_adev(dev);
10827 bool freesync_capable = false;
10828 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10829
10830 if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
10832 goto update;
10833 }
10834
10835 if (!edid) {
10836 dm_con_state = to_dm_connector_state(connector->state);
10837
10838 amdgpu_dm_connector->min_vfreq = 0;
10839 amdgpu_dm_connector->max_vfreq = 0;
10840 amdgpu_dm_connector->pixel_clock_mhz = 0;
10841
10842 goto update;
10843 }
10844
10845 dm_con_state = to_dm_connector_state(connector->state);
10846
10847 if (!amdgpu_dm_connector->dc_sink) {
10848 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10849 goto update;
10850 }
10851 if (!adev->dm.freesync_module)
10852 goto update;
10853
10854
10855 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10856 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10857 bool edid_check_required = false;
10858
10859 if (edid) {
10860 edid_check_required = is_dp_capable_without_timing_msa(
10861 adev->dm.dc,
10862 amdgpu_dm_connector);
10863 }
10864
		if (edid_check_required && (edid->version > 1 ||
		    (edid->version == 1 && edid->revision > 1))) {
10867 for (i = 0; i < 4; i++) {
10868
10869 timing = &edid->detailed_timings[i];
10870 data = &timing->data.other_data;
10871 range = &data->data.range;
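				/* Check if the monitor advertises a continuous frequency range */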
10875 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10876 continue;
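				/*
				 * Check for flag range limits only. If flag == 1 then
				 * no additional timing information is provided.
				 * Default GTF, GTF Secondary curve and CVT are not
				 * supported.
				 */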
10883 if (range->flags != 1)
10884 continue;
10885
10886 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10887 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10888 amdgpu_dm_connector->pixel_clock_mhz =
10889 range->pixel_clock_mhz * 10;
10890
10891 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10892 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10893
10894 break;
10895 }
10896
10897 if (amdgpu_dm_connector->max_vfreq -
10898 amdgpu_dm_connector->min_vfreq > 10) {
10899
10900 freesync_capable = true;
10901 }
10902 }
10903 } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10904 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10905 if (i >= 0 && vsdb_info.freesync_supported) {
10906 timing = &edid->detailed_timings[i];
10907 data = &timing->data.other_data;
10908
10909 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10910 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10911 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10912 freesync_capable = true;
10913
10914 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10915 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10916 }
10917 }
10918
10919update:
10920 if (dm_con_state)
10921 dm_con_state->freesync_capable = freesync_capable;
10922
10923 if (connector->vrr_capable_property)
10924 drm_connector_set_vrr_capable_property(connector,
10925 freesync_capable);
10926}
10927
10928void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10929{
10930 struct amdgpu_device *adev = drm_to_adev(dev);
10931 struct dc *dc = adev->dm.dc;
10932 int i;
10933
10934 mutex_lock(&adev->dm.dc_lock);
10935 if (dc->current_state) {
10936 for (i = 0; i < dc->current_state->stream_count; ++i)
10937 dc->current_state->streams[i]
10938 ->triggered_crtc_reset.enabled =
10939 adev->dm.force_timing_sync;
10940
10941 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10942 dc_trigger_sync(dc, dc->current_state);
10943 }
10944 mutex_unlock(&adev->dm.dc_lock);
10945}
10946
10947void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10948 uint32_t value, const char *func_name)
10949{
10950#ifdef DM_CHECK_ADDR_0
10951 if (address == 0) {
		DC_ERR("invalid register write. address = 0\n");
10953 return;
10954 }
10955#endif
10956 cgs_write_register(ctx->cgs_device, address, value);
10957 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10958}
10959
10960uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10961 const char *func_name)
10962{
10963 uint32_t value;
10964#ifdef DM_CHECK_ADDR_0
10965 if (address == 0) {
10966 DC_ERR("invalid register read; address = 0\n");
10967 return 0;
10968 }
10969#endif
10970
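	/*
	 * A register read should never be issued while DMUB register offload
	 * gathering is in progress; assert and return 0 in that case.
	 */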
10971 if (ctx->dmub_srv &&
10972 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10973 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10974 ASSERT(false);
10975 return 0;
10976 }
10977
10978 value = cgs_read_register(ctx->cgs_device, address);
10979
10980 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10981
10982 return value;
10983}
10984
10985int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10986 struct aux_payload *payload, enum aux_return_code_type *operation_result)
10987{
10988 struct amdgpu_device *adev = ctx->driver_context;
10989 int ret = 0;
10990
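	/* Start the AUX transfer on DMUB and wait for the transfer-done completion to be signalled */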
10991 dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
10992 ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
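	/* A non-positive return value means the wait timed out or was interrupted; treat both as failure */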
	if (ret <= 0) {
10994 *operation_result = AUX_RET_ERROR_TIMEOUT;
10995 return -1;
10996 }
10997 *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10998
10999 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11000 (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
11001
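		/* For a read, copy the reply data into the payload */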
11003 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11004 (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
11005 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11006 adev->dm.dmub_notify->aux_reply.length);
11007 }
11008
11009 return adev->dm.dmub_notify->aux_reply.length;
11010}
11011