#include <linux/slab.h>
#include <linux/mm.h>

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"

#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"

#include "link_hwss.h"
#include "link_encoder.h"

#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"
#include "hubp.h"

#include "dc_link_dp.h"

#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#include "dsc.h"
#endif

#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#include "vm_helper.h"
#endif

#include "dce/dce_i2c.h"

#define DC_LOGGER \
	dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";
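
/*
 * Display Core (DC) entry points: construction/destruction of the dc
 * object and its links, state (context) management, and the surface/
 * stream update paths that validate and commit changes to hardware.
 */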
static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}

static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			link_destroy(&dc->links[i]);
	}
}

static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			bool should_destroy_link = false;

			if (link->connector_signal == SIGNAL_TYPE_EDP) {
				if (dc->config.edp_not_connected)
					should_destroy_link = true;
				else if (dc->debug.remove_disconnect_edp) {
					enum dc_connection_type type;

					dc_link_detect_sink(link, &type);
					if (type == dc_connection_none)
						should_destroy_link = true;
				}
			}

			if (!should_destroy_link) {
				dc->links[dc->link_count] = link;
				link->dc = dc;
				++dc->link_count;
			} else {
				link_destroy(&link);
			}
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	return true;

failed_alloc:
	return false;
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
	kfree(*perf_trace);
	*perf_trace = NULL;
}

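/*
 * dc_stream_adjust_vmin_vmax - program DRR (variable refresh) limits.
 *
 * Looks up every pipe driving @stream and reprograms its timing
 * generator with the new v_total min/max/mid values from @adjust.
 * Returns true if at least one matching pipe was found.
 */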
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
{
	int i = 0;
	bool ret = false;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			pipe->stream->adjust = *adjust;
			dc->hwss.set_drr(&pipe,
					1,
					adjust->v_total_min,
					adjust->v_total_max,
					adjust->v_total_mid,
					adjust->v_total_mid_frame_num);

			ret = true;
		}
	}
	return ret;
}

bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	const struct dc_stream_state *stream = streams[0];
	int i = 0;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}
	return ret;
}

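/*
 * dc_stream_configure_crc - enable/disable CRC capture on the stream's
 * timing generator. Both CRC windows are set to the full addressable
 * frame and OR'd together. Returns false if the stream has no pipe or
 * the timing generator does not support CRC capture.
 */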
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     bool enable, bool continuous)
{
	int i;
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	/* Always capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

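/*
 * dc_stream_get_crc - read back the most recently captured CRC for the
 * stream, split into R/Cr, G/Y and B/Cb components.
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 */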
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
	    pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
			pipes->plane_res.xfm,
			pipes->plane_res.scl_data.lb_params.depth,
			&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
	int i = 0;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);
			ret = true;
		}
	}

	return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
	int i = 0;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {

			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_output_csc(dc,
					pipes,
					stream->output_color_space,
					stream->csc_color_matrix.matrix,
					pipes->stream_res.opp->inst);
			ret = true;
		}
	}

	return ret;
}

void dc_stream_set_static_screen_events(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_events *events)
{
	int i = 0;
	int j = 0;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
}

static void destruct(struct dc *dc)
{
	if (dc->current_state) {
		dc_release_state(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	dc_perf_trace_destroy(&dc->ctx->perf_trace);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

#endif
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
	kfree(dc->vm_helper);
	dc->vm_helper = NULL;

#endif
}

static bool construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;
#endif

	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	dc->config = init_params->flags;

#ifdef CONFIG_DRM_AMD_DC_DCN2_0
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);
		goto fail;
	}

#endif
	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
	dc->soc_bounding_box = init_params->soc_bounding_box;
#endif
#endif

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc->ctx = dc_ctx;

	dc_ctx->dce_environment = init_params->dce_environment;

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc_ctx->dce_version = dc_version;

	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_version);
	if (!dc->res_pool)
		goto fail;

	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;

#ifdef CONFIG_DRM_AMD_DC_DCN2_1
	if (dc->res_pool->funcs->update_bw_bounding_box)
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
#endif
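	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml
	 */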
	dc->current_state = dc_create_state(dc);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	dc_resource_state_construct(dc, dc->current_state);

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	return true;

fail:
	destruct(dc);
	return false;
}

#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
static bool disable_all_writeback_pipes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < stream->num_wb_info; i++)
		stream->writeback_info[i].wb_enabled = false;

	return true;
}
#endif

static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state(dc);
	struct dc_state *current_ctx;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
#endif
			dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}
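/*
 * dc_create - allocate and construct a dc object, initialize HW via the
 * hw sequencer, and derive the core caps (max streams/links/audios)
 * from the resource pool. Returns NULL on failure.
 */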
struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (NULL == dc)
		goto alloc_fail;

	if (false == construct(dc, init_params))
		goto construct_fail;

	dc->hwss.init_hw(dc);

	full_pipe_count = dc->res_pool->pipe_count;
	if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
		full_pipe_count--;
	dc->caps.max_streams = min(
			full_pipe_count,
			dc->res_pool->stream_enc_count);

	dc->caps.max_links = dc->link_count;
	dc->caps.max_audios = dc->res_pool->audio_count;
	dc->caps.linear_pitch_alignment = 64;

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	if (dc->res_pool->dmcu != NULL)
		dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");

	return dc;

construct_fail:
	kfree(dc);

alloc_fail:
	return NULL;
}

void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
}

void dc_destroy(struct dc **dc)
{
	destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}

static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i = 0, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
	}
}

static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;
	int num_group = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

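		/* Add tg to the set, search rest of the tg's for ones stopped
		 * at the same place */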
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;

			if (resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

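		/* set first unblanked pipe as master */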
		for (j = 0; j < group_size; j++) {
			struct pipe_ctx *temp;

			if (pipe_set[j]->plane_state) {
				if (j == 0)
					break;

				temp = pipe_set[0];
				pipe_set[0] = pipe_set[j];
				pipe_set[j] = temp;
				break;
			}
		}

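		/* record which sync group each stream belongs to and who
		 * is the master
		 */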
		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;
		}

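		/* remove any other unblanked pipes as they have already been synced */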
		for (j = j + 1; j < group_size; j++) {
			if (pipe_set[j]->plane_state) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		if (group_size > 1) {
			dc->hwss.enable_timing_synchronization(
				dc, group_index, group_size, pipe_set);
			group_index++;
		}
		num_group++;
	}
}

static bool context_changed(
		struct dc *dc,
		struct dc_state *context)
{
	uint8_t i;

	if (context->stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != context->streams[i])
			return true;
	}

	return false;
}

bool dc_validate_seamless_boot_timing(const struct dc *dc,
				const struct dc_sink *sink,
				struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct dc_link *link = sink->link;
	unsigned int enc_inst, tg_inst;

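	/* Check for enabled DIG to identify enabled display */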
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
		return false;

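	/* Check which DIG frontend (stream encoder) feeds this link's
	 * backend, so the driving OTG and pixel clock can be validated.
	 */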
	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	if (enc_inst >= dc->res_pool->pipe_count)
		return false;

	if (enc_inst >= dc->res_pool->stream_enc_count)
		return false;

	tg_inst = dc->res_pool->stream_enc[enc_inst]->funcs->dig_source_otg(
		dc->res_pool->stream_enc[enc_inst]);

	if (tg_inst >= dc->res_pool->timing_generator_count)
		return false;

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->is_matching_timing)
		return false;

	if (!tg->funcs->is_matching_timing(tg, crtc_timing))
		return false;

	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz;

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			tg_inst, &pix_clk_100hz);

		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;
	}

	return true;
}

bool dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	bool ret = true;
	int i, j;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL)
			pipe = &context->res_ctx.pipe_ctx[i];
		else
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		for (j = 0; pipe && j < stream_count; j++) {
			if (streams[j] && streams[j] == pipe->stream &&
					dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}

	return ret;
}

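/*
 * Applies given context to HW and copy it into current context.
 * It's up to the user to release the src context afterwards.
 */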
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

	disable_dangling_plane(dc, context);

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb))
		dc->hwss.enable_accelerated_mode(dc, context);

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization)
			dc->optimize_seamless_boot = true;
	}

	if (!dc->optimize_seamless_boot)
		dc->hwss.prepare_bandwidth(dc, context);

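	/* re-program planes for existing streams, in case we need to
	 * free up plane resources for later use
	 */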
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->mode_changed)
			continue;

		dc->hwss.apply_ctx_for_surface(
			dc, context->streams[i],
			context->stream_status[i].plane_count,
			context);
	}

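	/* Program hardware */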
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK)
		return result;

	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}

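	/* Program all planes within new context */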
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		if (!context->streams[i]->mode_changed)
			continue;

		dc->hwss.apply_ctx_for_surface(
			dc, context->streams[i],
			context->stream_status[i].plane_count,
			context);

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
						context->streams[l] == pipe->stream &&
						dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	if (!dc->optimize_seamless_boot)
		/* pplib is notified if disp_num changed */
		dc->hwss.optimize_bandwidth(dc, context);

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	memset(&context->commit_hints, 0, sizeof(context->commit_hints));

	dc_release_state(dc->current_state);

	dc->current_state = context;

	dc_retain_state(dc->current_state);

	return result;
}

bool dc_commit_state(struct dc *dc, struct dc_state *context)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;
	int i;

	if (false == context_changed(dc, context))
		return DC_OK;

	DC_LOG_DC("%s: %d streams\n",
			__func__, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		dc_stream_log(dc, stream);
	}

	result = dc_commit_state_no_check(dc, context);

	return (result == DC_OK);
}

bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct dc_state *context = dc->current_state;

	if (!dc->optimized_required || dc->optimize_seamless_boot)
		return true;

	post_surface_trace(dc);

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].stream == NULL ||
		    context->res_ctx.pipe_ctx[i].plane_state == NULL) {
			context->res_ctx.pipe_ctx[i].pipe_idx = i;
			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
		}

	dc->optimized_required = false;

	dc->hwss.optimize_bandwidth(dc, context);
	return true;
}

struct dc_state *dc_create_state(struct dc *dc)
{
	struct dc_state *context = kvzalloc(sizeof(struct dc_state),
					    GFP_KERNEL);

	if (!context)
		return NULL;

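	/* Each context must have its own instance of VBA, and in order to
	 * initialize and obtain IP and SOC, the base DML instance must be
	 * copied from the dc's copy.
	 */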
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
#endif

	kref_init(&context->refcount);

	return context;
}

struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
	int i, j;
	struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);

	if (!new_ctx)
		return NULL;
	memcpy(new_ctx, src_ctx, sizeof(struct dc_state));

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];

		if (cur_pipe->top_pipe)
			cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];

		if (cur_pipe->bottom_pipe)
			cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];

		if (cur_pipe->prev_odm_pipe)
			cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];

		if (cur_pipe->next_odm_pipe)
			cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
	}

	for (i = 0; i < new_ctx->stream_count; i++) {
		dc_stream_retain(new_ctx->streams[i]);
		for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
			dc_plane_state_retain(
				new_ctx->stream_status[i].plane_states[j]);
	}

	kref_init(&new_ctx->refcount);

	return new_ctx;
}

void dc_retain_state(struct dc_state *context)
{
	kref_get(&context->refcount);
}

static void dc_state_free(struct kref *kref)
{
	struct dc_state *context = container_of(kref, struct dc_state, refcount);

	dc_resource_state_destruct(context);
	kvfree(context);
}

void dc_release_state(struct dc_state *context)
{
	kref_put(&context->refcount, dc_state_free);
}

bool dc_set_generic_gpio_for_stereo(bool enable,
		struct gpio_service *gpio_service)
{
	enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
	struct gpio_pin_info pin_info;
	struct gpio *generic;
	struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
			   GFP_KERNEL);

	if (!config)
		return false;
	pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);

	if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
		kfree(config);
		return false;
	} else {
		generic = dal_gpio_service_create_generic_mux(
			gpio_service,
			pin_info.offset,
			pin_info.mask);
	}

	if (!generic) {
		kfree(config);
		return false;
	}

	gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);

	config->enable_output_from_mux = enable;
	config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;

	if (gpio_result == GPIO_RESULT_OK)
		gpio_result = dal_mux_setup_config(generic, config);

	if (gpio_result == GPIO_RESULT_OK) {
		dal_gpio_close(generic);
		dal_gpio_destroy_generic_mux(&generic);
		kfree(config);
		return true;
	} else {
		dal_gpio_close(generic);
		dal_gpio_destroy_generic_mux(&generic);
		kfree(config);
		return false;
	}
}

static bool is_surface_in_context(
		const struct dc_state *context,
		const struct dc_plane_state *plane_state)
{
	int j;

	for (j = 0; j < MAX_PIPES; j++) {
		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (plane_state == pipe_ctx->plane_state) {
			return true;
		}
	}

	return false;
}

static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;

	if (!u->plane_info)
		return UPDATE_TYPE_FAST;

	if (u->plane_info->color_space != u->surface->color_space) {
		update_flags->bits.color_space_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
		update_flags->bits.horizontal_mirror_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->rotation != u->surface->rotation) {
		update_flags->bits.rotation_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->format != u->surface->format) {
		update_flags->bits.pixel_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->stereo_format != u->surface->stereo_format) {
		update_flags->bits.stereo_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
		update_flags->bits.per_pixel_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
		update_flags->bits.global_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->sdr_white_level != u->surface->sdr_white_level) {
		update_flags->bits.sdr_white_level = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->dcc.enable != u->surface->dcc.enable
			|| u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
			|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
		update_flags->bits.dcc_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (resource_pixel_format_to_bpp(u->plane_info->format) !=
			resource_pixel_format_to_bpp(u->surface->format)) {
		/* different bytes per element will require full bandwidth
		 * and DML calculation
		 */
		update_flags->bits.bpp_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
			|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
		update_flags->bits.plane_size_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
			sizeof(union dc_tiling_info)) != 0) {
		update_flags->bits.swizzle_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);

		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
			/* swizzled mode requires RQ to be setup properly,
			 * thus need to run DML to calculate RQ settings
			 */
			update_flags->bits.bandwidth_change = 1;
			elevate_update_type(&update_type, UPDATE_TYPE_FULL);
		}
	}

	/* This should be UPDATE_TYPE_FAST if nothing has changed. */
	return update_type;
}

static enum surface_update_type get_scaling_info_update_type(
		const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height) {
		update_flags->bits.scaling_change = 1;

		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
				|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
			/* Making dst rect smaller requires a bandwidth change */
			update_flags->bits.bandwidth_change = 1;
	}

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
			|| u->scaling_info->src_rect.height != u->surface->src_rect.height) {

		update_flags->bits.scaling_change = 1;
		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
				&& u->scaling_info->src_rect.height > u->surface->src_rect.height)
			/* Making src rect bigger requires a bandwidth change */
			update_flags->bits.clock_change = 1;
	}

	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
		update_flags->bits.position_change = 1;

	if (update_flags->bits.clock_change
			|| update_flags->bits.bandwidth_change)
		return UPDATE_TYPE_FULL;

	if (update_flags->bits.scaling_change
			|| update_flags->bits.position_change)
		return UPDATE_TYPE_MED;

	return UPDATE_TYPE_FAST;
}

static enum surface_update_type det_surface_update(const struct dc *dc,
		const struct dc_surface_update *u)
{
	const struct dc_state *context = dc->current_state;
	enum surface_update_type type;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
	union surface_update_flags *update_flags = &u->surface->update_flags;

	update_flags->raw = 0;

	if (u->flip_addr)
		update_flags->bits.addr_update = 1;

	if (!is_surface_in_context(context, u->surface)) {
		update_flags->bits.new_plane = 1;
		return UPDATE_TYPE_FULL;
	}

	if (u->surface->force_full_update) {
		update_flags->bits.full_update = 1;
		return UPDATE_TYPE_FULL;
	}

	type = get_plane_info_update_type(u);
	elevate_update_type(&overall_type, type);

	type = get_scaling_info_update_type(u);
	elevate_update_type(&overall_type, type);

	if (u->flip_addr)
		update_flags->bits.addr_update = 1;

	if (u->in_transfer_func)
		update_flags->bits.in_transfer_func_change = 1;

	if (u->input_csc_color_matrix)
		update_flags->bits.input_csc_change = 1;

	if (u->coeff_reduction_factor)
		update_flags->bits.coeff_reduction_change = 1;

	if (u->gamma) {
		enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;

		if (u->plane_info)
			format = u->plane_info->format;
		else if (u->surface)
			format = u->surface->format;

		if (dce_use_lut(format))
			update_flags->bits.gamma_change = 1;
	}

	if (update_flags->bits.in_transfer_func_change) {
		type = UPDATE_TYPE_MED;
		elevate_update_type(&overall_type, type);
	}

	if (update_flags->bits.input_csc_change
			|| update_flags->bits.coeff_reduction_change
			|| update_flags->bits.gamma_change) {
		type = UPDATE_TYPE_FULL;
		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}

static enum surface_update_type check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (stream_status == NULL || stream_status->plane_count != surface_count)
		return UPDATE_TYPE_FULL;

	if (stream_update) {
		if ((stream_update->src.height != 0) &&
				(stream_update->src.width != 0))
			return UPDATE_TYPE_FULL;

		if ((stream_update->dst.height != 0) &&
				(stream_update->dst.width != 0))
			return UPDATE_TYPE_FULL;

		if (stream_update->out_transfer_func)
			return UPDATE_TYPE_FULL;

		if (stream_update->abm_level)
			return UPDATE_TYPE_FULL;

		if (stream_update->dpms_off)
			return UPDATE_TYPE_FULL;

#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
		if (stream_update->wb_update)
			return UPDATE_TYPE_FULL;
#endif
	}

	for (i = 0; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(dc, &updates[i]);

		if (type == UPDATE_TYPE_FULL)
			return type;

		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}
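/**
 * dc_check_update_surfaces_for_stream() - Determine update type (fast,
 * med, or full)
 *
 * See :c:type:`enum surface_update_type <surface_update_type>` for
 * explanation of update types
 */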
enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type type;

	for (i = 0; i < surface_count; i++)
		updates[i].surface->update_flags.raw = 0;

	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
	if (type == UPDATE_TYPE_FULL)
		for (i = 0; i < surface_count; i++)
			updates[i].surface->update_flags.raw = 0xFFFFFFFF;

	if (type == UPDATE_TYPE_FAST && memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0)
		dc->optimized_required = true;

	return type;
}

static struct dc_stream_status *stream_get_status(
		struct dc_state *ctx,
		struct dc_stream_state *stream)
{
	uint8_t i;

	for (i = 0; i < ctx->stream_count; i++) {
		if (stream == ctx->streams[i]) {
			return &ctx->stream_status[i];
		}
	}

	return NULL;
}

1653
1654static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
1655
1656static void copy_surface_update_to_plane(
1657 struct dc_plane_state *surface,
1658 struct dc_surface_update *srf_update)
1659{
1660 if (srf_update->flip_addr) {
1661 surface->address = srf_update->flip_addr->address;
1662 surface->flip_immediate =
1663 srf_update->flip_addr->flip_immediate;
1664 surface->time.time_elapsed_in_us[surface->time.index] =
1665 srf_update->flip_addr->flip_timestamp_in_us -
1666 surface->time.prev_update_time_in_us;
1667 surface->time.prev_update_time_in_us =
1668 srf_update->flip_addr->flip_timestamp_in_us;
1669 surface->time.index++;
1670 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
1671 surface->time.index = 0;
1672 }
1673
1674 if (srf_update->scaling_info) {
1675 surface->scaling_quality =
1676 srf_update->scaling_info->scaling_quality;
1677 surface->dst_rect =
1678 srf_update->scaling_info->dst_rect;
1679 surface->src_rect =
1680 srf_update->scaling_info->src_rect;
1681 surface->clip_rect =
1682 srf_update->scaling_info->clip_rect;
1683 }
1684
1685 if (srf_update->plane_info) {
1686 surface->color_space =
1687 srf_update->plane_info->color_space;
1688 surface->format =
1689 srf_update->plane_info->format;
1690 surface->plane_size =
1691 srf_update->plane_info->plane_size;
1692 surface->rotation =
1693 srf_update->plane_info->rotation;
1694 surface->horizontal_mirror =
1695 srf_update->plane_info->horizontal_mirror;
1696 surface->stereo_format =
1697 srf_update->plane_info->stereo_format;
1698 surface->tiling_info =
1699 srf_update->plane_info->tiling_info;
1700 surface->visible =
1701 srf_update->plane_info->visible;
1702 surface->per_pixel_alpha =
1703 srf_update->plane_info->per_pixel_alpha;
1704 surface->global_alpha =
1705 srf_update->plane_info->global_alpha;
1706 surface->global_alpha_value =
1707 srf_update->plane_info->global_alpha_value;
1708 surface->dcc =
1709 srf_update->plane_info->dcc;
1710 surface->sdr_white_level =
1711 srf_update->plane_info->sdr_white_level;
1712 surface->layer_index =
1713 srf_update->plane_info->layer_index;
1714 }
1715
1716 if (srf_update->gamma &&
1717 (surface->gamma_correction !=
1718 srf_update->gamma)) {
1719 memcpy(&surface->gamma_correction->entries,
1720 &srf_update->gamma->entries,
1721 sizeof(struct dc_gamma_entries));
1722 surface->gamma_correction->is_identity =
1723 srf_update->gamma->is_identity;
1724 surface->gamma_correction->num_entries =
1725 srf_update->gamma->num_entries;
1726 surface->gamma_correction->type =
1727 srf_update->gamma->type;
1728 }
1729
1730 if (srf_update->in_transfer_func &&
1731 (surface->in_transfer_func !=
1732 srf_update->in_transfer_func)) {
1733 surface->in_transfer_func->sdr_ref_white_level =
1734 srf_update->in_transfer_func->sdr_ref_white_level;
1735 surface->in_transfer_func->tf =
1736 srf_update->in_transfer_func->tf;
1737 surface->in_transfer_func->type =
1738 srf_update->in_transfer_func->type;
1739 memcpy(&surface->in_transfer_func->tf_pts,
1740 &srf_update->in_transfer_func->tf_pts,
1741 sizeof(struct dc_transfer_func_distributed_points));
1742 }
1743
1744#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
1745 if (srf_update->func_shaper &&
1746 (surface->in_shaper_func !=
1747 srf_update->func_shaper))
1748 memcpy(surface->in_shaper_func, srf_update->func_shaper,
1749 sizeof(*surface->in_shaper_func));
1750
1751 if (srf_update->lut3d_func &&
1752 (surface->lut3d_func !=
1753 srf_update->lut3d_func))
1754 memcpy(surface->lut3d_func, srf_update->lut3d_func,
1755 sizeof(*surface->lut3d_func));
1756
1757 if (srf_update->blend_tf &&
1758 (surface->blend_tf !=
1759 srf_update->blend_tf))
1760 memcpy(surface->blend_tf, srf_update->blend_tf,
1761 sizeof(*surface->blend_tf));
1762
1763#endif
1764 if (srf_update->input_csc_color_matrix)
1765 surface->input_csc_color_matrix =
1766 *srf_update->input_csc_color_matrix;
1767
1768 if (srf_update->coeff_reduction_factor)
1769 surface->coeff_reduction_factor =
1770 *srf_update->coeff_reduction_factor;
1771}
static void copy_stream_update_to_stream(struct dc *dc,
					 struct dc_state *context,
					 struct dc_stream_state *stream,
					 const struct dc_stream_update *update)
{
	if (update == NULL || stream == NULL)
		return;

	if (update->src.height && update->src.width)
		stream->src = update->src;

	if (update->dst.height && update->dst.width)
		stream->dst = update->dst;

	if (update->out_transfer_func &&
	    stream->out_transfer_func != update->out_transfer_func) {
		stream->out_transfer_func->sdr_ref_white_level =
			update->out_transfer_func->sdr_ref_white_level;
		stream->out_transfer_func->tf = update->out_transfer_func->tf;
		stream->out_transfer_func->type =
			update->out_transfer_func->type;
		memcpy(&stream->out_transfer_func->tf_pts,
		       &update->out_transfer_func->tf_pts,
		       sizeof(struct dc_transfer_func_distributed_points));
	}

	if (update->hdr_static_metadata)
		stream->hdr_static_metadata = *update->hdr_static_metadata;

	if (update->abm_level)
		stream->abm_level = *update->abm_level;

	if (update->periodic_interrupt0)
		stream->periodic_interrupt0 = *update->periodic_interrupt0;

	if (update->periodic_interrupt1)
		stream->periodic_interrupt1 = *update->periodic_interrupt1;

	if (update->gamut_remap)
		stream->gamut_remap_matrix = *update->gamut_remap;

	/* Note: this being updated after mode set is currently not a use case
	 * however if it arises OCSC would need to be reprogrammed at the
	 * minimum
	 */
	if (update->output_color_space)
		stream->output_color_space = *update->output_color_space;

	if (update->output_csc_transform)
		stream->csc_color_matrix = *update->output_csc_transform;

	if (update->vrr_infopacket)
		stream->vrr_infopacket = *update->vrr_infopacket;

	if (update->dpms_off)
		stream->dpms_off = *update->dpms_off;

	if (update->vsc_infopacket)
		stream->vsc_infopacket = *update->vsc_infopacket;

	if (update->vsp_infopacket)
		stream->vsp_infopacket = *update->vsp_infopacket;

	if (update->dither_option)
		stream->dither_option = *update->dither_option;
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
	/* update current stream with writeback info */
	if (update->wb_update) {
		int i;

		stream->num_wb_info = update->wb_update->num_wb_info;
		ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
		for (i = 0; i < stream->num_wb_info; i++)
			stream->writeback_info[i] =
				update->wb_update->writeback_info[i];
	}
#endif
#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
	if (update->dsc_config) {
		struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
		uint32_t old_dsc_enabled = stream->timing.flags.DSC;
		uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
				       update->dsc_config->num_slices_v != 0);

		stream->timing.dsc_cfg = *update->dsc_config;
		stream->timing.flags.DSC = enable_dsc;
		if (!dc->res_pool->funcs->validate_bandwidth(dc, context,
							     true)) {
			stream->timing.dsc_cfg = old_dsc_cfg;
			stream->timing.flags.DSC = old_dsc_enabled;
		}
	}
#endif
}

1867
1868static void commit_planes_do_stream_update(struct dc *dc,
1869 struct dc_stream_state *stream,
1870 struct dc_stream_update *stream_update,
1871 enum surface_update_type update_type,
1872 struct dc_state *context)
1873{
1874 int j;
1875
1876
1877 for (j = 0; j < dc->res_pool->pipe_count; j++) {
1878 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1879
1880 if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
1881
1882 if (stream_update->periodic_interrupt0 &&
1883 dc->hwss.setup_periodic_interrupt)
1884 dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE0);
1885
1886 if (stream_update->periodic_interrupt1 &&
1887 dc->hwss.setup_periodic_interrupt)
1888 dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE1);
1889
1890 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
1891 stream_update->vrr_infopacket ||
1892 stream_update->vsc_infopacket ||
1893 stream_update->vsp_infopacket) {
1894 resource_build_info_frame(pipe_ctx);
1895 dc->hwss.update_info_frame(pipe_ctx);
1896 }
1897
1898 if (stream_update->gamut_remap)
1899 dc_stream_set_gamut_remap(dc, stream);
1900
1901 if (stream_update->output_csc_transform)
1902 dc_stream_program_csc_matrix(dc, stream);
1903
1904 if (stream_update->dither_option) {
1905#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
1906 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
1907#endif
1908 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
1909 &pipe_ctx->stream->bit_depth_params);
1910 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
1911 &stream->bit_depth_params,
1912 &stream->clamping);
1913#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
1914 while (odm_pipe) {
1915 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
1916 &stream->bit_depth_params,
1917 &stream->clamping);
1918 odm_pipe = odm_pipe->next_odm_pipe;
1919 }
1920#endif
1921 }
1922
1923#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
1924 if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) {
1925 dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true);
1926 dp_update_dsc_config(pipe_ctx);
1927 dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false);
1928 }
1929#endif
1930
1931 if (update_type == UPDATE_TYPE_FAST)
1932 continue;
1933
1934 if (stream_update->dpms_off) {
1935 dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
1936
1937 if (*stream_update->dpms_off) {
1938 core_link_disable_stream(pipe_ctx);
1939
1940 if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
1941 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
1942
1943 dc->hwss.optimize_bandwidth(dc, dc->current_state);
1944 } else {
1945 if (!dc->optimize_seamless_boot)
1946 dc->hwss.prepare_bandwidth(dc, dc->current_state);
1947
1948 core_link_enable_stream(dc->current_state, pipe_ctx);
1949 }
1950
1951 dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
1952 }
1953
1954 if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
1955 if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
1956
1957 if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
1958 pipe_ctx->stream_res.abm->funcs->set_abm_level(
1959 pipe_ctx->stream_res.abm, stream->abm_level);
1960 } else
1961 pipe_ctx->stream_res.abm->funcs->set_abm_level(
1962 pipe_ctx->stream_res.abm, stream->abm_level);
1963 }
1964 }
1965 }
1966}
static void commit_planes_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int i, j;
	struct pipe_ctx *top_pipe_to_program = NULL;

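	/* The first flip after seamless boot ends the deferral: clear the
	 * per-stream flag and force a bandwidth/clock optimization pass.
	 */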
	if (dc->optimize_seamless_boot && surface_count > 0) {
		if (stream->apply_seamless_boot_optimization) {
			stream->apply_seamless_boot_optimization = false;
			dc->optimize_seamless_boot = false;
			dc->optimized_required = true;
		}
	}

	if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) {
		dc->hwss.prepare_bandwidth(dc, context);
		context_clock_trace(dc, context);
	}

	// Stream updates
	if (stream_update)
		commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);

	if (surface_count == 0) {
		/*
		 * In case of turning off screen, no need to program front end a second time.
		 * just return after program blank.
		 */
		dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
		return;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
		for (i = 0; i < surface_count; i++) {
			struct dc_plane_state *plane_state = srf_updates[i].surface;

			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (!pipe_ctx->plane_state)
					continue;
				if (pipe_ctx->plane_state != plane_state)
					continue;
				plane_state->triplebuffer_flips = false;
				if (update_type == UPDATE_TYPE_FAST &&
						dc->hwss.program_triplebuffer != NULL &&
						!plane_state->flip_immediate &&
						!dc->debug.disable_tri_buf) {
					/* triple buffer for VUpdate only */
					plane_state->triplebuffer_flips = true;
				}
			}
		}
	}
#endif

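	// Update Type FULL, Surface updates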
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe &&
				!pipe_ctx->prev_odm_pipe &&
				pipe_ctx->stream &&
				pipe_ctx->stream == stream) {
			struct dc_stream_status *stream_status = NULL;

			top_pipe_to_program = pipe_ctx;

			if (!pipe_ctx->plane_state)
				continue;

			/* Full fe update*/
			if (update_type == UPDATE_TYPE_FAST)
				continue;

#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
			ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);

			if (dc->hwss.program_triplebuffer != NULL &&
					!dc->debug.disable_tri_buf) {
				/*turn off triple buffer for full update*/
				dc->hwss.program_triplebuffer(
					dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
			}
#endif
			stream_status =
					stream_get_status(context, pipe_ctx->stream);

			dc->hwss.apply_ctx_for_surface(
					dc, pipe_ctx->stream, stream_status->plane_count, context);
		}
	}

	// Update Type FAST, Surface updates
	if (update_type == UPDATE_TYPE_FAST) {
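		/* Lock the top pipe while updating plane addrs, since freesync
		 * requires plane addr update event triggers to be synchronized.
		 * top_pipe_to_program is expected to never be NULL.
		 */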
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);

#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
		if (dc->hwss.set_flip_control_gsl)
			for (i = 0; i < surface_count; i++) {
				struct dc_plane_state *plane_state = srf_updates[i].surface;

				for (j = 0; j < dc->res_pool->pipe_count; j++) {
					struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

					if (pipe_ctx->stream != stream)
						continue;

					if (pipe_ctx->plane_state != plane_state)
						continue;

					// GSL has to be used for flip immediate
					dc->hwss.set_flip_control_gsl(pipe_ctx,
							plane_state->flip_immediate);
				}
			}
#endif

		for (i = 0; i < surface_count; i++) {
			struct dc_plane_state *plane_state = srf_updates[i].surface;

			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->stream != stream)
					continue;

				if (pipe_ctx->plane_state != plane_state)
					continue;
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
				/*program triple buffer after lock based on flip type*/
				if (dc->hwss.program_triplebuffer != NULL &&
						!dc->debug.disable_tri_buf) {
					dc->hwss.program_triplebuffer(
						dc, pipe_ctx, plane_state->triplebuffer_flips);
				}
#endif
				if (srf_updates[i].flip_addr)
					dc->hwss.update_plane_addr(dc, pipe_ctx);
			}
		}

		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
	}

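	// Fire manual trigger only when bottom plane is flipped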
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (pipe_ctx->bottom_pipe ||
				!pipe_ctx->stream ||
				pipe_ctx->stream != stream ||
				!pipe_ctx->plane_state->update_flags.bits.addr_update)
			continue;

		if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
			pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
	}
}

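/*
 * dc_commit_updates_for_stream - apply a set of surface and stream
 * updates to @stream. For FULL updates a fresh context is built from
 * @state and swapped in as the current state; FAST updates are applied
 * to the current context directly.
 */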
2144void dc_commit_updates_for_stream(struct dc *dc,
2145 struct dc_surface_update *srf_updates,
2146 int surface_count,
2147 struct dc_stream_state *stream,
2148 struct dc_stream_update *stream_update,
2149 struct dc_state *state)
2150{
2151 const struct dc_stream_status *stream_status;
2152 enum surface_update_type update_type;
2153 struct dc_state *context;
2154 struct dc_context *dc_ctx = dc->ctx;
2155 int i;
2156
2157 stream_status = dc_stream_get_status(stream);
2158 context = dc->current_state;
2159
2160 update_type = dc_check_update_surfaces_for_stream(
2161 dc, srf_updates, surface_count, stream_update, stream_status);
2162
2163 if (update_type >= update_surface_trace_level)
2164 update_surface_trace(dc, srf_updates, surface_count);
2165
2166
2167 if (update_type >= UPDATE_TYPE_FULL) {
2168
2169
		context = dc_create_state(dc);
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}

		dc_resource_state_copy_construct(state, context);

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
				new_pipe->plane_state->force_full_update = true;
		}
	}

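	/* stage the incoming updates into the plane and stream states */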
	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		copy_surface_update_to_plane(surface, &srf_updates[i]);
	}

	copy_stream_update_to_stream(dc, context, stream, stream_update);

	commit_planes_for_stream(
				dc,
				srf_updates,
				surface_count,
				stream,
				stream_update,
				update_type,
				context);

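	/* a full update created a new context; make it current and release
	 * the old one
	 */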
	if (dc->current_state != context) {
		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
				pipe_ctx->plane_state->force_full_update = false;
		}
	}

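	/* use the new current state to optimize clocks and watermarks */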
	if (update_type >= UPDATE_TYPE_FULL)
		dc_post_update_surfaces_to_stream(dc);
}

uint8_t dc_get_current_stream_count(struct dc *dc)
{
	return dc->current_state->stream_count;
}

struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
{
	if (i < dc->current_state->stream_count)
		return dc->current_state->streams[i];
	return NULL;
}

enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}
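/*
 * dc_interrupt_set() - enable or disable an interrupt source through the
 * irq service
 */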
bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{
	if (dc == NULL)
		return false;

	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
}

void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	dal_irq_service_ack(dc->res_pool->irqs, src);
}

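/*
 * dc_set_power_state() - on D0, reconstruct the current state and re-init the
 * hardware; on any other power state, tear the current context down to a
 * clean slate (all streams are expected to be released already), preserving
 * only the refcount and the display_mode_lib state. The DML copy lives on the
 * heap because struct display_mode_lib is too large for the stack.
 */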
void dc_set_power_state(
	struct dc *dc,
	enum dc_acpi_cm_power_state power_state)
{
	struct kref refcount;
	struct display_mode_lib *dml = kzalloc(sizeof(struct display_mode_lib),
						GFP_KERNEL);

	ASSERT(dml);
	if (!dml)
		return;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		dc_resource_state_construct(dc, dc->current_state);

		dc->hwss.init_hw(dc);

#ifdef CONFIG_DRM_AMD_DC_DCN2_0
		if (dc->hwss.init_sys_ctx != NULL &&
			dc->vm_pa_config.valid) {
			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
		}
#endif

		break;
	default:
		ASSERT(dc->current_state->stream_count == 0);
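		/* Zero out the current context so that on resume we start with
		 * clean state; only the refcount and the display_mode_lib
		 * state are carried across the reset.
		 */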
		refcount = dc->current_state->refcount;
		memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));

		dc_resource_state_destruct(dc->current_state);
		memset(dc->current_state, 0,
				sizeof(*dc->current_state));

		dc->current_state->refcount = refcount;
		dc->current_state->bw_ctx.dml = *dml;

		break;
	}

	kfree(dml);
}

void dc_resume(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++)
		core_link_resume(dc->links[i]);
}

unsigned int dc_get_current_backlight_pwm(struct dc *dc)
{
	struct abm *abm = dc->res_pool->abm;

	if (abm)
		return abm->funcs->get_current_backlight(abm);

	return 0;
}

unsigned int dc_get_target_backlight_pwm(struct dc *dc)
{
	struct abm *abm = dc->res_pool->abm;

	if (abm)
		return abm->funcs->get_target_backlight(abm);

	return 0;
}

bool dc_is_dmcu_initialized(struct dc *dc)
{
	struct dmcu *dmcu = dc->res_pool->dmcu;

	if (dmcu)
		return dmcu->funcs->is_dmcu_initialized(dmcu);
	return false;
}

bool dc_submit_i2c(
		struct dc *dc,
		uint32_t link_index,
		struct i2c_command *cmd)
{
	struct dc_link *link = dc->links[link_index];
	struct ddc_service *ddc = link->ddc;

	return dce_i2c_submit_command(
		dc->res_pool,
		ddc->ddc_pin,
		cmd);
}

static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
{
	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	dc_sink_retain(sink);

	dc_link->remote_sinks[dc_link->sink_count] = sink;
	dc_link->sink_count++;

	return true;
}
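/*
 * dc_link_add_remote_sink() - Create a sink and attach it to the link
 *
 * EDID length is in bytes
 */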
struct dc_sink *dc_link_add_remote_sink(
		struct dc_link *link,
		const uint8_t *edid,
		int len,
		struct dc_sink_init_data *init_data)
{
	struct dc_sink *dc_sink;
	enum dc_edid_status edid_status;

	if (len > DC_MAX_EDID_BUFFER_SIZE) {
		dm_error("Max EDID buffer size breached!\n");
		return NULL;
	}

	if (!init_data) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	if (!init_data->link) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_sink = dc_sink_create(init_data);

	if (!dc_sink)
		return NULL;

	memmove(dc_sink->dc_edid.raw_edid, edid, len);
	dc_sink->dc_edid.length = len;

	if (!link_add_remote_sink_helper(
			link,
			dc_sink))
		goto fail_add_sink;

	edid_status = dm_helpers_parse_edid_caps(
			link->ctx,
			&dc_sink->dc_edid,
			&dc_sink->edid_caps);

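	/*
	 * Treat the device as having no EDID if parsing fails: keep the sink
	 * but zero out the stored EDID length.
	 */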
	if (edid_status != EDID_OK) {
		dc_sink->dc_edid.length = 0;
		dm_error("Bad EDID, status=%d!\n", edid_status);
	}

	return dc_sink;

fail_add_sink:
	dc_sink_release(dc_sink);
	return NULL;
}
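/*
 * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
 *
 * Note that this just removes the struct dc_sink - it doesn't program
 * hardware or alter other members of dc_link
 */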
void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
{
	int i;

	if (!link->sink_count) {
		BREAK_TO_DEBUGGER();
		return;
	}

	for (i = 0; i < link->sink_count; i++) {
		if (link->remote_sinks[i] == sink) {
			dc_sink_release(sink);
			link->remote_sinks[i] = NULL;

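			/* shrink the array by shifting the remaining sinks
			 * down to fill the gap
			 */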
			while (i < link->sink_count - 1) {
				link->remote_sinks[i] = link->remote_sinks[i+1];
				i++;
			}
			link->remote_sinks[i] = NULL;
			link->sink_count--;
			return;
		}
	}
}
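/*
 * get_clock_requirements_for_state() - report the DCN clock requirements
 * computed for @state into @info; all values are in kHz.
 */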
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
	info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
	info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
	info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
	info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
	info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
	info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
	info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
	info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
	info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}

enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
{
	if (dc->hwss.set_clock)
		return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
	return DC_ERROR_UNEXPECTED;
}

void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
{
	if (dc->hwss.get_clock)
		dc->hwss.get_clock(dc, clock_type, clock_cfg);
}