1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/slab.h>
26#include <linux/mm.h>
27
28#include "dm_services.h"
29
30#include "dc.h"
31
32#include "core_status.h"
33#include "core_types.h"
34#include "hw_sequencer.h"
35#include "dce/dce_hwseq.h"
36
37#include "resource.h"
38
39#include "clk_mgr.h"
40#include "clock_source.h"
41#include "dc_bios_types.h"
42
43#include "bios_parser_interface.h"
44#include "include/irq_service_interface.h"
45#include "transform.h"
46#include "dmcu.h"
47#include "dpp.h"
48#include "timing_generator.h"
49#include "abm.h"
50#include "virtual/virtual_link_encoder.h"
51
52#include "link_hwss.h"
53#include "link_encoder.h"
54
55#include "dc_link_ddc.h"
56#include "dm_helpers.h"
57#include "mem_input.h"
58#include "hubp.h"
59
60#include "dc_link_dp.h"
61
62#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
63#include "dsc.h"
64#endif
65
66#ifdef CONFIG_DRM_AMD_DC_DCN2_0
67#include "vm_helper.h"
68#endif
69
70#include "dce/dce_i2c.h"
71
72#define DC_LOGGER \
73 dc->ctx->logger
74
/* Build identifier exposed through dc->build_id in dc_create(). */
static const char DC_BUILD_ID[] = "production-build";
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
131{
132 if (new > *original)
133 *original = new;
134}
135
136static void destroy_links(struct dc *dc)
137{
138 uint32_t i;
139
140 for (i = 0; i < dc->link_count; i++) {
141 if (NULL != dc->links[i])
142 link_destroy(&dc->links[i]);
143 }
144}
145
/*
 * create_links() - Populate dc->links[] with one dc_link per physical
 * connector reported by VBIOS, followed by @num_virtual_links software
 * links carrying a virtual encoder.
 *
 * eDP links may be skipped when the platform flags eDP as not connected,
 * or (under debug.remove_disconnect_edp) when sink detection finds
 * nothing attached; config.force_enum_edp overrides the skip.
 *
 * Return: true on success; false if VBIOS reports more connectors than
 * supported or a virtual-link allocation fails (links created so far
 * stay in dc->links[] for destroy_links() to clean up).
 */
static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			bool should_destory_link = false;

			if (link->connector_signal == SIGNAL_TYPE_EDP) {
				if (dc->config.edp_not_connected)
					should_destory_link = true;
				else if (dc->debug.remove_disconnect_edp) {
					enum dc_connection_type type;
					dc_link_detect_sink(link, &type);
					if (type == dc_connection_none)
						should_destory_link = true;
				}
			}

			if (dc->config.force_enum_edp || !should_destory_link) {
				dc->links[dc->link_count] = link;
				link->dc = dc;
				++dc->link_count;
			} else {
				link_destroy(&link);
			}
		}
	}

	/* Virtual links: software-only, backed by a virtual encoder. */
	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		/* Register the link before the encoder alloc so a later
		 * failure still leaves it reachable for cleanup.
		 */
		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	return true;

failed_alloc:
	return false;
}
251
252static struct dc_perf_trace *dc_perf_trace_create(void)
253{
254 return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
255}
256
257static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
258{
259 kfree(*perf_trace);
260 *perf_trace = NULL;
261}
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279bool dc_stream_adjust_vmin_vmax(struct dc *dc,
280 struct dc_stream_state *stream,
281 struct dc_crtc_timing_adjust *adjust)
282{
283 int i = 0;
284 bool ret = false;
285
286 for (i = 0; i < MAX_PIPES; i++) {
287 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
288
289 if (pipe->stream == stream && pipe->stream_res.tg) {
290 pipe->stream->adjust = *adjust;
291 dc->hwss.set_drr(&pipe,
292 1,
293 adjust->v_total_min,
294 adjust->v_total_max,
295 adjust->v_total_mid,
296 adjust->v_total_mid_frame_num);
297
298 ret = true;
299 }
300 }
301 return ret;
302}
303
304bool dc_stream_get_crtc_position(struct dc *dc,
305 struct dc_stream_state **streams, int num_streams,
306 unsigned int *v_pos, unsigned int *nom_v_pos)
307{
308
309 const struct dc_stream_state *stream = streams[0];
310 int i = 0;
311 bool ret = false;
312 struct crtc_position position;
313
314 for (i = 0; i < MAX_PIPES; i++) {
315 struct pipe_ctx *pipe =
316 &dc->current_state->res_ctx.pipe_ctx[i];
317
318 if (pipe->stream == stream && pipe->stream_res.stream_enc) {
319 dc->hwss.get_position(&pipe, 1, &position);
320
321 *v_pos = position.vertical_count;
322 *nom_v_pos = position.nominal_vcount;
323 ret = true;
324 }
325 }
326 return ret;
327}
328
329
330
331
332
333
334
335
336
337
338
339
340bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
341 bool enable, bool continuous)
342{
343 int i;
344 struct pipe_ctx *pipe;
345 struct crc_params param;
346 struct timing_generator *tg;
347
348 for (i = 0; i < MAX_PIPES; i++) {
349 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
350 if (pipe->stream == stream)
351 break;
352 }
353
354 if (i == MAX_PIPES)
355 return false;
356
357
358 param.windowa_x_start = 0;
359 param.windowa_y_start = 0;
360 param.windowa_x_end = pipe->stream->timing.h_addressable;
361 param.windowa_y_end = pipe->stream->timing.v_addressable;
362 param.windowb_x_start = 0;
363 param.windowb_y_start = 0;
364 param.windowb_x_end = pipe->stream->timing.h_addressable;
365 param.windowb_y_end = pipe->stream->timing.v_addressable;
366
367
368 param.selection = UNION_WINDOW_A_B;
369 param.continuous_mode = continuous;
370 param.enable = enable;
371
372 tg = pipe->stream_res.tg;
373
374
375 if (tg->funcs->configure_crc)
376 return tg->funcs->configure_crc(tg, ¶m);
377 DC_LOG_WARNING("CRC capture not supported.");
378 return false;
379}
380
381
382
383
384
385
386
387
388
389
390bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
391 uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
392{
393 int i;
394 struct pipe_ctx *pipe;
395 struct timing_generator *tg;
396
397 for (i = 0; i < MAX_PIPES; i++) {
398 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
399 if (pipe->stream == stream)
400 break;
401 }
402
403 if (i == MAX_PIPES)
404 return false;
405
406 tg = pipe->stream_res.tg;
407
408 if (tg->funcs->get_crc)
409 return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
410 DC_LOG_WARNING("CRC capture not supported.");
411 return false;
412}
413
414void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
415 enum dc_dynamic_expansion option)
416{
417
418 int i = 0;
419 struct pipe_ctx *pipe_ctx;
420
421 for (i = 0; i < MAX_PIPES; i++) {
422 if (dc->current_state->res_ctx.pipe_ctx[i].stream
423 == stream) {
424 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
425 pipe_ctx->stream_res.opp->dyn_expansion = option;
426 pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
427 pipe_ctx->stream_res.opp,
428 COLOR_SPACE_YCBCR601,
429 stream->timing.display_color_depth,
430 stream->signal);
431 }
432 }
433}
434
435void dc_stream_set_dither_option(struct dc_stream_state *stream,
436 enum dc_dither_option option)
437{
438 struct bit_depth_reduction_params params;
439 struct dc_link *link = stream->link;
440 struct pipe_ctx *pipes = NULL;
441 int i;
442
443 for (i = 0; i < MAX_PIPES; i++) {
444 if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
445 stream) {
446 pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
447 break;
448 }
449 }
450
451 if (!pipes)
452 return;
453 if (option > DITHER_OPTION_MAX)
454 return;
455
456 stream->dither_option = option;
457
458 memset(¶ms, 0, sizeof(params));
459 resource_build_bit_depth_reduction_params(stream, ¶ms);
460 stream->bit_depth_params = params;
461
462 if (pipes->plane_res.xfm &&
463 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
464 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
465 pipes->plane_res.xfm,
466 pipes->plane_res.scl_data.lb_params.depth,
467 &stream->bit_depth_params);
468 }
469
470 pipes->stream_res.opp->funcs->
471 opp_program_bit_depth_reduction(pipes->stream_res.opp, ¶ms);
472}
473
474bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
475{
476 int i = 0;
477 bool ret = false;
478 struct pipe_ctx *pipes;
479
480 for (i = 0; i < MAX_PIPES; i++) {
481 if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
482 pipes = &dc->current_state->res_ctx.pipe_ctx[i];
483 dc->hwss.program_gamut_remap(pipes);
484 ret = true;
485 }
486 }
487
488 return ret;
489}
490
491bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
492{
493 int i = 0;
494 bool ret = false;
495 struct pipe_ctx *pipes;
496
497 for (i = 0; i < MAX_PIPES; i++) {
498 if (dc->current_state->res_ctx.pipe_ctx[i].stream
499 == stream) {
500
501 pipes = &dc->current_state->res_ctx.pipe_ctx[i];
502 dc->hwss.program_output_csc(dc,
503 pipes,
504 stream->output_color_space,
505 stream->csc_color_matrix.matrix,
506 pipes->stream_res.opp->inst);
507 ret = true;
508 }
509 }
510
511 return ret;
512}
513
514void dc_stream_set_static_screen_events(struct dc *dc,
515 struct dc_stream_state **streams,
516 int num_streams,
517 const struct dc_static_screen_events *events)
518{
519 int i = 0;
520 int j = 0;
521 struct pipe_ctx *pipes_affected[MAX_PIPES];
522 int num_pipes_affected = 0;
523
524 for (i = 0; i < num_streams; i++) {
525 struct dc_stream_state *stream = streams[i];
526
527 for (j = 0; j < MAX_PIPES; j++) {
528 if (dc->current_state->res_ctx.pipe_ctx[j].stream
529 == stream) {
530 pipes_affected[num_pipes_affected++] =
531 &dc->current_state->res_ctx.pipe_ctx[j];
532 }
533 }
534 }
535
536 dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
537}
538
539static void destruct(struct dc *dc)
540{
541 if (dc->current_state) {
542 dc_release_state(dc->current_state);
543 dc->current_state = NULL;
544 }
545
546 destroy_links(dc);
547
548 if (dc->clk_mgr) {
549 dc_destroy_clk_mgr(dc->clk_mgr);
550 dc->clk_mgr = NULL;
551 }
552
553 dc_destroy_resource_pool(dc);
554
555 if (dc->ctx->gpio_service)
556 dal_gpio_service_destroy(&dc->ctx->gpio_service);
557
558 if (dc->ctx->created_bios)
559 dal_bios_parser_destroy(&dc->ctx->dc_bios);
560
561 dc_perf_trace_destroy(&dc->ctx->perf_trace);
562
563 kfree(dc->ctx);
564 dc->ctx = NULL;
565
566 kfree(dc->bw_vbios);
567 dc->bw_vbios = NULL;
568
569 kfree(dc->bw_dceip);
570 dc->bw_dceip = NULL;
571
572#ifdef CONFIG_DRM_AMD_DC_DCN1_0
573 kfree(dc->dcn_soc);
574 dc->dcn_soc = NULL;
575
576 kfree(dc->dcn_ip);
577 dc->dcn_ip = NULL;
578
579#endif
580#ifdef CONFIG_DRM_AMD_DC_DCN2_0
581 kfree(dc->vm_helper);
582 dc->vm_helper = NULL;
583
584#endif
585}
586
/*
 * construct() - One-time initialization of a freshly kzalloc'ed dc.
 * Allocates bandwidth-calc structures, the dc_context, BIOS parser,
 * GPIO service, resource pool, clock manager, the initial state and
 * the link table — in that order, since later steps read earlier ones.
 *
 * Return: true on success; on any failure, everything allocated so far
 * is torn down via destruct() and false is returned.
 */
static bool construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;
#endif

	enum dce_version dc_version = DCE_VERSION_UNKNOWN;
	dc->config = init_params->flags;

#ifdef CONFIG_DRM_AMD_DC_DCN2_0
	/* GPU virtual-memory bookkeeping helper (DCN2+ only) */
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);
		goto fail;
	}

#endif
	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
	dc->soc_bounding_box = init_params->soc_bounding_box;
#endif
#endif

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc->ctx = dc_ctx;

	dc_ctx->dce_environment = init_params->dce_environment;

	/* Determine the ASIC family/version once; everything downstream
	 * (BIOS parser, GPIO service, resource pool) keys off it.
	 */
	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc_ctx->dce_version = dc_version;

	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create the BIOS parser from the ATOM BIOS image. */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		/* Remember ownership so destruct() knows to destroy it. */
		dc_ctx->created_bios = true;
	}

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_version);
	if (!dc->res_pool)
		goto fail;

	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;

#ifdef CONFIG_DRM_AMD_DC_DCN2_1
	if (dc->res_pool->funcs->update_bw_bounding_box)
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
#endif

	/* Creation of current_state must occur after the resource pool,
	 * because dc_create_state() copies dc->dml which the pool set up.
	 */
	dc->current_state = dc_create_state(dc);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	dc_resource_state_construct(dc, dc->current_state);

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	return true;

fail:
	/* NOTE(review): on early failures dc->ctx may still be NULL here;
	 * destruct() must tolerate that — confirm.
	 */
	destruct(dc);
	return false;
}
746
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
/* Mark every writeback target on @stream disabled; always succeeds. */
static bool disable_all_writeback_pipes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	int idx;

	for (idx = 0; idx < stream->num_wb_info; idx++)
		stream->writeback_info[idx].wb_enabled = false;

	return true;
}
#endif
761
/*
 * disable_dangling_plane() - Before committing @context, disable planes
 * on streams that exist in the current state but not in @context, so no
 * stale surface keeps scanning out. Works on a temporary copy of the
 * current state, which then replaces dc->current_state.
 */
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state(dc);
	struct dc_state *current_ctx;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;

		/* Keep the stream if it survives into the new context. */
		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
#endif
			if (dc->hwss.apply_ctx_for_surface)
				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
		}
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
		/* NOTE(review): called once per pipe iteration, not once
		 * after the loop — confirm this is intended on DCN2.
		 */
		if (dc->hwss.program_front_end_for_ctx)
			dc->hwss.program_front_end_for_ctx(dc, dangling_context);
#endif
	}

	/* The pruned copy becomes the new current state. */
	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}
802
803
804
805
806
/*
 * dc_create() - Allocate and fully construct a display-core instance.
 * On success, derives the stream/link/audio capability caps from the
 * constructed resource pool. Returns NULL on any failure.
 */
struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (NULL == dc)
		goto alloc_fail;

	if (false == construct(dc, init_params))
		goto construct_fail;

	full_pipe_count = dc->res_pool->pipe_count;
	/* An underlay pipe cannot drive an independent stream. */
	if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
		full_pipe_count--;
	dc->caps.max_streams = min(
			full_pipe_count,
			dc->res_pool->stream_enc_count);

	dc->caps.max_links = dc->link_count;
	dc->caps.max_audios = dc->res_pool->audio_count;
	dc->caps.linear_pitch_alignment = 64;

	/* Populate versions exposed to the DM layer. */
	dc->versions.dc_ver = DC_VER;

	if (dc->res_pool->dmcu != NULL)
		dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");



	return dc;

construct_fail:
	kfree(dc);

alloc_fail:
	return NULL;
}
849
/* Run the hardware sequencer's one-time HW init for this DC instance. */
void dc_hardware_init(struct dc *dc)
{
	dc->hwss.init_hw(dc);
}
854
/* Store DM-provided callbacks; currently only HDCP's cp_psp hooks. */
void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	dc->ctx->cp_psp = init_params->cp_psp;
#endif
}
862
/* Clear the DM-provided callbacks registered by dc_init_callbacks(). */
void dc_deinit_callbacks(struct dc *dc)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
#endif
}
869
870void dc_destroy(struct dc **dc)
871{
872 destruct(*dc);
873 kfree(*dc);
874 *dc = NULL;
875}
876
877static void enable_timing_multisync(
878 struct dc *dc,
879 struct dc_state *ctx)
880{
881 int i = 0, multisync_count = 0;
882 int pipe_count = dc->res_pool->pipe_count;
883 struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
884
885 for (i = 0; i < pipe_count; i++) {
886 if (!ctx->res_ctx.pipe_ctx[i].stream ||
887 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
888 continue;
889 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
890 continue;
891 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
892 multisync_count++;
893 }
894
895 if (multisync_count > 0) {
896 dc->hwss.enable_per_frame_crtc_position_reset(
897 dc, multisync_count, multisync_pipes);
898 }
899}
900
/*
 * program_timing_sync() - Group top-level pipes whose stream timings are
 * synchronizable and program each multi-pipe group to run in lockstep.
 * Per-stream sync-group id/size/master are recorded in the stream status.
 */
static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;
	int num_group = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	/* Only top pipes with a stream are sync candidates. */
	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Pull every remaining pipe whose stream timing can be
		 * synchronized with pipe_set[0]'s into this group.
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;

			if (resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* Put the first plane-bearing pipe into slot 0 (master). */
		for (j = 0; j < group_size; j++) {
			if (pipe_set[j]->plane_state) {
				if (j == 0)
					break;

				swap(pipe_set[0], pipe_set[j]);
				break;
			}
		}

		/* Record sync-group membership in each stream's status. */
		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;

		}
		/* NOTE(review): 'j' intentionally continues from the
		 * master-search loop above; this drops the remaining
		 * plane-bearing pipes from the set — confirm intent.
		 */
		for (j = j + 1; j < group_size; j++) {
			if (pipe_set[j]->plane_state) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		if (group_size > 1) {
			dc->hwss.enable_timing_synchronization(
				dc, group_index, group_size, pipe_set);
			group_index++;
		}
		num_group++;
	}
}
984
985static bool context_changed(
986 struct dc *dc,
987 struct dc_state *context)
988{
989 uint8_t i;
990
991 if (context->stream_count != dc->current_state->stream_count)
992 return true;
993
994 for (i = 0; i < dc->current_state->stream_count; i++) {
995 if (dc->current_state->streams[i] != context->streams[i])
996 return true;
997 }
998
999 return false;
1000}
1001
/*
 * dc_validate_seamless_boot_timing() - Check whether @crtc_timing matches
 * what firmware already programmed into hardware, so the driver can take
 * over the display without a full mode set (seamless boot).
 *
 * Only DP/eDP sinks qualify. Returns true when the DIG is enabled, the
 * live OTG timing matches field-for-field and, for DP, the pixel clock
 * and pixel format also match.
 */
bool dc_validate_seamless_boot_timing(const struct dc *dc,
				const struct dc_sink *sink,
				struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct stream_encoder *se = NULL;

	struct dc_crtc_timing hw_crtc_timing = {0};

	struct dc_link *link = sink->link;
	unsigned int i, enc_inst, tg_inst = 0;

	/* Seamless boot is only supported on DP and eDP signals. */
	if (sink->sink_signal != SIGNAL_TYPE_DISPLAY_PORT &&
		sink->sink_signal != SIGNAL_TYPE_EDP)
		return false;

	/* An enabled DIG identifies a display firmware lit up. */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
		return false;

	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	if (enc_inst == ENGINE_ID_UNKNOWN)
		return false;

	/* Find the stream encoder feeding that DIG, and from it the OTG. */
	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
		if (dc->res_pool->stream_enc[i]->id == enc_inst) {

			se = dc->res_pool->stream_enc[i];

			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
				dc->res_pool->stream_enc[i]);
			break;
		}
	}

	/* No matching stream encoder found. */
	if (i == dc->res_pool->stream_enc_count)
		return false;

	if (tg_inst >= dc->res_pool->timing_generator_count)
		return false;

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->get_hw_timing)
		return false;

	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
		return false;

	/* Requested timing must match the live HW timing field-for-field. */
	if (crtc_timing->h_total != hw_crtc_timing.h_total)
		return false;

	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
		return false;

	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
		return false;

	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
		return false;

	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
		return false;

	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
		return false;

	if (crtc_timing->v_total != hw_crtc_timing.v_total)
		return false;

	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
		return false;

	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
		return false;

	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
		return false;

	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
		return false;

	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
		return false;

	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz;

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			tg_inst, &pix_clk_100hz);

		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;

		if (!se->funcs->dp_get_pixel_format)
			return false;

		if (!se->funcs->dp_get_pixel_format(
			se,
			&hw_crtc_timing.pixel_encoding,
			&hw_crtc_timing.display_color_depth))
			return false;

		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
			return false;

		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
			return false;
	}

	return true;
}
1118
1119bool dc_enable_stereo(
1120 struct dc *dc,
1121 struct dc_state *context,
1122 struct dc_stream_state *streams[],
1123 uint8_t stream_count)
1124{
1125 bool ret = true;
1126 int i, j;
1127 struct pipe_ctx *pipe;
1128
1129 for (i = 0; i < MAX_PIPES; i++) {
1130 if (context != NULL)
1131 pipe = &context->res_ctx.pipe_ctx[i];
1132 else
1133 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1134 for (j = 0 ; pipe && j < stream_count; j++) {
1135 if (streams[j] && streams[j] == pipe->stream &&
1136 dc->hwss.setup_stereo)
1137 dc->hwss.setup_stereo(pipe, dc);
1138 }
1139 }
1140
1141 return ret;
1142}
1143
1144
1145
1146
1147
/*
 * dc_commit_state_no_check() - Apply @context to hardware without
 * checking whether it differs from the current state. On success the
 * committed context replaces dc->current_state (with a reference taken).
 *
 * Return: DC_OK, or the error from hwss.apply_ctx_to_hw().
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

	disable_dangling_plane(dc, context);

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb))
		dc->hwss.enable_accelerated_mode(dc, context);

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization)
			dc->optimize_seamless_boot = true;
	}

	/* Skip the bandwidth bump during seamless boot takeover. */
	if (!dc->optimize_seamless_boot)
		dc->hwss.prepare_bandwidth(dc, context);

	/* Re-program planes for streams whose mode did not change, to
	 * free up pipe/plane resources before the full mode programming.
	 */
	if (dc->hwss.apply_ctx_for_surface)
		for (i = 0; i < context->stream_count; i++) {
			if (context->streams[i]->mode_changed)
				continue;

			dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context);
		}
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
	if (dc->hwss.program_front_end_for_ctx)
		dc->hwss.program_front_end_for_ctx(dc, context);
#endif

	/* Wait for outstanding MPCC disconnects before touching HW. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK)
		return result;

	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}

	/* Program planes for streams whose mode did change. */
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
	if (dc->hwss.program_front_end_for_ctx)
		dc->hwss.program_front_end_for_ctx(dc, context);
#endif
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		if (!context->streams[i]->mode_changed)
			continue;

		if (dc->hwss.apply_ctx_for_surface)
			dc->hwss.apply_ctx_for_surface(
					dc, context->streams[i],
					context->stream_status[i].plane_count,
					context);

		/* Per-stream stereo setup for pipes bound to this context. */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0 ; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	/* Swap in the newly committed context. */
	dc_release_state(dc->current_state);

	dc->current_state = context;

	dc_retain_state(dc->current_state);

	return result;
}
1259
1260bool dc_commit_state(struct dc *dc, struct dc_state *context)
1261{
1262 enum dc_status result = DC_ERROR_UNEXPECTED;
1263 int i;
1264
1265 if (false == context_changed(dc, context))
1266 return DC_OK;
1267
1268 DC_LOG_DC("%s: %d streams\n",
1269 __func__, context->stream_count);
1270
1271 for (i = 0; i < context->stream_count; i++) {
1272 struct dc_stream_state *stream = context->streams[i];
1273
1274 dc_stream_log(dc, stream);
1275 }
1276
1277 result = dc_commit_state_no_check(dc, context);
1278
1279 return (result == DC_OK);
1280}
1281
1282bool dc_post_update_surfaces_to_stream(struct dc *dc)
1283{
1284 int i;
1285 struct dc_state *context = dc->current_state;
1286
1287 if (!dc->optimized_required || dc->optimize_seamless_boot)
1288 return true;
1289
1290 post_surface_trace(dc);
1291
1292 for (i = 0; i < dc->res_pool->pipe_count; i++)
1293 if (context->res_ctx.pipe_ctx[i].stream == NULL ||
1294 context->res_ctx.pipe_ctx[i].plane_state == NULL) {
1295 context->res_ctx.pipe_ctx[i].pipe_idx = i;
1296 dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
1297 }
1298
1299 dc->optimized_required = false;
1300
1301 dc->hwss.optimize_bandwidth(dc, context);
1302 return true;
1303}
1304
/*
 * dc_create_state() - Allocate a zeroed dc_state with refcount 1.
 * Returns NULL on allocation failure.
 */
struct dc_state *dc_create_state(struct dc *dc)
{
	struct dc_state *context = kvzalloc(sizeof(struct dc_state),
					    GFP_KERNEL);

	if (!context)
		return NULL;
	/* Each context needs its own DML/VBA instance, seeded from the
	 * base DML instance kept on dc (set up by the resource pool).
	 */
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
#endif

	kref_init(&context->refcount);

	return context;
}
1324
/*
 * dc_copy_state() - Copy a dc_state: the struct is memcpy'd, intra-state
 * pipe pointers are re-targeted into the copy, and every stream/plane is
 * retained. The copy's refcount starts at 1. Returns NULL on OOM.
 */
struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
	int i, j;
	struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);

	if (!new_ctx)
		return NULL;
	memcpy(new_ctx, src_ctx, sizeof(struct dc_state));

	/* Re-point pipe linkage at the copied array; pipe_idx is the
	 * stable index that survives the memcpy.
	 */
	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];

		if (cur_pipe->top_pipe)
			cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];

		if (cur_pipe->bottom_pipe)
			cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];

		if (cur_pipe->prev_odm_pipe)
			cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];

		if (cur_pipe->next_odm_pipe)
			cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];

	}

	/* The copy holds its own references on streams and planes. */
	for (i = 0; i < new_ctx->stream_count; i++) {
		dc_stream_retain(new_ctx->streams[i]);
		for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
			dc_plane_state_retain(
				new_ctx->stream_status[i].plane_states[j]);
	}

	kref_init(&new_ctx->refcount);

	return new_ctx;
}
1362
/* Take an extra reference on @context; pair with dc_release_state(). */
void dc_retain_state(struct dc_state *context)
{
	kref_get(&context->refcount);
}
1367
/* kref release callback: tear down the state's resources and free it. */
static void dc_state_free(struct kref *kref)
{
	struct dc_state *context = container_of(kref, struct dc_state, refcount);
	dc_resource_state_destruct(context);
	kvfree(context);
}
1374
/* Drop a reference on @context; frees it when the last reference goes. */
void dc_release_state(struct dc_state *context)
{
	kref_put(&context->refcount, dc_state_free);
}
1379
1380bool dc_set_generic_gpio_for_stereo(bool enable,
1381 struct gpio_service *gpio_service)
1382{
1383 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
1384 struct gpio_pin_info pin_info;
1385 struct gpio *generic;
1386 struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
1387 GFP_KERNEL);
1388
1389 if (!config)
1390 return false;
1391 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
1392
1393 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
1394 kfree(config);
1395 return false;
1396 } else {
1397 generic = dal_gpio_service_create_generic_mux(
1398 gpio_service,
1399 pin_info.offset,
1400 pin_info.mask);
1401 }
1402
1403 if (!generic) {
1404 kfree(config);
1405 return false;
1406 }
1407
1408 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
1409
1410 config->enable_output_from_mux = enable;
1411 config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
1412
1413 if (gpio_result == GPIO_RESULT_OK)
1414 gpio_result = dal_mux_setup_config(generic, config);
1415
1416 if (gpio_result == GPIO_RESULT_OK) {
1417 dal_gpio_close(generic);
1418 dal_gpio_destroy_generic_mux(&generic);
1419 kfree(config);
1420 return true;
1421 } else {
1422 dal_gpio_close(generic);
1423 dal_gpio_destroy_generic_mux(&generic);
1424 kfree(config);
1425 return false;
1426 }
1427}
1428
1429static bool is_surface_in_context(
1430 const struct dc_state *context,
1431 const struct dc_plane_state *plane_state)
1432{
1433 int j;
1434
1435 for (j = 0; j < MAX_PIPES; j++) {
1436 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1437
1438 if (plane_state == pipe_ctx->plane_state) {
1439 return true;
1440 }
1441 }
1442
1443 return false;
1444}
1445
1446static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
1447{
1448 union surface_update_flags *update_flags = &u->surface->update_flags;
1449 enum surface_update_type update_type = UPDATE_TYPE_FAST;
1450
1451 if (!u->plane_info)
1452 return UPDATE_TYPE_FAST;
1453
1454 if (u->plane_info->color_space != u->surface->color_space) {
1455 update_flags->bits.color_space_change = 1;
1456 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1457 }
1458
1459 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
1460 update_flags->bits.horizontal_mirror_change = 1;
1461 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1462 }
1463
1464 if (u->plane_info->rotation != u->surface->rotation) {
1465 update_flags->bits.rotation_change = 1;
1466 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1467 }
1468
1469 if (u->plane_info->format != u->surface->format) {
1470 update_flags->bits.pixel_format_change = 1;
1471 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1472 }
1473
1474 if (u->plane_info->stereo_format != u->surface->stereo_format) {
1475 update_flags->bits.stereo_format_change = 1;
1476 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1477 }
1478
1479 if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
1480 update_flags->bits.per_pixel_alpha_change = 1;
1481 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1482 }
1483
1484 if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
1485 update_flags->bits.global_alpha_change = 1;
1486 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1487 }
1488
1489 if (u->plane_info->sdr_white_level != u->surface->sdr_white_level) {
1490 update_flags->bits.sdr_white_level = 1;
1491 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1492 }
1493
1494 if (u->plane_info->dcc.enable != u->surface->dcc.enable
1495 || u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
1496 || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
1497 update_flags->bits.dcc_change = 1;
1498 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1499 }
1500
1501 if (resource_pixel_format_to_bpp(u->plane_info->format) !=
1502 resource_pixel_format_to_bpp(u->surface->format)) {
1503
1504
1505
1506 update_flags->bits.bpp_change = 1;
1507 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1508 }
1509
1510 if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
1511 || u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
1512 || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
1513 update_flags->bits.plane_size_change = 1;
1514 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1515 }
1516
1517
1518 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
1519 sizeof(union dc_tiling_info)) != 0) {
1520 update_flags->bits.swizzle_change = 1;
1521 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1522
1523
1524
1525
1526 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
1527
1528
1529
1530 update_flags->bits.bandwidth_change = 1;
1531 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1532 }
1533 }
1534
1535
1536 return update_type;
1537}
1538
/*
 * get_scaling_info_update_type() - Classify how disruptive a scaling
 * update is.
 *
 * Sets scaling/position/clock/bandwidth bits in the surface's
 * update_flags based on how the new clip/src/dst rects differ from the
 * surface's current ones, then maps those bits to an update type:
 * clock or bandwidth changes force FULL, scaling or position changes
 * need MED, otherwise FAST.
 */
static enum surface_update_type get_scaling_info_update_type(
		const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height) {
		update_flags->bits.scaling_change = 1;

		/* Making the dst rect smaller than before while still
		 * downscaling relative to the src rect increases the
		 * scaling ratio, which costs bandwidth.
		 */
		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))

			update_flags->bits.bandwidth_change = 1;
	}

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
		|| u->scaling_info->src_rect.height != u->surface->src_rect.height) {

		update_flags->bits.scaling_change = 1;
		/* A larger source viewport means more pixels to fetch per
		 * line, which may require a clock bump.
		 */
		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
				&& u->scaling_info->src_rect.height > u->surface->src_rect.height)

			update_flags->bits.clock_change = 1;
	}

	/* Pure position moves (same sizes) only need re-positioning. */
	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
		update_flags->bits.position_change = 1;

	if (update_flags->bits.clock_change
			|| update_flags->bits.bandwidth_change)
		return UPDATE_TYPE_FULL;

	if (update_flags->bits.scaling_change
			|| update_flags->bits.position_change)
		return UPDATE_TYPE_MED;

	return UPDATE_TYPE_FAST;
}
1589
1590static enum surface_update_type det_surface_update(const struct dc *dc,
1591 const struct dc_surface_update *u)
1592{
1593 const struct dc_state *context = dc->current_state;
1594 enum surface_update_type type;
1595 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1596 union surface_update_flags *update_flags = &u->surface->update_flags;
1597
1598 if (u->flip_addr)
1599 update_flags->bits.addr_update = 1;
1600
1601 if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
1602 update_flags->raw = 0xFFFFFFFF;
1603 return UPDATE_TYPE_FULL;
1604 }
1605
1606 update_flags->raw = 0;
1607
1608 type = get_plane_info_update_type(u);
1609 elevate_update_type(&overall_type, type);
1610
1611 type = get_scaling_info_update_type(u);
1612 elevate_update_type(&overall_type, type);
1613
1614 if (u->flip_addr)
1615 update_flags->bits.addr_update = 1;
1616
1617 if (u->in_transfer_func)
1618 update_flags->bits.in_transfer_func_change = 1;
1619
1620 if (u->input_csc_color_matrix)
1621 update_flags->bits.input_csc_change = 1;
1622
1623 if (u->coeff_reduction_factor)
1624 update_flags->bits.coeff_reduction_change = 1;
1625
1626 if (u->gamma) {
1627 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
1628
1629 if (u->plane_info)
1630 format = u->plane_info->format;
1631 else if (u->surface)
1632 format = u->surface->format;
1633
1634 if (dce_use_lut(format))
1635 update_flags->bits.gamma_change = 1;
1636 }
1637
1638 if (update_flags->bits.in_transfer_func_change) {
1639 type = UPDATE_TYPE_MED;
1640 elevate_update_type(&overall_type, type);
1641 }
1642
1643 if (update_flags->bits.input_csc_change
1644 || update_flags->bits.coeff_reduction_change
1645 || update_flags->bits.gamma_change) {
1646 type = UPDATE_TYPE_FULL;
1647 elevate_update_type(&overall_type, type);
1648 }
1649
1650 return overall_type;
1651}
1652
/*
 * check_update_surfaces_for_stream() - Compute the overall update type
 * for a stream update plus its surface updates.
 *
 * A plane-count mismatch against the current status forces FULL.
 * Stream-level flags are accumulated in the stream's update_flags; any
 * flag set before the raw check forces FULL.  Each surface update then
 * elevates the overall type via det_surface_update().
 */
static enum surface_update_type check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (stream_status == NULL || stream_status->plane_count != surface_count)
		overall_type = UPDATE_TYPE_FULL;

	/* some stream updates require passive update */
	if (stream_update) {
		union stream_update_flags *su_flags = &stream_update->stream->update_flags;

		if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
			(stream_update->dst.height != 0 && stream_update->dst.width != 0))
			su_flags->bits.scaling = 1;

		if (stream_update->out_transfer_func)
			su_flags->bits.out_tf = 1;

		if (stream_update->abm_level)
			su_flags->bits.abm_level = 1;

		if (stream_update->dpms_off)
			su_flags->bits.dpms_off = 1;

		if (stream_update->gamut_remap)
			su_flags->bits.gamut_remap = 1;

#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
		if (stream_update->wb_update)
			su_flags->bits.wb_update = 1;
#endif
		if (su_flags->raw != 0)
			overall_type = UPDATE_TYPE_FULL;

		/* NOTE(review): out_csc is deliberately set AFTER the raw
		 * check above, so an output-CSC-only change does not by
		 * itself force a full update — confirm this is intended
		 * before reordering.
		 */
		if (stream_update->output_csc_transform || stream_update->output_color_space)
			su_flags->bits.out_csc = 1;
	}

	for (i = 0 ; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(dc, &updates[i]);

		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}
1706
1707
1708
1709
1710
1711
/*
 * dc_check_update_surfaces_for_stream() - Public entry for update-type
 * classification.
 *
 * Clears all update flags, classifies the update, and on FULL sets
 * every flag on the stream and all surfaces so the programming paths
 * re-apply everything.  On FAST, compares the current clock state with
 * the active context's and marks the dc as needing a later optimize if
 * they diverge.
 *
 * NOTE(review): the FAST path dereferences dc->clk_mgr unconditionally —
 * assumes clk_mgr is always non-NULL here; confirm against dc_construct.
 */
enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type type;

	if (stream_update)
		stream_update->stream->update_flags.raw = 0;
	for (i = 0; i < surface_count; i++)
		updates[i].surface->update_flags.raw = 0;

	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
	if (type == UPDATE_TYPE_FULL) {
		/* Full update: force every flag so nothing is skipped. */
		if (stream_update)
			stream_update->stream->update_flags.raw = 0xFFFFFFFF;
		for (i = 0; i < surface_count; i++)
			updates[i].surface->update_flags.raw = 0xFFFFFFFF;
	}

	if (type == UPDATE_TYPE_FAST) {
		/* Fast path: still flag a deferred optimize if the clocks no
		 * longer match the committed context.
		 */
		if (dc->clk_mgr->funcs->are_clock_states_equal) {
			if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
				dc->optimized_required = true;
		/* Compare only the fields up to prev_p_state_change_support. */
		} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
			dc->optimized_required = true;
		}
	}

	return type;
}
1748
1749static struct dc_stream_status *stream_get_status(
1750 struct dc_state *ctx,
1751 struct dc_stream_state *stream)
1752{
1753 uint8_t i;
1754
1755 for (i = 0; i < ctx->stream_count; i++) {
1756 if (stream == ctx->streams[i]) {
1757 return &ctx->stream_status[i];
1758 }
1759 }
1760
1761 return NULL;
1762}
1763
/* Updates of this severity or worse are logged via update_surface_trace(). */
static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
1765
/*
 * copy_surface_update_to_plane() - Apply the populated fields of a
 * surface update onto the plane state.
 *
 * Each optional payload in @srf_update is copied into @surface only if
 * it is non-NULL, so callers can send sparse updates.  For gamma and
 * transfer functions the copy is skipped when the update points at the
 * very object the surface already holds (self-update), avoiding a
 * pointless (and overlapping) memcpy.
 */
static void copy_surface_update_to_plane(
		struct dc_plane_state *surface,
		struct dc_surface_update *srf_update)
{
	if (srf_update->flip_addr) {
		surface->address = srf_update->flip_addr->address;
		surface->flip_immediate =
			srf_update->flip_addr->flip_immediate;
		/* Keep a small ring buffer of inter-flip times for stats. */
		surface->time.time_elapsed_in_us[surface->time.index] =
			srf_update->flip_addr->flip_timestamp_in_us -
				surface->time.prev_update_time_in_us;
		surface->time.prev_update_time_in_us =
			srf_update->flip_addr->flip_timestamp_in_us;
		surface->time.index++;
		if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
			surface->time.index = 0;
	}

	if (srf_update->scaling_info) {
		surface->scaling_quality =
				srf_update->scaling_info->scaling_quality;
		surface->dst_rect =
				srf_update->scaling_info->dst_rect;
		surface->src_rect =
				srf_update->scaling_info->src_rect;
		surface->clip_rect =
				srf_update->scaling_info->clip_rect;
	}

	if (srf_update->plane_info) {
		surface->color_space =
				srf_update->plane_info->color_space;
		surface->format =
				srf_update->plane_info->format;
		surface->plane_size =
				srf_update->plane_info->plane_size;
		surface->rotation =
				srf_update->plane_info->rotation;
		surface->horizontal_mirror =
				srf_update->plane_info->horizontal_mirror;
		surface->stereo_format =
				srf_update->plane_info->stereo_format;
		surface->tiling_info =
				srf_update->plane_info->tiling_info;
		surface->visible =
				srf_update->plane_info->visible;
		surface->per_pixel_alpha =
				srf_update->plane_info->per_pixel_alpha;
		surface->global_alpha =
				srf_update->plane_info->global_alpha;
		surface->global_alpha_value =
				srf_update->plane_info->global_alpha_value;
		surface->dcc =
				srf_update->plane_info->dcc;
		surface->sdr_white_level =
				srf_update->plane_info->sdr_white_level;
		surface->layer_index =
				srf_update->plane_info->layer_index;
	}

	/* Deep-copy gamma unless the update aliases the surface's own. */
	if (srf_update->gamma &&
			(surface->gamma_correction !=
					srf_update->gamma)) {
		memcpy(&surface->gamma_correction->entries,
			&srf_update->gamma->entries,
			sizeof(struct dc_gamma_entries));
		surface->gamma_correction->is_identity =
			srf_update->gamma->is_identity;
		surface->gamma_correction->num_entries =
			srf_update->gamma->num_entries;
		surface->gamma_correction->type =
			srf_update->gamma->type;
	}

	if (srf_update->in_transfer_func &&
			(surface->in_transfer_func !=
				srf_update->in_transfer_func)) {
		surface->in_transfer_func->sdr_ref_white_level =
			srf_update->in_transfer_func->sdr_ref_white_level;
		surface->in_transfer_func->tf =
			srf_update->in_transfer_func->tf;
		surface->in_transfer_func->type =
			srf_update->in_transfer_func->type;
		memcpy(&surface->in_transfer_func->tf_pts,
			&srf_update->in_transfer_func->tf_pts,
			sizeof(struct dc_transfer_func_distributed_points));
	}

#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
	if (srf_update->func_shaper &&
			(surface->in_shaper_func !=
			srf_update->func_shaper))
		memcpy(surface->in_shaper_func, srf_update->func_shaper,
		sizeof(*surface->in_shaper_func));

	if (srf_update->lut3d_func &&
			(surface->lut3d_func !=
			srf_update->lut3d_func))
		memcpy(surface->lut3d_func, srf_update->lut3d_func,
		sizeof(*surface->lut3d_func));

	if (srf_update->blend_tf &&
			(surface->blend_tf !=
			srf_update->blend_tf))
		memcpy(surface->blend_tf, srf_update->blend_tf,
		sizeof(*surface->blend_tf));

#endif
	if (srf_update->input_csc_color_matrix)
		surface->input_csc_color_matrix =
			*srf_update->input_csc_color_matrix;

	if (srf_update->coeff_reduction_factor)
		surface->coeff_reduction_factor =
			*srf_update->coeff_reduction_factor;
}
1882
/*
 * copy_stream_update_to_stream() - Apply the populated fields of a
 * stream update onto the stream state.
 *
 * Like the surface variant, every optional payload is applied only when
 * non-NULL.  The DSC config is applied tentatively: if the resulting
 * timing fails bandwidth validation against @context, the previous DSC
 * config and enable flag are restored.
 */
static void copy_stream_update_to_stream(struct dc *dc,
					 struct dc_state *context,
					 struct dc_stream_state *stream,
					 const struct dc_stream_update *update)
{
	if (update == NULL || stream == NULL)
		return;

	if (update->src.height && update->src.width)
		stream->src = update->src;

	if (update->dst.height && update->dst.width)
		stream->dst = update->dst;

	/* Deep-copy the output transfer func unless it aliases the
	 * stream's own object.
	 */
	if (update->out_transfer_func &&
	    stream->out_transfer_func != update->out_transfer_func) {
		stream->out_transfer_func->sdr_ref_white_level =
			update->out_transfer_func->sdr_ref_white_level;
		stream->out_transfer_func->tf = update->out_transfer_func->tf;
		stream->out_transfer_func->type =
			update->out_transfer_func->type;
		memcpy(&stream->out_transfer_func->tf_pts,
		       &update->out_transfer_func->tf_pts,
		       sizeof(struct dc_transfer_func_distributed_points));
	}

	if (update->hdr_static_metadata)
		stream->hdr_static_metadata = *update->hdr_static_metadata;

	if (update->abm_level)
		stream->abm_level = *update->abm_level;

	if (update->periodic_interrupt0)
		stream->periodic_interrupt0 = *update->periodic_interrupt0;

	if (update->periodic_interrupt1)
		stream->periodic_interrupt1 = *update->periodic_interrupt1;

	if (update->gamut_remap)
		stream->gamut_remap_matrix = *update->gamut_remap;

	/* update current stream with writeback info */
	if (update->output_color_space)
		stream->output_color_space = *update->output_color_space;

	if (update->output_csc_transform)
		stream->csc_color_matrix = *update->output_csc_transform;

	if (update->vrr_infopacket)
		stream->vrr_infopacket = *update->vrr_infopacket;

	if (update->dpms_off)
		stream->dpms_off = *update->dpms_off;

	if (update->vsc_infopacket)
		stream->vsc_infopacket = *update->vsc_infopacket;

	if (update->vsp_infopacket)
		stream->vsp_infopacket = *update->vsp_infopacket;

	if (update->dither_option)
		stream->dither_option = *update->dither_option;
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
	/* update current stream with writeback info */
	if (update->wb_update) {
		int i;

		stream->num_wb_info = update->wb_update->num_wb_info;
		ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
		for (i = 0; i < stream->num_wb_info; i++)
			stream->writeback_info[i] =
				update->wb_update->writeback_info[i];
	}
#endif
#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
	if (update->dsc_config) {
		struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
		uint32_t old_dsc_enabled = stream->timing.flags.DSC;
		uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
				       update->dsc_config->num_slices_v != 0);

		stream->timing.dsc_cfg = *update->dsc_config;
		stream->timing.flags.DSC = enable_dsc;
		/* Roll back if the new DSC config fails validation. */
		if (!dc->res_pool->funcs->validate_bandwidth(dc, context,
							     true)) {
			stream->timing.dsc_cfg = old_dsc_cfg;
			stream->timing.flags.DSC = old_dsc_enabled;
		}
	}
#endif
}
1977
/*
 * commit_planes_do_stream_update() - Program stream-level changes onto
 * hardware.
 *
 * Walks all pipes and, for each head pipe (no top pipe, no previous ODM
 * pipe) driving @stream, applies the stream_update payloads: periodic
 * interrupts, info frames, gamut remap, output CSC, dithering, DSC
 * config.  The tail of the loop (DPMS and ABM programming) is skipped
 * for FAST updates, which must stay lightweight.
 */
static void commit_planes_do_stream_update(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int j;
	bool should_program_abm;

	/* Stream updates are only applied once per head pipe. */
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {

			if (stream_update->periodic_interrupt0 &&
					dc->hwss.setup_periodic_interrupt)
				dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE0);

			if (stream_update->periodic_interrupt1 &&
					dc->hwss.setup_periodic_interrupt)
				dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE1);

			/* Dynamic metadata streams carry HDR metadata in-band,
			 * so static metadata is only re-sent otherwise.
			 */
			if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
					stream_update->vrr_infopacket ||
					stream_update->vsc_infopacket ||
					stream_update->vsp_infopacket) {
				resource_build_info_frame(pipe_ctx);
				dc->hwss.update_info_frame(pipe_ctx);
			}

			if (stream_update->gamut_remap)
				dc_stream_set_gamut_remap(dc, stream);

			if (stream_update->output_csc_transform)
				dc_stream_program_csc_matrix(dc, stream);

			if (stream_update->dither_option) {
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
				struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
#endif
				resource_build_bit_depth_reduction_params(pipe_ctx->stream,
									&pipe_ctx->stream->bit_depth_params);
				pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
						&stream->bit_depth_params,
						&stream->clamping);
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
				/* ODM splits the stream across OPPs; program each. */
				while (odm_pipe) {
					odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
							&stream->bit_depth_params,
							&stream->clamping);
					odm_pipe = odm_pipe->next_odm_pipe;
				}
#endif
			}

#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
			/* DSC reconfig must not race other pipe programming. */
			if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) {
				dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true);
				dp_update_dsc_config(pipe_ctx);
				dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false);
			}
#endif

			/* Full fed below */
			if (update_type == UPDATE_TYPE_FAST)
				continue;

			if (stream_update->dpms_off) {
				dc->hwss.pipe_control_lock(dc, pipe_ctx, true);

				if (*stream_update->dpms_off) {
					core_link_disable_stream(pipe_ctx);

					if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
						pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

					/* Stream off: clocks can come back down. */
					dc->hwss.optimize_bandwidth(dc, dc->current_state);
				} else {
					if (!dc->optimize_seamless_boot)
						dc->hwss.prepare_bandwidth(dc, dc->current_state);

					core_link_enable_stream(dc->current_state, pipe_ctx);
				}

				dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
			}

			if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
				should_program_abm = true;

				/* Don't program ABM while the timing generator
				 * is blanked; assume not-blanked if we can't ask.
				 */
				if (pipe_ctx->stream_res.tg->funcs->is_blanked)
					if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
						should_program_abm = false;

				if (should_program_abm) {
					if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
						pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
					} else {
						pipe_ctx->stream_res.abm->funcs->set_abm_level(
							pipe_ctx->stream_res.abm, stream->abm_level);
					}
				}
			}
		}
	}
}
2085
/*
 * commit_planes_for_stream() - Program surface and stream updates for
 * one stream.
 *
 * Ordering is critical here:
 *  1. bandwidth/clocks are prepared before any programming on FULL
 *     updates (unless seamless boot defers it);
 *  2. stream-level updates are applied;
 *  3. for FULL updates each head pipe's full context is applied;
 *  4. for FAST updates, flips are programmed under the pipe control
 *     lock so all address updates land in the same VSYNC;
 *  5. manual trigger is armed last for any pipe whose address changed.
 */
static void commit_planes_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int i, j;
	struct pipe_ctx *top_pipe_to_program = NULL;

	if (dc->optimize_seamless_boot && surface_count > 0) {
		/* First flip after boot: the seamless-boot optimization is
		 * done, so fall back to normal bandwidth handling and flag
		 * a deferred optimize.
		 */
		if (stream->apply_seamless_boot_optimization) {
			stream->apply_seamless_boot_optimization = false;
			dc->optimize_seamless_boot = false;
			dc->optimized_required = true;
		}
	}

	if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) {
		dc->hwss.prepare_bandwidth(dc, context);
		context_clock_trace(dc, context);
	}

	/* Stream-level changes first. */
	if (stream_update)
		commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);

	if (surface_count == 0) {
		/* In case of turning off the display, no surface updates are
		 * sent; still apply the (plane-less) context.
		 */
		if (dc->hwss.apply_ctx_for_surface)
			dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
		if (dc->hwss.program_front_end_for_ctx)
			dc->hwss.program_front_end_for_ctx(dc, context);
#endif

		return;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
		/* Decide per plane whether this FAST flip may use the
		 * triple buffer (only for non-immediate flips).
		 */
		for (i = 0; i < surface_count; i++) {
			struct dc_plane_state *plane_state = srf_updates[i].surface;

			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
				if (!pipe_ctx->plane_state)
					continue;
				if (pipe_ctx->plane_state != plane_state)
					continue;
				plane_state->triplebuffer_flips = false;
				if (update_type == UPDATE_TYPE_FAST &&
					dc->hwss.program_triplebuffer != NULL &&
					!plane_state->flip_immediate &&
					!dc->debug.disable_tri_buf) {
					/*triple buffer for VUpdate  only*/
					plane_state->triplebuffer_flips = true;
				}
			}
		}
	}
#endif

	/* FULL path: apply the whole context for each head pipe. */
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe &&
			!pipe_ctx->prev_odm_pipe &&
			pipe_ctx->stream &&
			pipe_ctx->stream == stream) {
			struct dc_stream_status *stream_status = NULL;

			top_pipe_to_program = pipe_ctx;

			if (!pipe_ctx->plane_state)
				continue;

			/* Full fed below */
			if (update_type == UPDATE_TYPE_FAST)
				continue;

#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
			ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);

			if (dc->hwss.program_triplebuffer != NULL &&
				!dc->debug.disable_tri_buf) {
				/*turn off triple buffer for full update*/
				dc->hwss.program_triplebuffer(
					dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
			}
#endif
			stream_status =
				stream_get_status(context, pipe_ctx->stream);

			if (dc->hwss.apply_ctx_for_surface)
				dc->hwss.apply_ctx_for_surface(
					dc, pipe_ctx->stream, stream_status->plane_count, context);
		}
	}
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
	if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST)
		dc->hwss.program_front_end_for_ctx(dc, context);
#endif

	/* FAST path: program flips under one lock so they hit one VSYNC. */
	if (update_type == UPDATE_TYPE_FAST) {
		/* Lock the top pipe while updating plane addrs, since freesync
		 * requires apply_ctx_for_surface/program_front_end, which can
		 * be triggered by vsync, to be done atomically with the flip.
		 */
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);

#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
		if (dc->hwss.set_flip_control_gsl)
			for (i = 0; i < surface_count; i++) {
				struct dc_plane_state *plane_state = srf_updates[i].surface;

				for (j = 0; j < dc->res_pool->pipe_count; j++) {
					struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

					if (pipe_ctx->stream != stream)
						continue;

					if (pipe_ctx->plane_state != plane_state)
						continue;

					/* GSL gating must match flip-immediate mode. */
					dc->hwss.set_flip_control_gsl(pipe_ctx,
							plane_state->flip_immediate);
				}
			}
#endif

		/* Perform requested Updates */
		for (i = 0; i < surface_count; i++) {
			struct dc_plane_state *plane_state = srf_updates[i].surface;

			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->stream != stream)
					continue;

				if (pipe_ctx->plane_state != plane_state)
					continue;
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
				/*program triple buffer after lock based on flip type*/
				if (dc->hwss.program_triplebuffer != NULL &&
					!dc->debug.disable_tri_buf) {
					/*only enable triplebuffer for  fast_update*/
					dc->hwss.program_triplebuffer(
						dc, pipe_ctx, plane_state->triplebuffer_flips);
				}
#endif
				if (srf_updates[i].flip_addr)
					dc->hwss.update_plane_addr(dc, pipe_ctx);
			}
		}

		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
	}

	/* Arm the manual trigger on head pipes whose address changed. */
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (pipe_ctx->bottom_pipe ||
				!pipe_ctx->stream ||
				pipe_ctx->stream != stream ||
				!pipe_ctx->plane_state->update_flags.bits.addr_update)
			continue;

		if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
			pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
	}
}
2272
/*
 * dc_commit_updates_for_stream() - Validate and commit a set of surface
 * and stream updates.
 *
 * FAST/MED updates are applied directly to dc->current_state.  FULL
 * updates build a new state copied from @state, force a full update on
 * any plane whose pipe assignment changed, commit it, then swap it in
 * as the current state (releasing the old one) and run post-update
 * bandwidth optimization.
 */
void dc_commit_updates_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_state *state)
{
	const struct dc_stream_status *stream_status;
	enum surface_update_type update_type;
	struct dc_state *context;
	struct dc_context *dc_ctx = dc->ctx;
	int i;

	stream_status = dc_stream_get_status(stream);
	context = dc->current_state;

	update_type = dc_check_update_surfaces_for_stream(
				dc, srf_updates, surface_count, stream_update, stream_status);

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);

	/* update current stream with the new updates */
	if (update_type >= UPDATE_TYPE_FULL) {

		/* initialize scratch memory for building context */
		context = dc_create_state(dc);
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}

		dc_resource_state_copy_construct(state, context);

		/* Planes that moved pipes need everything re-programmed. */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
				new_pipe->plane_state->force_full_update = true;
		}
	}

	/* Copy the update payloads onto the live plane/stream objects. */
	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		copy_surface_update_to_plane(surface, &srf_updates[i]);

	}

	copy_stream_update_to_stream(dc, context, stream, stream_update);

	commit_planes_for_stream(
				dc,
				srf_updates,
				surface_count,
				stream,
				stream_update,
				update_type,
				context);

	/* FULL path: swap in the freshly committed state. */
	if (dc->current_state != context) {

		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);

		/* Forced-full flags were one-shot; clear them now. */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
				pipe_ctx->plane_state->force_full_update = false;
		}
	}

	if (update_type >= UPDATE_TYPE_FULL)
		dc_post_update_surfaces_to_stream(dc);

	return;

}
2356
/* Number of streams in the currently committed state. */
uint8_t dc_get_current_stream_count(struct dc *dc)
{
	return dc->current_state->stream_count;
}
2361
2362struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
2363{
2364 if (i < dc->current_state->stream_count)
2365 return dc->current_state->streams[i];
2366 return NULL;
2367}
2368
/* Translate a raw (src_id, ext_id) interrupt pair into a dc_irq_source. */
enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}
2376
2377
2378
2379
2380bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
2381{
2382
2383 if (dc == NULL)
2384 return false;
2385
2386 return dal_irq_service_set(dc->res_pool->irqs, src, enable);
2387}
2388
/* Acknowledge a pending interrupt from @src. */
void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	dal_irq_service_ack(dc->res_pool->irqs, src);
}
2393
/*
 * dc_set_power_state() - Handle ACPI power-state transitions.
 *
 * D0 (resume): rebuild the resource state and re-init hardware (plus
 * the GPU VM system context on DCN2 when a valid PA config exists).
 *
 * Any other state (suspend): wipe dc->current_state back to zero while
 * preserving its refcount and its display_mode_lib.  The mode lib is
 * staged through a heap-allocated scratch copy (@dml) because it is too
 * large for the stack; the allocation is made up front on both paths
 * and freed at the end.
 */
void dc_set_power_state(
	struct dc *dc,
	enum dc_acpi_cm_power_state power_state)
{
	struct kref refcount;
	struct display_mode_lib *dml = kzalloc(sizeof(struct display_mode_lib),
						GFP_KERNEL);

	ASSERT(dml);
	if (!dml)
		return;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		dc_resource_state_construct(dc, dc->current_state);

		dc->hwss.init_hw(dc);

#ifdef CONFIG_DRM_AMD_DC_DCN2_0
		if (dc->hwss.init_sys_ctx != NULL &&
			dc->vm_pa_config.valid) {
			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
		}
#endif

		break;
	default:
		ASSERT(dc->current_state->stream_count == 0);
		/* Zero out the current context so that on resume we start with
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */

		/* Preserve refcount */
		refcount = dc->current_state->refcount;
		/* Preserve display mode lib */
		memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));

		dc_resource_state_destruct(dc->current_state);
		memset(dc->current_state, 0,
				sizeof(*dc->current_state));

		dc->current_state->refcount = refcount;
		dc->current_state->bw_ctx.dml = *dml;

		break;
	}

	kfree(dml);
}
2444
2445void dc_resume(struct dc *dc)
2446{
2447
2448 uint32_t i;
2449
2450 for (i = 0; i < dc->link_count; i++)
2451 core_link_resume(dc->links[i]);
2452}
2453
2454unsigned int dc_get_current_backlight_pwm(struct dc *dc)
2455{
2456 struct abm *abm = dc->res_pool->abm;
2457
2458 if (abm)
2459 return abm->funcs->get_current_backlight(abm);
2460
2461 return 0;
2462}
2463
2464unsigned int dc_get_target_backlight_pwm(struct dc *dc)
2465{
2466 struct abm *abm = dc->res_pool->abm;
2467
2468 if (abm)
2469 return abm->funcs->get_target_backlight(abm);
2470
2471 return 0;
2472}
2473
2474bool dc_is_dmcu_initialized(struct dc *dc)
2475{
2476 struct dmcu *dmcu = dc->res_pool->dmcu;
2477
2478 if (dmcu)
2479 return dmcu->funcs->is_dmcu_initialized(dmcu);
2480 return false;
2481}
2482
2483bool dc_submit_i2c(
2484 struct dc *dc,
2485 uint32_t link_index,
2486 struct i2c_command *cmd)
2487{
2488
2489 struct dc_link *link = dc->links[link_index];
2490 struct ddc_service *ddc = link->ddc;
2491 return dce_i2c_submit_command(
2492 dc->res_pool,
2493 ddc->ddc_pin,
2494 cmd);
2495}
2496
2497static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
2498{
2499 if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
2500 BREAK_TO_DEBUGGER();
2501 return false;
2502 }
2503
2504 dc_sink_retain(sink);
2505
2506 dc_link->remote_sinks[dc_link->sink_count] = sink;
2507 dc_link->sink_count++;
2508
2509 return true;
2510}
2511
2512
2513
2514
2515
2516
2517struct dc_sink *dc_link_add_remote_sink(
2518 struct dc_link *link,
2519 const uint8_t *edid,
2520 int len,
2521 struct dc_sink_init_data *init_data)
2522{
2523 struct dc_sink *dc_sink;
2524 enum dc_edid_status edid_status;
2525
2526 if (len > DC_MAX_EDID_BUFFER_SIZE) {
2527 dm_error("Max EDID buffer size breached!\n");
2528 return NULL;
2529 }
2530
2531 if (!init_data) {
2532 BREAK_TO_DEBUGGER();
2533 return NULL;
2534 }
2535
2536 if (!init_data->link) {
2537 BREAK_TO_DEBUGGER();
2538 return NULL;
2539 }
2540
2541 dc_sink = dc_sink_create(init_data);
2542
2543 if (!dc_sink)
2544 return NULL;
2545
2546 memmove(dc_sink->dc_edid.raw_edid, edid, len);
2547 dc_sink->dc_edid.length = len;
2548
2549 if (!link_add_remote_sink_helper(
2550 link,
2551 dc_sink))
2552 goto fail_add_sink;
2553
2554 edid_status = dm_helpers_parse_edid_caps(
2555 link->ctx,
2556 &dc_sink->dc_edid,
2557 &dc_sink->edid_caps);
2558
2559
2560
2561
2562
2563 if (edid_status != EDID_OK) {
2564 dc_sink->dc_edid.length = 0;
2565 dm_error("Bad EDID, status%d!\n", edid_status);
2566 }
2567
2568 return dc_sink;
2569
2570fail_add_sink:
2571 dc_sink_release(dc_sink);
2572 return NULL;
2573}
2574
2575
2576
2577
2578
2579
2580
2581void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
2582{
2583 int i;
2584
2585 if (!link->sink_count) {
2586 BREAK_TO_DEBUGGER();
2587 return;
2588 }
2589
2590 for (i = 0; i < link->sink_count; i++) {
2591 if (link->remote_sinks[i] == sink) {
2592 dc_sink_release(sink);
2593 link->remote_sinks[i] = NULL;
2594
2595
2596 while (i < link->sink_count - 1) {
2597 link->remote_sinks[i] = link->remote_sinks[i+1];
2598 i++;
2599 }
2600 link->remote_sinks[i] = NULL;
2601 link->sink_count--;
2602 return;
2603 }
2604 }
2605}
2606
/* Copy the DCN clock requirements computed for a DC state into the
 * caller-supplied AsicStateEx report structure (all values in kHz,
 * truncated to unsigned int).
 */
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
	info->displayClock				= (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
	info->engineClock				= (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
	info->memoryClock				= (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
	info->maxSupportedDppClock		= (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
	info->dppClock					= (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
	info->socClock					= (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
	info->dcfClockDeepSleep			= (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
	info->fClock					= (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
	info->phyClock					= (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}
2619enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
2620{
2621 if (dc->hwss.set_clock)
2622 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
2623 return DC_ERROR_UNEXPECTED;
2624}
2625void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
2626{
2627 if (dc->hwss.get_clock)
2628 dc->hwss.get_clock(dc, clock_type, clock_cfg);
2629}
2630