linux/drivers/gpu/drm/amd/display/dc/core/dc.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include <linux/slab.h>
#include <linux/mm.h>

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"

#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"

#include "link_hwss.h"
#include "link_encoder.h"

#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"
#include "hubp.h"

#include "dc_link_dp.h"
#include "dc_dmub_srv.h"

#include "dsc.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#include "dmub/dmub_srv.h"

#include "dce/dmub_hw_lock_mgr.h"

#define CTX \
	dc->ctx

#define DC_LOGGER \
	dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";
/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct.  One per driver.  Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints).  Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display.  Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug.  A dc_link can have a local sink
 * (the display directly attached).  It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver.  Represents the hw blocks not in the
 * main pipeline.  Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed.  There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display.  Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer.  Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool.  Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context.  Represents the
 * internal hardware pipeline components.  Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */
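
/*
 * Example (illustrative only, not part of the driver): given the struct
 * relationships above, a DM-level consumer could walk the current state
 * roughly as sketched below.  dc_state_dump() is a hypothetical helper used
 * purely to show how dc, dc_state, dc_stream_state and dc_plane_state relate.
 *
 *	static void dc_state_dump(struct dc *dc)
 *	{
 *		struct dc_state *state = dc->current_state;
 *		int i, j;
 *
 *		for (i = 0; i < state->stream_count; i++) {
 *			struct dc_stream_state *stream = state->streams[i];
 *
 *			// one dc_stream_state per active dc_sink
 *			DC_LOG_DC("stream %d on link %d\n",
 *				  i, stream->link->link_index);
 *
 *			// each stream has one or more planes (MPO case)
 *			for (j = 0; j < state->stream_status[i].plane_count; j++)
 *				DC_LOG_DC("  plane %d: %p\n", j,
 *					  state->stream_status[i].plane_states[j]);
 *		}
 *	}
 */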

/*******************************************************************************
 * Private functions
 ******************************************************************************/

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}

static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (dc->links[i] != NULL)
			link_destroy(&dc->links[i]);
	}
}

static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			bool should_destroy_link = false;

			if (link->connector_signal == SIGNAL_TYPE_EDP) {
				if (dc->config.edp_not_connected) {
					if (!IS_DIAG_DC(dc->ctx->dce_environment))
						should_destroy_link = true;
				} else {
					enum dc_connection_type type;

					dc_link_detect_sink(link, &type);
					if (type == dc_connection_none)
						should_destroy_link = true;
				}
			}

			if (dc->config.force_enum_edp || !should_destroy_link) {
				dc->links[dc->link_count] = link;
				link->dc = dc;
				++dc->link_count;
			} else {
				link_destroy(&link);
			}
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	return true;

failed_alloc:
	return false;
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
	kfree(*perf_trace);
	*perf_trace = NULL;
}

/**
 * dc_stream_adjust_vmin_vmax() - Adjust DRR timing for a stream.
 * @dc: dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of the DRR (Dynamic Refresh
 * Rate), a power-saving feature that aims to lower the panel refresh rate
 * while the screen content is static.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
{
	int i = 0;
	bool ret = false;

	stream->adjust = *adjust;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			dc->hwss.set_drr(&pipe,
					1,
					adjust->v_total_min,
					adjust->v_total_max,
					adjust->v_total_mid,
					adjust->v_total_mid_frame_num);

			ret = true;
		}
	}
	return ret;
}
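
/*
 * Usage sketch (illustrative, not part of the driver): to enable DRR over a
 * fixed vtotal range for a stream, a DM would fill a dc_crtc_timing_adjust
 * and call the function above.  The min/max values here are hypothetical.
 *
 *	struct dc_crtc_timing_adjust adjust = {
 *		.v_total_min = stream->timing.v_total,     // highest refresh rate
 *		.v_total_max = 2 * stream->timing.v_total, // lowest refresh rate
 *	};
 *
 *	if (!dc_stream_adjust_vmin_vmax(dc, stream, &adjust))
 *		DC_LOG_WARNING("no active pipe found for stream\n");
 */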

bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	const struct dc_stream_state *stream = streams[0];
	int i = 0;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}
	return ret;
}

/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     bool enable, bool continuous)
{
	int i;
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	/* Always capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	param.dsc_mode = pipe->stream->timing.flags.DSC ? 1 : 0;
	param.odm_mode = pipe->next_odm_pipe ? 1 : 0;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 * @dc: DC object
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the R/Cr channel is stored here.
 * @g_y: CRC value for the G/Y channel is stored here.
 * @b_cb: CRC value for the B/Cb channel is stored here.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 *
 * Return: false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}
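
/*
 * Usage sketch (illustrative, not part of the driver): a typical CRC capture
 * sequence pairs the two functions above.  The flow shown is an assumption;
 * in continuous mode the CRCs can be re-read every frame.
 *
 *	uint32_t r_cr, g_y, b_cb;
 *
 *	// start continuous full-frame capture on CRC0
 *	if (!dc_stream_configure_crc(dc, stream, true, true))
 *		return;
 *
 *	// ...wait for at least one frame to complete...
 *
 *	if (dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *		DC_LOG_DC("CRC: %08x %08x %08x\n", r_cr, g_y, b_cb);
 */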

void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
		enum dc_dynamic_expansion option)
{
	/* OPP FMT dyn expansion updates */
	int i = 0;
	struct pipe_ctx *pipe_ctx;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			pipe_ctx->stream_res.opp->dyn_expansion = option;
			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
					pipe_ctx->stream_res.opp,
					COLOR_SPACE_YCBCR601,
					stream->timing.display_color_depth,
					stream->signal);
		}
	}
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
	    pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
			pipes->plane_res.xfm,
			pipes->plane_res.scl_data.lb_params.depth,
			&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
	int i = 0;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);
			ret = true;
		}
	}

	return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
	int i = 0;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {

			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_output_csc(dc,
					pipes,
					stream->output_color_space,
					stream->csc_color_matrix.matrix,
					pipes->stream_res.opp->inst);
			ret = true;
		}
	}

	return ret;
}

void dc_stream_set_static_screen_params(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_params *params)
{
	int i = 0;
	int j = 0;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}

static void dc_destruct(struct dc *dc)
{
	if (dc->current_state) {
		dc_release_state(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	dc_perf_trace_destroy(&dc->ctx->perf_trace);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

#ifdef CONFIG_DRM_AMD_DC_DCN
	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

#endif
	kfree(dc->vm_helper);
	dc->vm_helper = NULL;
}

static bool dc_construct_ctx(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx)
		return false;

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc_ctx->dce_environment = init_params->dce_environment;

	/* Create logger */

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc_ctx->dce_version = dc_version;

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		ASSERT_CRITICAL(false);
		return false;
	}

	dc->ctx = dc_ctx;

	return true;
}

static bool dc_construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;
#endif

	dc->config = init_params->flags;

	// Allocate memory for the vm_helper
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);
		goto fail;
	}

	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;
	dc->soc_bounding_box = init_params->soc_bounding_box;
#endif

	if (!dc_construct_ctx(dc, init_params)) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx = dc->ctx;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override) {
		dc_ctx->dc_bios = init_params->vbios_override;
	} else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_ctx->dce_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	dc->vendor_signature = init_params->vendor_signature;

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_ctx->dce_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
	if (!dc->res_pool)
		goto fail;

	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;
#ifdef CONFIG_DRM_AMD_DC_DCN3_0
	dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
#endif

	if (dc->res_pool->funcs->update_bw_bounding_box)
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);

	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml
	 */

	dc->current_state = dc_create_state(dc);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	dc_resource_state_construct(dc, dc->current_state);

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	return true;

fail:
	return false;
}

static bool disable_all_writeback_pipes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < stream->num_wb_info; i++)
		stream->writeback_info[i].wb_enabled = false;

	return true;
}

void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, bool lock)
{
	int i = 0;

	/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
	if (dc->hwss.interdependent_update_lock) {
		dc->hwss.interdependent_update_lock(dc, context, lock);
	} else {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

			// Copied conditions that were previously in dce110_apply_ctx_for_surface
			if (stream == pipe_ctx->stream) {
				if (!pipe_ctx->top_pipe &&
					(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
					dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
			}
		}
	}
}

static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state(dc);
	struct dc_state *current_ctx;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

			if (dc->hwss.apply_ctx_for_surface) {
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
			if (dc->hwss.program_front_end_for_ctx) {
				dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
				dc->hwss.program_front_end_for_ctx(dc, dangling_context);
				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}

static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
	int i;
	PERF_TRACE();
	for (i = 0; i < MAX_PIPES; i++) {
		int count = 0;
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->plane_state)
			continue;

		/* Timeout 100 ms */
		while (count < 100000) {
			/* Must set to false to start with, due to OR in update function */
			pipe->plane_state->status.is_flip_pending = false;
			dc->hwss.update_pending_status(pipe);
			if (!pipe->plane_state->status.is_flip_pending)
				break;
			udelay(1);
			count++;
		}
		ASSERT(!pipe->plane_state->status.is_flip_pending);
	}
	PERF_TRACE();
}

/*******************************************************************************
 * Public functions
 ******************************************************************************/

struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (!dc)
		goto alloc_fail;

	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
		if (!dc_construct_ctx(dc, init_params)) {
			dc_destruct(dc);
			goto construct_fail;
		}
	} else {
		if (!dc_construct(dc, init_params)) {
			dc_destruct(dc);
			goto construct_fail;
		}

		full_pipe_count = dc->res_pool->pipe_count;
		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
			full_pipe_count--;
		dc->caps.max_streams = min(
				full_pipe_count,
				dc->res_pool->stream_enc_count);

		dc->optimize_seamless_boot_streams = 0;
		dc->caps.max_links = dc->link_count;
		dc->caps.max_audios = dc->res_pool->audio_count;
		dc->caps.linear_pitch_alignment = 64;

		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

		if (dc->res_pool->dmcu != NULL)
			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
	}

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");

	return dc;

construct_fail:
	kfree(dc);

alloc_fail:
	return NULL;
}

void dc_hardware_init(struct dc *dc)
{
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
		dc->hwss.init_hw(dc);
}

void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	dc->ctx->cp_psp = init_params->cp_psp;
#endif
}

void dc_deinit_callbacks(struct dc *dc)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
#endif
}

void dc_destroy(struct dc **dc)
{
	dc_destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}

static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i = 0, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
	}
}

static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;
	int num_group = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;

			if (resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			bool is_blanked;

			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
				is_blanked =
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
			else
				is_blanked =
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
			if (!is_blanked) {
				if (j == 0)
					break;

				swap(pipe_set[0], pipe_set[j]);
				break;
			}
		}

		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;
		}

		/* remove any other unblanked pipes as they have already been synced */
		for (j = j + 1; j < group_size; j++) {
			bool is_blanked;

			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
				is_blanked =
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
			else
				is_blanked =
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
			if (!is_blanked) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		if (group_size > 1) {
			dc->hwss.enable_timing_synchronization(
				dc, group_index, group_size, pipe_set);
			group_index++;
		}
		num_group++;
	}
}

static bool context_changed(
		struct dc *dc,
		struct dc_state *context)
{
	uint8_t i;

	if (context->stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != context->streams[i])
			return true;
	}

	return false;
}

bool dc_validate_seamless_boot_timing(const struct dc *dc,
				const struct dc_sink *sink,
				struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct stream_encoder *se = NULL;

	struct dc_crtc_timing hw_crtc_timing = {0};

	struct dc_link *link = sink->link;
	unsigned int i, enc_inst, tg_inst = 0;

	// Seamless boot only supports a single DP or eDP stream so far
	if (sink->sink_signal != SIGNAL_TYPE_DISPLAY_PORT &&
		sink->sink_signal != SIGNAL_TYPE_EDP)
		return false;

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
		return false;

	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	if (enc_inst == ENGINE_ID_UNKNOWN)
		return false;

	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
		if (dc->res_pool->stream_enc[i]->id == enc_inst) {

			se = dc->res_pool->stream_enc[i];

			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
				dc->res_pool->stream_enc[i]);
			break;
		}
	}

	// tg_inst not found
	if (i == dc->res_pool->stream_enc_count)
		return false;

	if (tg_inst >= dc->res_pool->timing_generator_count)
		return false;

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->get_hw_timing)
		return false;

	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
		return false;

	if (crtc_timing->h_total != hw_crtc_timing.h_total)
		return false;

	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
		return false;

	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
		return false;

	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
		return false;

	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
		return false;

	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
		return false;

	if (crtc_timing->v_total != hw_crtc_timing.v_total)
		return false;

	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
		return false;

	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
		return false;

	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
		return false;

	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
		return false;

	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
		return false;

	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz;

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			tg_inst, &pix_clk_100hz);

		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;

		if (!se->funcs->dp_get_pixel_format)
			return false;

		if (!se->funcs->dp_get_pixel_format(
			se,
			&hw_crtc_timing.pixel_encoding,
			&hw_crtc_timing.display_color_depth))
			return false;

		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
			return false;

		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
			return false;
	}

	return true;
}

bool dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	bool ret = true;
	int i, j;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL)
			pipe = &context->res_ctx.pipe_ctx[i];
		else
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		for (j = 0; pipe && j < stream_count; j++) {
			if (streams[j] && streams[j] == pipe->stream &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}

	return ret;
}

/*
 * Applies the given context to HW and copies it into the current context.
 * It's up to the user to release the src context afterwards.
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	dc_allow_idle_optimizations(dc, false);
#endif

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb))
		dc->hwss.enable_accelerated_mode(dc, context);

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization)
			dc->optimize_seamless_boot_streams++;
	}

	if (dc->optimize_seamless_boot_streams == 0)
		dc->hwss.prepare_bandwidth(dc, context);

	disable_dangling_plane(dc, context);
	/* re-program planes for existing stream, in case we need to
	 * free up plane resource for later use
	 */
	if (dc->hwss.apply_ctx_for_surface) {
		for (i = 0; i < context->stream_count; i++) {
			if (context->streams[i]->mode_changed)
				continue;
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context); /* use new pipe config in new context */
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}
	}

	/* Program hardware */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK)
		return result;

	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}

	/* Program all planes within new context */
	if (dc->hwss.program_front_end_for_ctx) {
		dc->hwss.interdependent_update_lock(dc, context, true);
		dc->hwss.program_front_end_for_ctx(dc, context);
		dc->hwss.interdependent_update_lock(dc, context, false);
		dc->hwss.post_unlock_program_front_end(dc, context);
	}
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		if (!context->streams[i]->mode_changed)
			continue;

		if (dc->hwss.apply_ctx_for_surface) {
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
					dc, context->streams[i],
					context->stream_status[i].plane_count,
					context);
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	if (dc->optimize_seamless_boot_streams == 0) {
		/* Must wait for no flips to be pending before doing optimize bw */
		wait_for_no_pipes_pending(dc, context);
		/* pplib is notified if disp_num changed */
		dc->hwss.optimize_bandwidth(dc, context);
	}

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	dc_release_state(dc->current_state);

	dc->current_state = context;

	dc_retain_state(dc->current_state);

	return result;
}

bool dc_commit_state(struct dc *dc, struct dc_state *context)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;
	int i;

	if (!context_changed(dc, context))
		return true;

	DC_LOG_DC("%s: %d streams\n",
				__func__, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		dc_stream_log(dc, stream);
	}

	result = dc_commit_state_no_check(dc, context);

	return (result == DC_OK);
}
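
/*
 * Usage sketch (illustrative, not part of the driver): a DM commits a new
 * topology by copying the current state, editing the copy, and handing it
 * back.  Error handling is elided; the edits to 'state' are hypothetical.
 *
 *	struct dc_state *state = dc_copy_state(dc->current_state);
 *
 *	// ...add/remove streams and planes in 'state'...
 *
 *	if (dc_commit_state(dc, state))
 *		; // 'state' is now dc->current_state (retained by DC)
 *	dc_release_state(state); // drop the caller's reference
 */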

#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
bool dc_acquire_release_mpc_3dlut(
		struct dc *dc, bool acquire,
		struct dc_stream_state *stream,
		struct dc_3dlut **lut,
		struct dc_transfer_func **shaper)
{
	int pipe_idx;
	bool ret = false;
	bool found_pipe_idx = false;
	const struct resource_pool *pool = dc->res_pool;
	struct resource_context *res_ctx = &dc->current_state->res_ctx;
	int mpcc_id = 0;

	if (pool && res_ctx) {
		if (acquire) {
			/* find pipe idx for the given stream */
			for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
				if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
					found_pipe_idx = true;
					mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
					break;
				}
			}
		} else {
			found_pipe_idx = true; /* for release, pipe_idx is not required */
		}

		if (found_pipe_idx) {
			if (acquire && pool->funcs->acquire_post_bldn_3dlut)
				ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
			else if (!acquire && pool->funcs->release_post_bldn_3dlut)
				ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
		}
	}
	return ret;
}
#endif

static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->plane_state)
			continue;

		/* Must set to false to start with, due to OR in update function */
		pipe->plane_state->status.is_flip_pending = false;
		dc->hwss.update_pending_status(pipe);
		if (pipe->plane_state->status.is_flip_pending)
			return true;
	}
	return false;
}
1450
1451bool dc_post_update_surfaces_to_stream(struct dc *dc)
1452{
1453        int i;
1454        struct dc_state *context = dc->current_state;
1455
1456        if ((!dc->optimized_required) || dc->optimize_seamless_boot_streams > 0)
1457                return true;
1458
1459        post_surface_trace(dc);
1460
1461        if (is_flip_pending_in_pipes(dc, context))
1462                return true;
1463
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                if (context->res_ctx.pipe_ctx[i].stream == NULL ||
                    context->res_ctx.pipe_ctx[i].plane_state == NULL) {
                        context->res_ctx.pipe_ctx[i].pipe_idx = i;
                        dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
                }
        }
1470
1471        dc->hwss.optimize_bandwidth(dc, context);
1472
1473        dc->optimized_required = false;
1474        dc->wm_optimized_required = false;
1475
1476        return true;
1477}
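
/*
 * Illustrative call pattern (a sketch, not a prescribed contract): this
 * function may be called repeatedly; it returns early while any flip is
 * still pending and only lowers bandwidth once all pipes are idle:
 *
 *	// e.g. from a deferred worker in the DM layer
 *	if (dc->optimized_required)
 *		dc_post_update_surfaces_to_stream(dc);
 */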
1478
1479struct dc_state *dc_create_state(struct dc *dc)
1480{
1481        struct dc_state *context = kvzalloc(sizeof(struct dc_state),
1482                                            GFP_KERNEL);
1483
1484        if (!context)
1485                return NULL;
        /* Each context must have its own instance of VBA, and in order to
         * initialize and obtain IP and SOC, the base DML instance from DC is
         * initially copied into every context.
         */
1490#ifdef CONFIG_DRM_AMD_DC_DCN
1491        memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
1492#endif
1493
1494        kref_init(&context->refcount);
1495
1496        return context;
1497}
1498
1499struct dc_state *dc_copy_state(struct dc_state *src_ctx)
1500{
1501        int i, j;
1502        struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
1503
1504        if (!new_ctx)
1505                return NULL;
1506        memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
1507
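        /*
         * The shallow memcpy above leaves top_pipe/bottom_pipe and the ODM
         * links pointing into src_ctx's pipe_ctx array; rebase each link into
         * new_ctx using the stable pipe_idx of the pipe it refers to.
         */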
        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];

                if (cur_pipe->top_pipe)
                        cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];

                if (cur_pipe->bottom_pipe)
                        cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];

                if (cur_pipe->prev_odm_pipe)
                        cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];

                if (cur_pipe->next_odm_pipe)
                        cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
        }
1524
        for (i = 0; i < new_ctx->stream_count; i++) {
                dc_stream_retain(new_ctx->streams[i]);
                for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
                        dc_plane_state_retain(
                                new_ctx->stream_status[i].plane_states[j]);
        }
1531
1532        kref_init(&new_ctx->refcount);
1533
1534        return new_ctx;
1535}
1536
1537void dc_retain_state(struct dc_state *context)
1538{
1539        kref_get(&context->refcount);
1540}
1541
static void dc_state_free(struct kref *kref)
{
        struct dc_state *context = container_of(kref, struct dc_state, refcount);

        dc_resource_state_destruct(context);
        kvfree(context);
}
1548
1549void dc_release_state(struct dc_state *context)
1550{
1551        kref_put(&context->refcount, dc_state_free);
1552}
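
/*
 * Illustrative lifecycle (a sketch): dc_state is reference counted, so every
 * dc_create_state()/dc_copy_state()/dc_retain_state() must be balanced by a
 * dc_release_state(); the backing memory is freed on the final put:
 *
 *	struct dc_state *snap = dc_copy_state(dc->current_state);
 *
 *	if (snap) {
 *		// ... inspect or mutate the private copy ...
 *		dc_release_state(snap);
 *	}
 */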
1553
1554bool dc_set_generic_gpio_for_stereo(bool enable,
1555                struct gpio_service *gpio_service)
1556{
1557        enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
1558        struct gpio_pin_info pin_info;
1559        struct gpio *generic;
        struct gpio_generic_mux_config *config =
                kzalloc(sizeof(struct gpio_generic_mux_config), GFP_KERNEL);
1562
1563        if (!config)
1564                return false;
1565        pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
1566
        if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
                kfree(config);
                return false;
        }

        generic = dal_gpio_service_create_generic_mux(
                gpio_service,
                pin_info.offset,
                pin_info.mask);
1576
1577        if (!generic) {
1578                kfree(config);
1579                return false;
1580        }
1581
1582        gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
1583
1584        config->enable_output_from_mux = enable;
1585        config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
1586
1587        if (gpio_result == GPIO_RESULT_OK)
1588                gpio_result = dal_mux_setup_config(generic, config);
1589
        dal_gpio_close(generic);
        dal_gpio_destroy_generic_mux(&generic);
        kfree(config);

        return gpio_result == GPIO_RESULT_OK;
1601}
1602
1603static bool is_surface_in_context(
1604                const struct dc_state *context,
1605                const struct dc_plane_state *plane_state)
1606{
1607        int j;
1608
1609        for (j = 0; j < MAX_PIPES; j++) {
1610                const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1611
                if (plane_state == pipe_ctx->plane_state)
                        return true;
1615        }
1616
1617        return false;
1618}
1619
1620static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
1621{
1622        union surface_update_flags *update_flags = &u->surface->update_flags;
1623        enum surface_update_type update_type = UPDATE_TYPE_FAST;
1624
1625        if (!u->plane_info)
1626                return UPDATE_TYPE_FAST;
1627
1628        if (u->plane_info->color_space != u->surface->color_space) {
1629                update_flags->bits.color_space_change = 1;
1630                elevate_update_type(&update_type, UPDATE_TYPE_MED);
1631        }
1632
1633        if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
1634                update_flags->bits.horizontal_mirror_change = 1;
1635                elevate_update_type(&update_type, UPDATE_TYPE_MED);
1636        }
1637
1638        if (u->plane_info->rotation != u->surface->rotation) {
1639                update_flags->bits.rotation_change = 1;
1640                elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1641        }
1642
1643        if (u->plane_info->format != u->surface->format) {
1644                update_flags->bits.pixel_format_change = 1;
1645                elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1646        }
1647
1648        if (u->plane_info->stereo_format != u->surface->stereo_format) {
1649                update_flags->bits.stereo_format_change = 1;
1650                elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1651        }
1652
1653        if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
1654                update_flags->bits.per_pixel_alpha_change = 1;
1655                elevate_update_type(&update_type, UPDATE_TYPE_MED);
1656        }
1657
1658        if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
1659                update_flags->bits.global_alpha_change = 1;
1660                elevate_update_type(&update_type, UPDATE_TYPE_MED);
1661        }
1662
1663        if (u->plane_info->dcc.enable != u->surface->dcc.enable
1664                        || u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
1665                        || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
1666                update_flags->bits.dcc_change = 1;
1667                elevate_update_type(&update_type, UPDATE_TYPE_MED);
1668        }
1669
1670        if (resource_pixel_format_to_bpp(u->plane_info->format) !=
1671                        resource_pixel_format_to_bpp(u->surface->format)) {
1672                /* different bytes per element will require full bandwidth
1673                 * and DML calculation
1674                 */
1675                update_flags->bits.bpp_change = 1;
1676                elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1677        }
1678
1679        if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
1680                        || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
1681                update_flags->bits.plane_size_change = 1;
1682                elevate_update_type(&update_type, UPDATE_TYPE_MED);
        }

1686        if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
1687                        sizeof(union dc_tiling_info)) != 0) {
1688                update_flags->bits.swizzle_change = 1;
1689                elevate_update_type(&update_type, UPDATE_TYPE_MED);
1690
                /* TODO: the checks below are HW dependent; we should add a
                 * hook to the DCE/DCN resource and validate there.
                 */
1694                if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
                        /* swizzled mode requires RQ to be set up properly,
                         * so DML must be run to calculate the RQ settings
                         */
1698                        update_flags->bits.bandwidth_change = 1;
1699                        elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1700                }
1701        }
1702
1703        /* This should be UPDATE_TYPE_FAST if nothing has changed. */
1704        return update_type;
1705}
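
/*
 * Note on composition: the per-field checks above only ever raise the running
 * maximum, so update types compose monotonically (FAST < MED < FULL). A
 * minimal sketch of elevate_update_type()'s effect, assuming the inline
 * helper behaves as declared in dc.h:
 *
 *	if (new > *original)
 *		*original = new;
 */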
1706
1707static enum surface_update_type get_scaling_info_update_type(
1708                const struct dc_surface_update *u)
1709{
1710        union surface_update_flags *update_flags = &u->surface->update_flags;
1711
1712        if (!u->scaling_info)
1713                return UPDATE_TYPE_FAST;
1714
1715        if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
1716                        || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
1717                        || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
1718                        || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
1719                        || u->scaling_info->scaling_quality.integer_scaling !=
1720                                u->surface->scaling_quality.integer_scaling
1721                        ) {
1722                update_flags->bits.scaling_change = 1;
1723
1724                if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
1725                        || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
1726                                && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
1727                                        || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
1728                        /* Making dst rect smaller requires a bandwidth change */
1729                        update_flags->bits.bandwidth_change = 1;
1730        }
1731
        if (u->scaling_info->src_rect.width != u->surface->src_rect.width
                || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
                update_flags->bits.scaling_change = 1;
                if (u->scaling_info->src_rect.width > u->surface->src_rect.width
                                || u->scaling_info->src_rect.height > u->surface->src_rect.height)
                        /* Making src rect bigger requires a clock change */
                        update_flags->bits.clock_change = 1;
        }
1741
1742        if (u->scaling_info->src_rect.x != u->surface->src_rect.x
1743                        || u->scaling_info->src_rect.y != u->surface->src_rect.y
1744                        || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
1745                        || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
1746                        || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
1747                        || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
1748                update_flags->bits.position_change = 1;
1749
1750        if (update_flags->bits.clock_change
1751                        || update_flags->bits.bandwidth_change
1752                        || update_flags->bits.scaling_change)
1753                return UPDATE_TYPE_FULL;
1754
1755        if (update_flags->bits.position_change)
1756                return UPDATE_TYPE_MED;
1757
1758        return UPDATE_TYPE_FAST;
1759}
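
/*
 * Worked example (illustrative): shrinking dst_rect from 1920x1080 to 960x540
 * while src_rect stays 1920x1080 sets scaling_change and, because the new dst
 * is smaller than both the old dst and the src, bandwidth_change as well, so
 * the function returns UPDATE_TYPE_FULL; merely moving dst_rect.x/y only sets
 * position_change and yields UPDATE_TYPE_MED.
 */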
1760
1761static enum surface_update_type det_surface_update(const struct dc *dc,
1762                const struct dc_surface_update *u)
1763{
1764        const struct dc_state *context = dc->current_state;
1765        enum surface_update_type type;
1766        enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1767        union surface_update_flags *update_flags = &u->surface->update_flags;
1768
1772        if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
1773                update_flags->raw = 0xFFFFFFFF;
1774                return UPDATE_TYPE_FULL;
1775        }
1776
1777        update_flags->raw = 0; // Reset all flags
1778
1779        type = get_plane_info_update_type(u);
1780        elevate_update_type(&overall_type, type);
1781
1782        type = get_scaling_info_update_type(u);
1783        elevate_update_type(&overall_type, type);
1784
1785        if (u->flip_addr)
1786                update_flags->bits.addr_update = 1;
1787
1788        if (u->in_transfer_func)
1789                update_flags->bits.in_transfer_func_change = 1;
1790
1791        if (u->input_csc_color_matrix)
1792                update_flags->bits.input_csc_change = 1;
1793
1794        if (u->coeff_reduction_factor)
1795                update_flags->bits.coeff_reduction_change = 1;
1796
1797        if (u->gamut_remap_matrix)
1798                update_flags->bits.gamut_remap_change = 1;
1799
1800        if (u->gamma) {
1801                enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
1802
1803                if (u->plane_info)
1804                        format = u->plane_info->format;
1805                else if (u->surface)
1806                        format = u->surface->format;
1807
1808                if (dce_use_lut(format))
1809                        update_flags->bits.gamma_change = 1;
1810        }
1811
        if (u->hdr_mult.value &&
                        u->hdr_mult.value != u->surface->hdr_mult.value) {
                update_flags->bits.hdr_mult = 1;
                elevate_update_type(&overall_type, UPDATE_TYPE_MED);
        }
1817
1818        if (update_flags->bits.in_transfer_func_change) {
1819                type = UPDATE_TYPE_MED;
1820                elevate_update_type(&overall_type, type);
1821        }
1822
1823        if (update_flags->bits.input_csc_change
1824                        || update_flags->bits.coeff_reduction_change
1825                        || update_flags->bits.gamma_change
1826                        || update_flags->bits.gamut_remap_change) {
1827                type = UPDATE_TYPE_FULL;
1828                elevate_update_type(&overall_type, type);
1829        }
1830
1831        return overall_type;
1832}
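
/*
 * Illustrative behaviour (names are hypothetical): a surface that is not yet
 * part of the current context, or one with force_full_update set, bypasses
 * the per-field checks and forces a full update with every flag raised:
 *
 *	struct dc_surface_update u = {
 *		.surface = new_plane,	// not in dc->current_state yet
 *		.flip_addr = &flip,
 *	};
 *
 *	// det_surface_update(dc, &u) returns UPDATE_TYPE_FULL and leaves
 *	// u.surface->update_flags.raw == 0xFFFFFFFF
 */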
1833
1834static enum surface_update_type check_update_surfaces_for_stream(
1835                struct dc *dc,
1836                struct dc_surface_update *updates,
1837                int surface_count,
1838                struct dc_stream_update *stream_update,
1839                const struct dc_stream_status *stream_status)
1840{
1841        int i;
1842        enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1843
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
        if (dc->idle_optimizations_allowed)
                overall_type = UPDATE_TYPE_FULL;
#endif

1849        if (stream_status == NULL || stream_status->plane_count != surface_count)
1850                overall_type = UPDATE_TYPE_FULL;
1851
1852        /* some stream updates require passive update */
1853        if (stream_update) {
1854                union stream_update_flags *su_flags = &stream_update->stream->update_flags;
1855
1856                if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
1857                        (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
1858                        stream_update->integer_scaling_update)
1859                        su_flags->bits.scaling = 1;
1860
1861                if (stream_update->out_transfer_func)
1862                        su_flags->bits.out_tf = 1;
1863
1864                if (stream_update->abm_level)
1865                        su_flags->bits.abm_level = 1;
1866
1867                if (stream_update->dpms_off)
1868                        su_flags->bits.dpms_off = 1;
1869
1870                if (stream_update->gamut_remap)
1871                        su_flags->bits.gamut_remap = 1;
1872
1873                if (stream_update->wb_update)
1874                        su_flags->bits.wb_update = 1;
1875
1876                if (stream_update->dsc_config)
1877                        su_flags->bits.dsc_changed = 1;
1878
1879                if (su_flags->raw != 0)
1880                        overall_type = UPDATE_TYPE_FULL;
1881
1882                if (stream_update->output_csc_transform || stream_update->output_color_space)
1883                        su_flags->bits.out_csc = 1;
1884        }
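
        /*
         * Note: out_csc is set after the su_flags->raw check above, so an
         * output CSC / output color space change on its own does not escalate
         * overall_type to UPDATE_TYPE_FULL.
         */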
1885
        for (i = 0; i < surface_count; i++) {
1887                enum surface_update_type type =
1888                                det_surface_update(dc, &updates[i]);
1889
1890                elevate_update_type(&overall_type, type);
1891        }
1892
1893        return overall_type;
1894}
1895
1896/**
1897 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
1898 *
1899 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
1900 */
1901enum surface_update_type dc_check_update_surfaces_for_stream(
1902                struct dc *dc,
1903                struct dc_surface_update *updates,
1904                int surface_count,
1905                struct dc_stream_update *stream_update,
1906                const struct dc_stream_status *stream_status)
1907{
1908        int i;
1909        enum surface_update_type type;
1910
1911        if (stream_update)
1912                stream_update->stream->update_flags.raw = 0;
1913        for (i = 0; i < surface_count; i++)
1914                updates[i].surface->update_flags.raw = 0;
1915
1916        type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
1917        if (type == UPDATE_TYPE_FULL) {
1918                if (stream_update) {
                        uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;

                        stream_update->stream->update_flags.raw = 0xFFFFFFFF;
                        stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
1922                }
1923                for (i = 0; i < surface_count; i++)
1924                        updates[i].surface->update_flags.raw = 0xFFFFFFFF;
1925        }
1926
1927        if (type == UPDATE_TYPE_FAST) {
1928                // If there's an available clock comparator, we use that.
1929                if (dc->clk_mgr->funcs->are_clock_states_equal) {
1930                        if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
1931                                dc->optimized_required = true;
                // Else we fall back to a memcmp.
1933                } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
1934                        dc->optimized_required = true;
1935                }
1936
1937                dc->optimized_required |= dc->wm_optimized_required;
1938        }
1939
1940        return type;
1941}
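
/*
 * Typical call pattern (a sketch; only the dc_* calls shown are real entry
 * points): the DM layer classifies an update first and only builds a new,
 * validated context when a full update is required:
 *
 *	enum surface_update_type type =
 *		dc_check_update_surfaces_for_stream(dc, updates, surface_count,
 *				stream_update, dc_stream_get_status(stream));
 *
 *	if (type == UPDATE_TYPE_FULL)
 *		; // allocate and validate a fresh dc_state before committing
 */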
1942
1943static struct dc_stream_status *stream_get_status(
1944        struct dc_state *ctx,
1945        struct dc_stream_state *stream)
1946{
1947        uint8_t i;
1948
        for (i = 0; i < ctx->stream_count; i++) {
                if (stream == ctx->streams[i])
                        return &ctx->stream_status[i];
        }
1954
1955        return NULL;
1956}
1957
1958static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
1959
1960static void copy_surface_update_to_plane(
1961                struct dc_plane_state *surface,
1962                struct dc_surface_update *srf_update)
1963{
1964        if (srf_update->flip_addr) {
1965                surface->address = srf_update->flip_addr->address;
1966                surface->flip_immediate =
1967                        srf_update->flip_addr->flip_immediate;
1968                surface->time.time_elapsed_in_us[surface->time.index] =
1969                        srf_update->flip_addr->flip_timestamp_in_us -
1970                                surface->time.prev_update_time_in_us;
1971                surface->time.prev_update_time_in_us =
1972                        srf_update->flip_addr->flip_timestamp_in_us;
1973                surface->time.index++;
1974                if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
1975                        surface->time.index = 0;
1976
1977                surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
1978        }
1979
1980        if (srf_update->scaling_info) {
1981                surface->scaling_quality =
1982                                srf_update->scaling_info->scaling_quality;
1983                surface->dst_rect =
1984                                srf_update->scaling_info->dst_rect;
1985                surface->src_rect =
1986                                srf_update->scaling_info->src_rect;
1987                surface->clip_rect =
1988                                srf_update->scaling_info->clip_rect;
1989        }
1990
1991        if (srf_update->plane_info) {
1992                surface->color_space =
1993                                srf_update->plane_info->color_space;
1994                surface->format =
1995                                srf_update->plane_info->format;
1996                surface->plane_size =
1997                                srf_update->plane_info->plane_size;
1998                surface->rotation =
1999                                srf_update->plane_info->rotation;
2000                surface->horizontal_mirror =
2001                                srf_update->plane_info->horizontal_mirror;
2002                surface->stereo_format =
2003                                srf_update->plane_info->stereo_format;
2004                surface->tiling_info =
2005                                srf_update->plane_info->tiling_info;
2006                surface->visible =
2007                                srf_update->plane_info->visible;
2008                surface->per_pixel_alpha =
2009                                srf_update->plane_info->per_pixel_alpha;
2010                surface->global_alpha =
2011                                srf_update->plane_info->global_alpha;
2012                surface->global_alpha_value =
2013                                srf_update->plane_info->global_alpha_value;
2014                surface->dcc =
2015                                srf_update->plane_info->dcc;
2016                surface->layer_index =
2017                                srf_update->plane_info->layer_index;
2018        }
2019
2020        if (srf_update->gamma &&
2021                        (surface->gamma_correction !=
2022                                        srf_update->gamma)) {
2023                memcpy(&surface->gamma_correction->entries,
2024                        &srf_update->gamma->entries,
2025                        sizeof(struct dc_gamma_entries));
2026                surface->gamma_correction->is_identity =
2027                        srf_update->gamma->is_identity;
2028                surface->gamma_correction->num_entries =
2029                        srf_update->gamma->num_entries;
2030                surface->gamma_correction->type =
2031                        srf_update->gamma->type;
2032        }
2033
2034        if (srf_update->in_transfer_func &&
2035                        (surface->in_transfer_func !=
2036                                srf_update->in_transfer_func)) {
2037                surface->in_transfer_func->sdr_ref_white_level =
2038                        srf_update->in_transfer_func->sdr_ref_white_level;
2039                surface->in_transfer_func->tf =
2040                        srf_update->in_transfer_func->tf;
2041                surface->in_transfer_func->type =
2042                        srf_update->in_transfer_func->type;
2043                memcpy(&surface->in_transfer_func->tf_pts,
2044                        &srf_update->in_transfer_func->tf_pts,
2045                        sizeof(struct dc_transfer_func_distributed_points));
2046        }
2047
        if (srf_update->func_shaper &&
                        (surface->in_shaper_func !=
                        srf_update->func_shaper))
                memcpy(surface->in_shaper_func, srf_update->func_shaper,
                       sizeof(*surface->in_shaper_func));

        if (srf_update->lut3d_func &&
                        (surface->lut3d_func !=
                        srf_update->lut3d_func))
                memcpy(surface->lut3d_func, srf_update->lut3d_func,
                       sizeof(*surface->lut3d_func));

        if (srf_update->hdr_mult.value)
                surface->hdr_mult =
                                srf_update->hdr_mult;

        if (srf_update->blend_tf &&
                        (surface->blend_tf !=
                        srf_update->blend_tf))
                memcpy(surface->blend_tf, srf_update->blend_tf,
                       sizeof(*surface->blend_tf));
2069
2070        if (srf_update->input_csc_color_matrix)
2071                surface->input_csc_color_matrix =
2072                        *srf_update->input_csc_color_matrix;
2073
2074        if (srf_update->coeff_reduction_factor)
2075                surface->coeff_reduction_factor =
2076                        *srf_update->coeff_reduction_factor;
2077
2078        if (srf_update->gamut_remap_matrix)
2079                surface->gamut_remap_matrix =
2080                        *srf_update->gamut_remap_matrix;
2081}
2082
2083static void copy_stream_update_to_stream(struct dc *dc,
2084                                         struct dc_state *context,
2085                                         struct dc_stream_state *stream,
2086                                         struct dc_stream_update *update)
2087{
2088        struct dc_context *dc_ctx = dc->ctx;
2089
2090        if (update == NULL || stream == NULL)
2091                return;
2092
2093        if (update->src.height && update->src.width)
2094                stream->src = update->src;
2095
2096        if (update->dst.height && update->dst.width)
2097                stream->dst = update->dst;
2098
2099        if (update->out_transfer_func &&
2100            stream->out_transfer_func != update->out_transfer_func) {
2101                stream->out_transfer_func->sdr_ref_white_level =
2102                        update->out_transfer_func->sdr_ref_white_level;
2103                stream->out_transfer_func->tf = update->out_transfer_func->tf;
2104                stream->out_transfer_func->type =
2105                        update->out_transfer_func->type;
2106                memcpy(&stream->out_transfer_func->tf_pts,
2107                       &update->out_transfer_func->tf_pts,
2108                       sizeof(struct dc_transfer_func_distributed_points));
2109        }
2110
2111        if (update->hdr_static_metadata)
2112                stream->hdr_static_metadata = *update->hdr_static_metadata;
2113
2114        if (update->abm_level)
2115                stream->abm_level = *update->abm_level;
2116
2117        if (update->periodic_interrupt0)
2118                stream->periodic_interrupt0 = *update->periodic_interrupt0;
2119
2120        if (update->periodic_interrupt1)
2121                stream->periodic_interrupt1 = *update->periodic_interrupt1;
2122
2123        if (update->gamut_remap)
2124                stream->gamut_remap_matrix = *update->gamut_remap;
2125
2126        /* Note: this being updated after mode set is currently not a use case
2127         * however if it arises OCSC would need to be reprogrammed at the
2128         * minimum
2129         */
2130        if (update->output_color_space)
2131                stream->output_color_space = *update->output_color_space;
2132
2133        if (update->output_csc_transform)
2134                stream->csc_color_matrix = *update->output_csc_transform;
2135
2136        if (update->vrr_infopacket)
2137                stream->vrr_infopacket = *update->vrr_infopacket;
2138
2139        if (update->dpms_off)
2140                stream->dpms_off = *update->dpms_off;
2141
2142        if (update->vsc_infopacket)
2143                stream->vsc_infopacket = *update->vsc_infopacket;
2144
2145        if (update->vsp_infopacket)
2146                stream->vsp_infopacket = *update->vsp_infopacket;
2147
        if (update->dither_option)
                stream->dither_option = *update->dither_option;

        /* update current stream with writeback info */
        if (update->wb_update) {
2152                int i;
2153
2154                stream->num_wb_info = update->wb_update->num_wb_info;
2155                ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
2156                for (i = 0; i < stream->num_wb_info; i++)
2157                        stream->writeback_info[i] =
2158                                update->wb_update->writeback_info[i];
2159        }
2160        if (update->dsc_config) {
2161                struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
2162                uint32_t old_dsc_enabled = stream->timing.flags.DSC;
2163                uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
2164                                       update->dsc_config->num_slices_v != 0);
2165
                /* Use temporary context for validating new DSC config */
2167                struct dc_state *dsc_validate_context = dc_create_state(dc);
2168
2169                if (dsc_validate_context) {
2170                        dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
2171
2172                        stream->timing.dsc_cfg = *update->dsc_config;
2173                        stream->timing.flags.DSC = enable_dsc;
2174                        if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
2175                                stream->timing.dsc_cfg = old_dsc_cfg;
2176                                stream->timing.flags.DSC = old_dsc_enabled;
2177                                update->dsc_config = NULL;
2178                        }
2179
2180                        dc_release_state(dsc_validate_context);
2181                } else {
2182                        DC_ERROR("Failed to allocate new validate context for DSC change\n");
2183                        update->dsc_config = NULL;
2184                }
2185        }
2186}
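
/*
 * The dsc_config path above illustrates a recurring DC pattern: apply the
 * change to the stream, validate the result against a throwaway copy of the
 * current context, and on rejection roll the stream back and clear
 * update->dsc_config so later programming stages skip the failed change.
 */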
2187
2188static void commit_planes_do_stream_update(struct dc *dc,
2189                struct dc_stream_state *stream,
2190                struct dc_stream_update *stream_update,
2191                enum surface_update_type update_type,
2192                struct dc_state *context)
2193{
2194        int j;
2195        bool should_program_abm;
2196
2197        // Stream updates
2198        for (j = 0; j < dc->res_pool->pipe_count; j++) {
2199                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2200
                if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
2203                        if (stream_update->periodic_interrupt0 &&
2204                                        dc->hwss.setup_periodic_interrupt)
2205                                dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);
2206
2207                        if (stream_update->periodic_interrupt1 &&
2208                                        dc->hwss.setup_periodic_interrupt)
2209                                dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);
2210
2211                        if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
2212                                        stream_update->vrr_infopacket ||
2213                                        stream_update->vsc_infopacket ||
2214                                        stream_update->vsp_infopacket) {
2215                                resource_build_info_frame(pipe_ctx);
2216                                dc->hwss.update_info_frame(pipe_ctx);
2217                        }
2218
2219                        if (stream_update->hdr_static_metadata &&
2220                                        stream->use_dynamic_meta &&
2221                                        dc->hwss.set_dmdata_attributes &&
2222                                        pipe_ctx->stream->dmdata_address.quad_part != 0)
2223                                dc->hwss.set_dmdata_attributes(pipe_ctx);
2224
2225                        if (stream_update->gamut_remap)
2226                                dc_stream_set_gamut_remap(dc, stream);
2227
2228                        if (stream_update->output_csc_transform)
2229                                dc_stream_program_csc_matrix(dc, stream);
2230
2231                        if (stream_update->dither_option) {
2232                                struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
2233                                resource_build_bit_depth_reduction_params(pipe_ctx->stream,
2234                                                                        &pipe_ctx->stream->bit_depth_params);
2235                                pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
2236                                                &stream->bit_depth_params,
2237                                                &stream->clamping);
2238                                while (odm_pipe) {
2239                                        odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
2240                                                        &stream->bit_depth_params,
2241                                                        &stream->clamping);
2242                                        odm_pipe = odm_pipe->next_odm_pipe;
2243                                }
2244                        }
2245
                        /* Full front-end update only; fast updates stop here */
                        if (update_type == UPDATE_TYPE_FAST)
                                continue;
2249
2250                        if (stream_update->dsc_config)
2251                                dp_update_dsc_config(pipe_ctx);
2252
2253                        if (stream_update->dpms_off) {
2254                                if (*stream_update->dpms_off) {
2255                                        core_link_disable_stream(pipe_ctx);
2256                                        /* for dpms, keep acquired resources*/
2257                                        if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
2258                                                pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
2259
2260                                        dc->hwss.optimize_bandwidth(dc, dc->current_state);
2261                                } else {
2262                                        if (dc->optimize_seamless_boot_streams == 0)
2263                                                dc->hwss.prepare_bandwidth(dc, dc->current_state);
2264
2265                                        core_link_enable_stream(dc->current_state, pipe_ctx);
2266                                }
2267                        }
2268
2269                        if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
2270                                should_program_abm = true;
2271
                                // if otg funcs are defined, check if blanked before programming
2273                                if (pipe_ctx->stream_res.tg->funcs->is_blanked)
2274                                        if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
2275                                                should_program_abm = false;
2276
2277                                if (should_program_abm) {
2278                                        if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
2279                                                dc->hwss.set_abm_immediate_disable(pipe_ctx);
2280                                        } else {
2281                                                pipe_ctx->stream_res.abm->funcs->set_abm_level(
2282                                                        pipe_ctx->stream_res.abm, stream->abm_level);
2283                                        }
2284                                }
2285                        }
2286                }
2287        }
2288}
2289
2290static void commit_planes_for_stream(struct dc *dc,
2291                struct dc_surface_update *srf_updates,
2292                int surface_count,
2293                struct dc_stream_state *stream,
2294                struct dc_stream_update *stream_update,
2295                enum surface_update_type update_type,
2296                struct dc_state *context)
2297{
2298        int i, j;
2299        struct pipe_ctx *top_pipe_to_program = NULL;
2300
2301        if (dc->optimize_seamless_boot_streams > 0 && surface_count > 0) {
2302                /* Optimize seamless boot flag keeps clocks and watermarks high until
2303                 * first flip. After first flip, optimization is required to lower
2304                 * bandwidth. Important to note that it is expected UEFI will
2305                 * only light up a single display on POST, therefore we only expect
2306                 * one stream with seamless boot flag set.
2307                 */
2308                if (stream->apply_seamless_boot_optimization) {
2309                        stream->apply_seamless_boot_optimization = false;
2310                        dc->optimize_seamless_boot_streams--;
2311
2312                        if (dc->optimize_seamless_boot_streams == 0)
2313                                dc->optimized_required = true;
2314                }
2315        }
2316
2317        if (update_type == UPDATE_TYPE_FULL) {
2318#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
2319                dc_allow_idle_optimizations(dc, false);
2320
2321#endif
2322                if (dc->optimize_seamless_boot_streams == 0)
2323                        dc->hwss.prepare_bandwidth(dc, context);
2324
2325                context_clock_trace(dc, context);
2326        }
2327
2328        for (j = 0; j < dc->res_pool->pipe_count; j++) {
2329                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2330
2331                if (!pipe_ctx->top_pipe &&
2332                        !pipe_ctx->prev_odm_pipe &&
2333                        pipe_ctx->stream &&
2334                        pipe_ctx->stream == stream) {
2335                        top_pipe_to_program = pipe_ctx;
2336                }
2337        }
2338
2339        if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
2340                if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
2341                        if (should_use_dmub_lock(stream->link)) {
2342                                union dmub_hw_lock_flags hw_locks = { 0 };
2343                                struct dmub_hw_lock_inst_flags inst_flags = { 0 };
2344
2345                                hw_locks.bits.lock_dig = 1;
2346                                inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
2347
2348                                dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
2349                                                        true,
2350                                                        &hw_locks,
2351                                                        &inst_flags);
                        } else {
                                top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
                                                top_pipe_to_program->stream_res.tg);
                        }
2355                }
2356
2357        if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
2358                dc->hwss.interdependent_update_lock(dc, context, true);
        else
                /* Lock the top pipe while updating plane addrs, since freesync
                 * requires plane addr update event triggers to be synchronized.
                 * top_pipe_to_program is expected to never be NULL.
                 */
                dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);

2367        // Stream updates
2368        if (stream_update)
2369                commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
2370
2371        if (surface_count == 0) {
2372                /*
2373                 * In case of turning off screen, no need to program front end a second time.
2374                 * just return after program blank.
2375                 */
2376                if (dc->hwss.apply_ctx_for_surface)
2377                        dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
2378                if (dc->hwss.program_front_end_for_ctx)
2379                        dc->hwss.program_front_end_for_ctx(dc, context);
2380
2381                if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
2382                        dc->hwss.interdependent_update_lock(dc, context, false);
2383                else
2384                        dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
2385
2386                dc->hwss.post_unlock_program_front_end(dc, context);
2387                return;
2388        }
2389
2390        if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
2391                for (i = 0; i < surface_count; i++) {
2392                        struct dc_plane_state *plane_state = srf_updates[i].surface;
2393                        /*set logical flag for lock/unlock use*/
2394                        for (j = 0; j < dc->res_pool->pipe_count; j++) {
2395                                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2396                                if (!pipe_ctx->plane_state)
2397                                        continue;
2398                                if (pipe_ctx->plane_state != plane_state)
2399                                        continue;
2400                                plane_state->triplebuffer_flips = false;
                                if (update_type == UPDATE_TYPE_FAST &&
                                        dc->hwss.program_triplebuffer != NULL &&
                                        !plane_state->flip_immediate &&
                                        !dc->debug.disable_tri_buf) {
                                        /* triple buffer for VUpdate only */
                                        plane_state->triplebuffer_flips = true;
                                }
2408                        }
2409                }
2410        }
2411
2412        // Update Type FULL, Surface updates
2413        for (j = 0; j < dc->res_pool->pipe_count; j++) {
2414                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2415
2416                if (!pipe_ctx->top_pipe &&
2417                        !pipe_ctx->prev_odm_pipe &&
2418                        pipe_ctx->stream &&
2419                        pipe_ctx->stream == stream) {
2420                        struct dc_stream_status *stream_status = NULL;
2421
2422                        if (!pipe_ctx->plane_state)
2423                                continue;
2424
                        /* Full front-end update only; fast updates stop here */
                        if (update_type == UPDATE_TYPE_FAST)
                                continue;
2428
2429                        ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
2430
                        if (dc->hwss.program_triplebuffer != NULL &&
                                !dc->debug.disable_tri_buf) {
                                /* turn off triple buffer for full update */
                                dc->hwss.program_triplebuffer(
                                        dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
                        }
2437                        stream_status =
2438                                stream_get_status(context, pipe_ctx->stream);
2439
2440                        if (dc->hwss.apply_ctx_for_surface)
2441                                dc->hwss.apply_ctx_for_surface(
2442                                        dc, pipe_ctx->stream, stream_status->plane_count, context);
2443                }
        }

        if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
2446                dc->hwss.program_front_end_for_ctx(dc, context);
2447#ifdef CONFIG_DRM_AMD_DC_DCN
                if (dc->debug.validate_dml_output) {
                        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                                struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];

                                if (cur_pipe->stream == NULL)
                                        continue;

                                cur_pipe->plane_res.hubp->funcs->validate_dml_output(
                                                cur_pipe->plane_res.hubp, dc->ctx,
                                                &context->res_ctx.pipe_ctx[i].rq_regs,
                                                &context->res_ctx.pipe_ctx[i].dlg_regs,
                                                &context->res_ctx.pipe_ctx[i].ttu_regs);
                        }
                }
2461#endif
2462        }
2463
2464        // Update Type FAST, Surface updates
2465        if (update_type == UPDATE_TYPE_FAST) {
2466                if (dc->hwss.set_flip_control_gsl)
2467                        for (i = 0; i < surface_count; i++) {
2468                                struct dc_plane_state *plane_state = srf_updates[i].surface;
2469
2470                                for (j = 0; j < dc->res_pool->pipe_count; j++) {
2471                                        struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2472
2473                                        if (pipe_ctx->stream != stream)
2474                                                continue;
2475
2476                                        if (pipe_ctx->plane_state != plane_state)
2477                                                continue;
2478
2479                                        // GSL has to be used for flip immediate
2480                                        dc->hwss.set_flip_control_gsl(pipe_ctx,
2481                                                        plane_state->flip_immediate);
2482                                }
2483                        }
2484                /* Perform requested Updates */
2485                for (i = 0; i < surface_count; i++) {
2486                        struct dc_plane_state *plane_state = srf_updates[i].surface;
2487
2488                        for (j = 0; j < dc->res_pool->pipe_count; j++) {
2489                                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2490
2491                                if (pipe_ctx->stream != stream)
2492                                        continue;
2493
2494                                if (pipe_ctx->plane_state != plane_state)
2495                                        continue;
                                /* program triple buffer after lock, based on flip type */
                                if (dc->hwss.program_triplebuffer != NULL &&
                                        !dc->debug.disable_tri_buf) {
                                        /* only enable triple buffer for fast updates */
                                        dc->hwss.program_triplebuffer(
                                                dc, pipe_ctx, plane_state->triplebuffer_flips);
                                }
2503                                if (srf_updates[i].flip_addr)
2504                                        dc->hwss.update_plane_addr(dc, pipe_ctx);
2505                        }
2506                }
2507        }
2508
2509        if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
2510                dc->hwss.interdependent_update_lock(dc, context, false);
2511        else
2512                dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
2513
2514        if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
2515                if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
2516                        top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
2517                                        top_pipe_to_program->stream_res.tg,
2518                                        CRTC_STATE_VACTIVE);
2519                        top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
2520                                        top_pipe_to_program->stream_res.tg,
2521                                        CRTC_STATE_VBLANK);
2522                        top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
2523                                        top_pipe_to_program->stream_res.tg,
2524                                        CRTC_STATE_VACTIVE);
2525
2526                        if (stream && should_use_dmub_lock(stream->link)) {
2527                                union dmub_hw_lock_flags hw_locks = { 0 };
2528                                struct dmub_hw_lock_inst_flags inst_flags = { 0 };
2529
2530                                hw_locks.bits.lock_dig = 1;
2531                                inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
2532
2533                                dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
2534                                                        false,
2535                                                        &hw_locks,
2536                                                        &inst_flags);
                        } else {
                                top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
                                        top_pipe_to_program->stream_res.tg);
                        }
2540                }
2541
2542        if (update_type != UPDATE_TYPE_FAST)
2543                dc->hwss.post_unlock_program_front_end(dc, context);
2544
2545        // Fire manual trigger only when bottom plane is flipped
2546        for (j = 0; j < dc->res_pool->pipe_count; j++) {
2547                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2548
2549                if (pipe_ctx->bottom_pipe ||
2550                                !pipe_ctx->stream ||
2551                                pipe_ctx->stream != stream ||
2552                                !pipe_ctx->plane_state->update_flags.bits.addr_update)
2553                        continue;
2554
2555                if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
2556                        pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
2557        }
2558}
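
/*
 * Summary of the ordering enforced above (descriptive only, not new
 * behaviour):
 *
 *	prepare_bandwidth()		// full updates: raise clocks first
 *	lock pipes			// interdependent or top-pipe lock
 *	stream updates + front-end programming
 *	flip/address programming	// fast path, under the same lock
 *	unlock pipes
 *	post_unlock_program_front_end()	// non-fast updates
 *	program_manual_trigger()	// after a bottom-plane address update
 */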
2559
2560void dc_commit_updates_for_stream(struct dc *dc,
2561                struct dc_surface_update *srf_updates,
2562                int surface_count,
2563                struct dc_stream_state *stream,
2564                struct dc_stream_update *stream_update,
2565                struct dc_state *state)
2566{
2567        const struct dc_stream_status *stream_status;
2568        enum surface_update_type update_type;
2569        struct dc_state *context;
2570        struct dc_context *dc_ctx = dc->ctx;
2571        int i, j;
2572
2573        stream_status = dc_stream_get_status(stream);
2574        context = dc->current_state;
2575
2576        update_type = dc_check_update_surfaces_for_stream(
2577                                dc, srf_updates, surface_count, stream_update, stream_status);
2578
        if (update_type >= update_surface_trace_level)
                update_surface_trace(dc, srf_updates, surface_count);

        if (update_type >= UPDATE_TYPE_FULL) {
2585                /* initialize scratch memory for building context */
2586                context = dc_create_state(dc);
2587                if (context == NULL) {
2588                        DC_ERROR("Failed to allocate new validate context!\n");
2589                        return;
2590                }
2591
2592                dc_resource_state_copy_construct(state, context);
2593
2594                for (i = 0; i < dc->res_pool->pipe_count; i++) {
2595                        struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
2596                        struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
2597
2598                        if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
2599                                new_pipe->plane_state->force_full_update = true;
2600                }
        }

2604        for (i = 0; i < surface_count; i++) {
2605                struct dc_plane_state *surface = srf_updates[i].surface;
2606
2607                copy_surface_update_to_plane(surface, &srf_updates[i]);
2608
2609                if (update_type >= UPDATE_TYPE_MED) {
2610                        for (j = 0; j < dc->res_pool->pipe_count; j++) {
2611                                struct pipe_ctx *pipe_ctx =
2612                                        &context->res_ctx.pipe_ctx[j];
2613
2614                                if (pipe_ctx->plane_state != surface)
2615                                        continue;
2616
2617                                resource_build_scaling_params(pipe_ctx);
2618                        }
2619                }
2620        }
2621
2622        copy_stream_update_to_stream(dc, context, stream, stream_update);
2623
2624        if (update_type > UPDATE_TYPE_FAST) {
2625                if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
2626                        DC_ERROR("Mode validation failed for stream update!\n");
2627                        dc_release_state(context);
2628                        return;
2629                }
2630        }
2631
2632        commit_planes_for_stream(
2633                                dc,
2634                                srf_updates,
2635                                surface_count,
2636                                stream,
2637                                stream_update,
2638                                update_type,
2639                                context);
        /* update current_state */
        if (dc->current_state != context) {
                struct dc_state *old = dc->current_state;
2644
2645                dc->current_state = context;
2646                dc_release_state(old);
2647
2648                for (i = 0; i < dc->res_pool->pipe_count; i++) {
2649                        struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2650
2651                        if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
2652                                pipe_ctx->plane_state->force_full_update = false;
2653                }
2654        }
        /* Use current_state to update watermarks, etc. */
2656        if (update_type >= UPDATE_TYPE_FULL)
2657                dc_post_update_surfaces_to_stream(dc);
2661}
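
/*
 * Illustrative sketch (not built), assuming the surrounding function is the
 * dc_commit_updates_for_stream() entry point and that the caller (the DM)
 * has already created the stream and plane: a pure address change rides the
 * path above as UPDATE_TYPE_FAST, while scaling or format changes escalate
 * to MED/FULL and take the revalidation branch. Field names are sketched
 * from dc's hw types and the variable names are placeholders.
 *
 *	struct dc_surface_update srf_update = { 0 };
 *	struct dc_flip_addrs flip_addrs = { 0 };
 *
 *	flip_addrs.address.grph.addr.quad_part = new_fb_gpu_addr;
 *	srf_update.surface = plane_state;
 *	srf_update.flip_addrs = &flip_addrs;
 *
 *	dc_commit_updates_for_stream(dc, &srf_update, 1, stream,
 *				     NULL, dc->current_state);
 */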
2662
2663uint8_t dc_get_current_stream_count(struct dc *dc)
2664{
2665        return dc->current_state->stream_count;
2666}
2667
2668struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
2669{
2670        if (i < dc->current_state->stream_count)
2671                return dc->current_state->streams[i];
2672        return NULL;
2673}
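
/*
 * Illustrative sketch (not built): enumerating the streams in the current
 * state with the two accessors above ("dc" is assumed to be a valid,
 * constructed instance):
 *
 *	uint8_t i, count = dc_get_current_stream_count(dc);
 *
 *	for (i = 0; i < count; i++) {
 *		struct dc_stream_state *s = dc_get_stream_at_index(dc, i);
 *
 *		if (s)
 *			DC_LOG_DC("stream %u: %ux%u\n", i,
 *				  s->timing.h_addressable,
 *				  s->timing.v_addressable);
 *	}
 */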
2674
2675enum dc_irq_source dc_interrupt_to_irq_source(
2676                struct dc *dc,
2677                uint32_t src_id,
2678                uint32_t ext_id)
2679{
2680        return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
2681}
2682
/**
 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
 * @dc: dc instance
 * @src: interrupt source to enable or disable
 * @enable: true to enable the source, false to disable it
 *
 * Return: result of the underlying dal_irq_service_set() call, or false if
 * @dc is NULL.
 */
2686bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
2687{
2689        if (dc == NULL)
2690                return false;
2691
2692        return dal_irq_service_set(dc->res_pool->irqs, src, enable);
2693}
2694
2695void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
2696{
2697        dal_irq_service_ack(dc->res_pool->irqs, src);
2698}
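
/*
 * Illustrative sketch (not built): typical DM-side use of the three
 * interrupt helpers above - map an IH src/ext id pair to a dc_irq_source,
 * enable it, and ack it from the handler (DC_IRQ_SOURCE_* values live in
 * irq_types.h):
 *
 *	enum dc_irq_source src =
 *		dc_interrupt_to_irq_source(dc, src_id, ext_id);
 *
 *	if (!dc_interrupt_set(dc, src, true))
 *		DC_LOG_WARNING("failed to enable irq source %d\n", src);
 *
 *	// ... and from the interrupt handler:
 *	dc_interrupt_ack(dc, src);
 */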
2699
2700void dc_power_down_on_boot(struct dc *dc)
2701{
2702        if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
2703                        dc->hwss.power_down_on_boot)
2704                dc->hwss.power_down_on_boot(dc);
2705}
2706
2707void dc_set_power_state(
2708        struct dc *dc,
2709        enum dc_acpi_cm_power_state power_state)
2710{
2711        struct kref refcount;
2712        struct display_mode_lib *dml;
2713
2714        switch (power_state) {
2715        case DC_ACPI_CM_POWER_STATE_D0:
2716                dc_resource_state_construct(dc, dc->current_state);
2717
2718                if (dc->ctx->dmub_srv)
2719                        dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
2720
2721                dc->hwss.init_hw(dc);
2722
2723                if (dc->hwss.init_sys_ctx != NULL &&
2724                        dc->vm_pa_config.valid) {
2725                        dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
2726                }
2727
2728                break;
2729        default:
2730                ASSERT(dc->current_state->stream_count == 0);
2731                /* Zero out the current context so that on resume we start with
2732                 * clean state, and dc hw programming optimizations will not
2733                 * cause any trouble.
2734                 */
2735                dml = kzalloc(sizeof(struct display_mode_lib),
2736                                GFP_KERNEL);
2737
2738                ASSERT(dml);
2739                if (!dml)
2740                        return;
2741
2742                /* Preserve refcount */
2743                refcount = dc->current_state->refcount;
2744                /* Preserve display mode lib */
2745                memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
2746
2747                dc_resource_state_destruct(dc->current_state);
2748                memset(dc->current_state, 0,
2749                                sizeof(*dc->current_state));
2750
2751                dc->current_state->refcount = refcount;
2752                dc->current_state->bw_ctx.dml = *dml;
2753
2754                kfree(dml);
2755
2756                break;
2757        }
2758}
2759
2760void dc_resume(struct dc *dc)
2761{
2762        uint32_t i;
2763
2764        for (i = 0; i < dc->link_count; i++)
2765                core_link_resume(dc->links[i]);
2766}
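
/*
 * Illustrative sketch (not built): one plausible suspend/resume sequence
 * the DM places around dc_set_power_state() and dc_resume() above
 * (power-state values are from enum dc_acpi_cm_power_state in dc_types.h):
 *
 *	// suspend path: drop to D3, the current state is zeroed out
 *	dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D3);
 *
 *	// resume path: back to D0 (re-inits hw), then resume the links
 *	dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D0);
 *	dc_resume(dc);
 */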
2767
2768bool dc_is_dmcu_initialized(struct dc *dc)
2769{
2770        struct dmcu *dmcu = dc->res_pool->dmcu;
2771
2772        if (dmcu)
2773                return dmcu->funcs->is_dmcu_initialized(dmcu);
2774        return false;
2775}
2776
2777bool dc_submit_i2c(
2778                struct dc *dc,
2779                uint32_t link_index,
2780                struct i2c_command *cmd)
2781{
        struct dc_link *link = dc->links[link_index];
        struct ddc_service *ddc = link->ddc;

        return dce_i2c_submit_command(
2786                dc->res_pool,
2787                ddc->ddc_pin,
2788                cmd);
2789}
2790
2791bool dc_submit_i2c_oem(
2792                struct dc *dc,
2793                struct i2c_command *cmd)
2794{
        struct ddc_service *ddc = dc->res_pool->oem_device;

        return dce_i2c_submit_command(
2797                dc->res_pool,
2798                ddc->ddc_pin,
2799                cmd);
2800}
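
/*
 * Illustrative sketch (not built): building an i2c_command for
 * dc_submit_i2c(). Field and enum names follow dc's ddc types as assumed
 * here; the 0x50 address and buffer contents are placeholders.
 *
 *	uint8_t offset = 0x00, buf[2];
 *	struct i2c_payload payloads[] = {
 *		{ .write = true,  .address = 0x50, .length = 1, .data = &offset },
 *		{ .write = false, .address = 0x50, .length = 2, .data = buf },
 *	};
 *	struct i2c_command cmd = {
 *		.payloads = payloads,
 *		.number_of_payloads = 2,
 *		.engine = I2C_COMMAND_ENGINE_DEFAULT,
 *		.speed = 100,
 *	};
 *
 *	if (!dc_submit_i2c(dc, link_index, &cmd))
 *		DC_LOG_WARNING("i2c transaction failed\n");
 */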
2801
2802static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
2803{
2804        if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
2805                BREAK_TO_DEBUGGER();
2806                return false;
2807        }
2808
2809        dc_sink_retain(sink);
2810
2811        dc_link->remote_sinks[dc_link->sink_count] = sink;
2812        dc_link->sink_count++;
2813
2814        return true;
2815}
2816
/**
 * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
 * @link: link to attach the new sink to
 * @edid: raw EDID for the sink
 * @len: EDID length in bytes
 * @init_data: sink creation parameters; must reference a valid link
 *
 * Return: the new sink, or NULL on failure.
 */
2822struct dc_sink *dc_link_add_remote_sink(
2823                struct dc_link *link,
2824                const uint8_t *edid,
2825                int len,
2826                struct dc_sink_init_data *init_data)
2827{
2828        struct dc_sink *dc_sink;
2829        enum dc_edid_status edid_status;
2830
2831        if (len > DC_MAX_EDID_BUFFER_SIZE) {
2832                dm_error("Max EDID buffer size breached!\n");
2833                return NULL;
2834        }
2835
2836        if (!init_data) {
2837                BREAK_TO_DEBUGGER();
2838                return NULL;
2839        }
2840
2841        if (!init_data->link) {
2842                BREAK_TO_DEBUGGER();
2843                return NULL;
2844        }
2845
2846        dc_sink = dc_sink_create(init_data);
2847
2848        if (!dc_sink)
2849                return NULL;
2850
2851        memmove(dc_sink->dc_edid.raw_edid, edid, len);
2852        dc_sink->dc_edid.length = len;
2853
2854        if (!link_add_remote_sink_helper(
2855                        link,
2856                        dc_sink))
2857                goto fail_add_sink;
2858
2859        edid_status = dm_helpers_parse_edid_caps(
2860                        link->ctx,
2861                        &dc_sink->dc_edid,
2862                        &dc_sink->edid_caps);
2863
        /*
         * Treat the device as an EDID-less device if EDID
         * parsing fails.
         */
        if (edid_status != EDID_OK) {
                dc_sink->dc_edid.length = 0;
                dm_error("Bad EDID, status %d!\n", edid_status);
        }
2872
2873        return dc_sink;
2874
2875fail_add_sink:
2876        dc_sink_release(dc_sink);
2877        return NULL;
2878}
2879
/**
 * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
 * @link: link the sink is attached to
 * @sink: remote sink to remove
 *
 * Note that this just removes the struct dc_sink - it doesn't
 * program hardware or alter other members of dc_link.
 */
2886void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
2887{
2888        int i;
2889
2890        if (!link->sink_count) {
2891                BREAK_TO_DEBUGGER();
2892                return;
2893        }
2894
2895        for (i = 0; i < link->sink_count; i++) {
2896                if (link->remote_sinks[i] == sink) {
2897                        dc_sink_release(sink);
2898                        link->remote_sinks[i] = NULL;
2899
                        /* Shrink the array to close the gap. */
2901                        while (i < link->sink_count - 1) {
2902                                link->remote_sinks[i] = link->remote_sinks[i+1];
2903                                i++;
2904                        }
2905                        link->remote_sinks[i] = NULL;
2906                        link->sink_count--;
2907                        return;
2908                }
2909        }
2910}
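
/*
 * Illustrative sketch (not built): attaching an MST display as a remote
 * sink with the two helpers above, then detaching it on unplug (init-data
 * field names follow struct dc_sink_init_data as assumed here; the EDID
 * buffer comes from the DM):
 *
 *	struct dc_sink_init_data init = {
 *		.link = link,
 *		.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST,
 *	};
 *	struct dc_sink *sink =
 *		dc_link_add_remote_sink(link, edid_buf, edid_len, &init);
 *
 *	// on unplug, drop the reference taken by the add:
 *	if (sink)
 *		dc_link_remove_remote_sink(link, sink);
 */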
2911
2912void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
2913{
        info->displayClock         = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
        info->engineClock          = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
        info->memoryClock          = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
        info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
        info->dppClock             = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
        info->socClock             = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
        info->dcfClockDeepSleep    = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
        info->fClock               = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
        info->phyClock             = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}

2924enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
2925{
2926        if (dc->hwss.set_clock)
2927                return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
2928        return DC_ERROR_UNEXPECTED;
}

2930void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
2931{
2932        if (dc->hwss.get_clock)
2933                dc->hwss.get_clock(dc, clock_type, clock_cfg);
2934}
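
/*
 * Illustrative sketch (not built): forcing and reading back a display
 * clock through the optional hwss hooks wrapped above (clock types are
 * from enum dc_clock_type; clk_khz is in kHz and stepping semantics are
 * ASIC-specific):
 *
 *	struct dc_clock_config cfg = { 0 };
 *
 *	if (dc_set_clock(dc, DC_CLOCK_TYPE_DISPCLK, 600000, 0) == DC_OK)
 *		dc_get_clock(dc, DC_CLOCK_TYPE_DISPCLK, &cfg);
 */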
2935
2936#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
2937
2938void dc_allow_idle_optimizations(struct dc *dc, bool allow)
2939{
2940        if (dc->debug.disable_idle_power_optimizations)
2941                return;
2942
2943        if (allow == dc->idle_optimizations_allowed)
2944                return;
2945
2946        if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
2947                dc->idle_optimizations_allowed = allow;
2948}
2949
/*
 * Blank all streams, and set the minimum and maximum memory clock to the
 * lowest and highest DPM levels, respectively.
 */
2954void dc_unlock_memory_clock_frequency(struct dc *dc)
2955{
2956        unsigned int i;
2957
2958        for (i = 0; i < MAX_PIPES; i++)
2959                if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
2960                        core_link_disable_stream(&dc->current_state->res_ctx.pipe_ctx[i]);
2961
2962        dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
2963        dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
2964}
2965
/*
 * Set the minimum memory clock to the minimum required for the current mode,
 * the maximum to the highest DPM level, and unblank the streams.
 */
2970void dc_lock_memory_clock_frequency(struct dc *dc)
2971{
2972        unsigned int i;
2973
2974        dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
2975        dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
2976        dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
2977
2978        for (i = 0; i < MAX_PIPES; i++)
2979                if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
2980                        core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
2981}
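
/*
 * Illustrative sketch (not built): given the comments above, one plausible
 * pattern is to bracket a reconfiguration that must be free to move the
 * memory clock across its full DPM range:
 *
 *	dc_unlock_memory_clock_frequency(dc);	// blank + widest mclk range
 *	// ... reconfigure ...
 *	dc_lock_memory_clock_frequency(dc);	// re-pin to mode + unblank
 */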
2982#endif
2983