linux/drivers/gpu/drm/amd/display/dc/core/dc.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include <linux/slab.h>
#include <linux/mm.h>

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"

#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "bios/bios_parser_helper.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"
#include "hubp.h"

#include "link_hwss.h"
#include "link_encoder.h"
#include "link_enc_cfg.h"

#include "dc_link.h"
#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"

#include "dc_link_dp.h"
#include "dc_dmub_srv.h"

#include "dsc.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#include "dmub/dmub_srv.h"

#include "i2caux_interface.h"
#include "dce/dmub_hw_lock_mgr.h"

#include "dc_trace.h"

#define CTX \
        dc->ctx

#define DC_LOGGER \
        dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";

/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct.  One per driver.  Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints).  Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display.  Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug.  A dc_link can have a local sink
 * (the display directly attached).  It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver.  Represents the hw blocks not in the
 * main pipeline.  Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed.  There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display.  Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer.  Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool.  Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context.  Represents the
 * internal hardware pipeline components.  Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */
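
/*
 * Illustrative sketch: a minimal view of how a display manager (dm) would
 * typically drive the structs above, using only entry points defined later
 * in this file. Building and committing a dc_state is handled through the
 * dc_state API and is not shown in this excerpt:
 *
 *      struct dc *dc = dc_create(&init_params);   // one instance per driver
 *
 *      dc_hardware_init(dc);                       // program HW defaults
 *      //... build a dc_state with streams/planes and commit it ...
 *      dc_destroy(&dc);                            // on driver unload
 */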

/*******************************************************************************
 * Private functions
 ******************************************************************************/

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
        if (new > *original)
                *original = new;
}

static void destroy_links(struct dc *dc)
{
        uint32_t i;

        for (i = 0; i < dc->link_count; i++) {
                if (dc->links[i])
                        link_destroy(&dc->links[i]);
        }
}

static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
{
        int i;
        uint32_t count = 0;

        for (i = 0; i < num_links; i++) {
                if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
                                links[i]->is_internal_display)
                        count++;
        }

        return count;
}

static int get_seamless_boot_stream_count(struct dc_state *ctx)
{
        uint8_t i;
        uint8_t seamless_boot_stream_count = 0;

        for (i = 0; i < ctx->stream_count; i++)
                if (ctx->streams[i]->apply_seamless_boot_optimization)
                        seamless_boot_stream_count++;

        return seamless_boot_stream_count;
}

static bool create_links(
                struct dc *dc,
                uint32_t num_virtual_links)
{
        int i;
        int connectors_num;
        struct dc_bios *bios = dc->ctx->dc_bios;

        dc->link_count = 0;

        connectors_num = bios->funcs->get_connectors_number(bios);

        DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

        if (connectors_num > ENUM_ID_COUNT) {
                dm_error(
                        "DC: Number of connectors %d exceeds maximum of %d!\n",
                        connectors_num,
                        ENUM_ID_COUNT);
                return false;
        }

        dm_output_to_console(
                "DC: %s: connectors_num: physical:%d, virtual:%d\n",
                __func__,
                connectors_num,
                num_virtual_links);

        for (i = 0; i < connectors_num; i++) {
                struct link_init_data link_init_params = {0};
                struct dc_link *link;

                DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

                link_init_params.ctx = dc->ctx;
                /* next BIOS object table connector */
                link_init_params.connector_index = i;
                link_init_params.link_index = dc->link_count;
                link_init_params.dc = dc;
                link = link_create(&link_init_params);

                if (link) {
                        dc->links[dc->link_count] = link;
                        link->dc = dc;
                        ++dc->link_count;
                }
        }

        DC_LOG_DC("BIOS object table - end");

        /* Create a link for each usb4 dpia port */
        for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
                struct link_init_data link_init_params = {0};
                struct dc_link *link;

                link_init_params.ctx = dc->ctx;
                link_init_params.connector_index = i;
                link_init_params.link_index = dc->link_count;
                link_init_params.dc = dc;
                link_init_params.is_dpia_link = true;

                link = link_create(&link_init_params);
                if (link) {
                        dc->links[dc->link_count] = link;
                        link->dc = dc;
                        ++dc->link_count;
                }
        }

        for (i = 0; i < num_virtual_links; i++) {
                struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
                struct encoder_init_data enc_init = {0};

                if (link == NULL) {
                        BREAK_TO_DEBUGGER();
                        goto failed_alloc;
                }

                link->link_index = dc->link_count;
                dc->links[dc->link_count] = link;
                dc->link_count++;

                link->ctx = dc->ctx;
                link->dc = dc;
                link->connector_signal = SIGNAL_TYPE_VIRTUAL;
                link->link_id.type = OBJECT_TYPE_CONNECTOR;
                link->link_id.id = CONNECTOR_ID_VIRTUAL;
                link->link_id.enum_id = ENUM_ID_1;
                link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

                if (!link->link_enc) {
                        BREAK_TO_DEBUGGER();
                        goto failed_alloc;
                }

#if defined(CONFIG_DRM_AMD_DC_DCN)
                if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
                                dc->caps.dp_hpo &&
                                link->dc->res_pool->res_cap->num_hpo_dp_link_encoder > 0) {
                        /* FPGA case - Allocate HPO DP link encoder */
                        if (i < link->dc->res_pool->res_cap->num_hpo_dp_link_encoder) {
                                link->hpo_dp_link_enc = link->dc->res_pool->hpo_dp_link_enc[i];

                                if (link->hpo_dp_link_enc == NULL) {
                                        BREAK_TO_DEBUGGER();
                                        goto failed_alloc;
                                }
                                link->hpo_dp_link_enc->hpd_source = link->link_enc->hpd_source;
                                link->hpo_dp_link_enc->transmitter = link->link_enc->transmitter;
                        }
                }
#endif

                link->link_status.dpcd_caps = &link->dpcd_caps;

                enc_init.ctx = dc->ctx;
                enc_init.channel = CHANNEL_ID_UNKNOWN;
                enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
                enc_init.transmitter = TRANSMITTER_UNKNOWN;
                enc_init.connector = link->link_id;
                enc_init.encoder.type = OBJECT_TYPE_ENCODER;
                enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
                enc_init.encoder.enum_id = ENUM_ID_1;
                virtual_link_encoder_construct(link->link_enc, &enc_init);
        }

        dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);

        return true;

failed_alloc:
        return false;
}

/* Create additional DIG link encoder objects if fewer than the platform
 * supports were created during link construction. This can happen if the
 * number of physical connectors is less than the number of DIGs.
 */
static bool create_link_encoders(struct dc *dc)
{
        bool res = true;
        unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
        unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
        int i;

        /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
         * link encoders and physical display endpoints and does not require
         * additional link encoder objects.
         */
        if (num_usb4_dpia == 0)
                return res;

        /* Create as many link encoder objects as the platform supports. DPIA
         * endpoints can be programmably mapped to any DIG.
         */
        if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
                for (i = 0; i < num_dig_link_enc; i++) {
                        struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

                        if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
                                link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
                                                (enum engine_id)(ENGINE_ID_DIGA + i));
                                if (link_enc) {
                                        dc->res_pool->link_encoders[i] = link_enc;
                                        dc->res_pool->dig_link_enc_count++;
                                } else {
                                        res = false;
                                }
                        }
                }
        }

        return res;
}

/* Destroy any additional DIG link encoder objects created by
 * create_link_encoders().
 * NB: Must only be called after destroy_links().
 */
static void destroy_link_encoders(struct dc *dc)
{
        unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
        unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
        int i;

        /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
         * link encoders and physical display endpoints and does not require
         * additional link encoder objects.
         */
        if (num_usb4_dpia == 0)
                return;

        for (i = 0; i < num_dig_link_enc; i++) {
                struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

                if (link_enc) {
                        link_enc->funcs->destroy(&link_enc);
                        dc->res_pool->link_encoders[i] = NULL;
                        dc->res_pool->dig_link_enc_count--;
                }
        }
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
        return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
        kfree(*perf_trace);
        *perf_trace = NULL;
}

/**
 * dc_stream_adjust_vmin_vmax - look up pipe context and update parts of DRR
 * @dc:     dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of the DRR (Dynamic Refresh
 * Rate), a power-saving feature that aims to reduce the panel refresh rate
 * while the screen is static.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
                struct dc_stream_state *stream,
                struct dc_crtc_timing_adjust *adjust)
{
        int i;
        bool ret = false;

        stream->adjust.v_total_max = adjust->v_total_max;
        stream->adjust.v_total_mid = adjust->v_total_mid;
        stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
        stream->adjust.v_total_min = adjust->v_total_min;

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.tg) {
                        dc->hwss.set_drr(&pipe,
                                        1,
                                        *adjust);

                        ret = true;
                }
        }
        return ret;
}

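/*
 * Illustrative usage sketch (not upstream documentation): a caller that
 * wants a DRR window between two refresh rates would populate the adjust
 * struct with V totals it has derived itself and call the function above.
 * v_total_for_60hz/v_total_for_48hz are hypothetical values computed by
 * the caller from the pixel clock and horizontal total; this function does
 * not compute them:
 *
 *      struct dc_crtc_timing_adjust adjust = stream->adjust;
 *
 *      adjust.v_total_min = v_total_for_60hz;
 *      adjust.v_total_max = v_total_for_48hz;
 *      dc_stream_adjust_vmin_vmax(dc, stream, &adjust);
 */
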
/**
 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
 * @dc:           dc reference
 * @stream:       Initial dc stream state
 * @refresh_rate: Receives the last VTOTAL used by DRR
 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
                struct dc_stream_state *stream,
                uint32_t *refresh_rate)
{
        bool status = false;
        int i = 0;

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.tg) {
                        /* Only execute if a function pointer has been defined for
                         * the DC version in question
                         */
                        if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
                                pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);

                                status = true;

                                break;
                        }
                }
        }

        return status;
}
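
/*
 * Illustrative usage sketch (not upstream documentation): reading back the
 * VTOTAL the hardware last used under DRR, e.g. to derive the effective
 * refresh rate for logging. last_vtotal is a hypothetical local:
 *
 *      uint32_t last_vtotal = 0;
 *
 *      if (dc_stream_get_last_used_drr_vtotal(dc, stream, &last_vtotal))
 *              DC_LOG_DC("last DRR VTOTAL: %u", last_vtotal);
 */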

bool dc_stream_get_crtc_position(struct dc *dc,
                struct dc_stream_state **streams, int num_streams,
                unsigned int *v_pos, unsigned int *nom_v_pos)
{
        /* TODO: Support multiple streams */
        const struct dc_stream_state *stream = streams[0];
        int i;
        bool ret = false;
        struct crtc_position position;

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe =
                                &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.stream_enc) {
                        dc->hwss.get_position(&pipe, 1, &position);

                        *v_pos = position.vertical_count;
                        *nom_v_pos = position.nominal_vcount;
                        ret = true;
                }
        }
        return ret;
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
bool dc_stream_forward_dmcu_crc_window(struct dc *dc, struct dc_stream_state *stream,
                             struct crc_params *crc_window)
{
        int i;
        struct dmcu *dmcu = dc->res_pool->dmcu;
        struct pipe_ctx *pipe;
        struct crc_region tmp_win, *crc_win;
        struct otg_phy_mux mapping_tmp, *mux_mapping;

        /* crc window can't be null */
        if (!crc_window)
                return false;

        if (dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) {
                crc_win = &tmp_win;
                mux_mapping = &mapping_tmp;
                /* set crc window */
                tmp_win.x_start = crc_window->windowa_x_start;
                tmp_win.y_start = crc_window->windowa_y_start;
                tmp_win.x_end = crc_window->windowa_x_end;
                tmp_win.y_end = crc_window->windowa_y_end;

                for (i = 0; i < MAX_PIPES; i++) {
                        pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                        if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
                                break;
                }

                /* Stream not found */
                if (i == MAX_PIPES)
                        return false;

                /* set mux routing info */
                mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
                mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;

                dmcu->funcs->forward_crc_window(dmcu, crc_win, mux_mapping);
        } else {
                DC_LOG_DC("dmcu is not initialized");
                return false;
        }

        return true;
}

bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *stream)
{
        int i;
        struct dmcu *dmcu = dc->res_pool->dmcu;
        struct pipe_ctx *pipe;
        struct otg_phy_mux mapping_tmp, *mux_mapping;

        if (dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) {
                mux_mapping = &mapping_tmp;

                for (i = 0; i < MAX_PIPES; i++) {
                        pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                        if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
                                break;
                }

                /* Stream not found */
                if (i == MAX_PIPES)
                        return false;

                /* set mux routing info */
                mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
                mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;

                dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
        } else {
                DC_LOG_DC("dmcu is not initialized");
                return false;
        }

        return true;
}
#endif

/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @crc_window: CRC window (x/y start/end) information
 * @enable: Enable CRC if true, disable otherwise.
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
                             struct crc_params *crc_window, bool enable, bool continuous)
{
        int i;
        struct pipe_ctx *pipe;
        struct crc_params param;
        struct timing_generator *tg;

        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
                        break;
        }
        /* Stream not found */
        if (i == MAX_PIPES)
                return false;

        /* By default, capture the full frame */
        param.windowa_x_start = 0;
        param.windowa_y_start = 0;
        param.windowa_x_end = pipe->stream->timing.h_addressable;
        param.windowa_y_end = pipe->stream->timing.v_addressable;
        param.windowb_x_start = 0;
        param.windowb_y_start = 0;
        param.windowb_x_end = pipe->stream->timing.h_addressable;
        param.windowb_y_end = pipe->stream->timing.v_addressable;

        if (crc_window) {
                param.windowa_x_start = crc_window->windowa_x_start;
                param.windowa_y_start = crc_window->windowa_y_start;
                param.windowa_x_end = crc_window->windowa_x_end;
                param.windowa_y_end = crc_window->windowa_y_end;
                param.windowb_x_start = crc_window->windowb_x_start;
                param.windowb_y_start = crc_window->windowb_y_start;
                param.windowb_x_end = crc_window->windowb_x_end;
                param.windowb_y_end = crc_window->windowb_y_end;
        }

        param.dsc_mode = pipe->stream->timing.flags.DSC ? 1 : 0;
        param.odm_mode = pipe->next_odm_pipe ? 1 : 0;

        /* Default to the union of both windows */
        param.selection = UNION_WINDOW_A_B;
        param.continuous_mode = continuous;
        param.enable = enable;

        tg = pipe->stream_res.tg;

        /* Only call if supported */
        if (tg->funcs->configure_crc)
                return tg->funcs->configure_crc(tg, &param);
        DC_LOG_WARNING("CRC capture not supported.");
        return false;
}

/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 * @dc: DC object
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the first of the 3 channels stored here.
 * @g_y:  CRC value for the second of the 3 channels stored here.
 * @b_cb: CRC value for the third of the 3 channels stored here.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 * Return false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
                       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
        int i;
        struct pipe_ctx *pipe;
        struct timing_generator *tg;

        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe->stream == stream)
                        break;
        }
        /* Stream not found */
        if (i == MAX_PIPES)
                return false;

        tg = pipe->stream_res.tg;

        if (tg->funcs->get_crc)
                return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
        DC_LOG_WARNING("CRC capture not supported.");
        return false;
}
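
/*
 * Illustrative usage sketch (not upstream documentation): a one-shot,
 * full-frame CRC capture followed by a readback. The caller must let at
 * least one frame scan out between the two calls so the CRC can latch
 * (e.g. via a vblank wait, not shown):
 *
 *      uint32_t r_cr, g_y, b_cb;
 *
 *      if (dc_stream_configure_crc(dc, stream, NULL, true, false)) {
 *              //... wait for at least one frame ...
 *              if (dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *                      //... CRC values are valid here ...
 *      }
 */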

void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
                enum dc_dynamic_expansion option)
{
        /* OPP FMT dyn expansion updates */
        int i;
        struct pipe_ctx *pipe_ctx;

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream
                                == stream) {
                        pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
                        pipe_ctx->stream_res.opp->dyn_expansion = option;
                        pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
                                        pipe_ctx->stream_res.opp,
                                        COLOR_SPACE_YCBCR601,
                                        stream->timing.display_color_depth,
                                        stream->signal);
                }
        }
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
                enum dc_dither_option option)
{
        struct bit_depth_reduction_params params;
        struct dc_link *link = stream->link;
        struct pipe_ctx *pipes = NULL;
        int i;

        for (i = 0; i < MAX_PIPES; i++) {
                if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
                                stream) {
                        pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
                        break;
                }
        }

        if (!pipes)
                return;
        if (option > DITHER_OPTION_MAX)
                return;

        stream->dither_option = option;

        memset(&params, 0, sizeof(params));
        resource_build_bit_depth_reduction_params(stream, &params);
        stream->bit_depth_params = params;

        if (pipes->plane_res.xfm &&
            pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
                pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
                        pipes->plane_res.xfm,
                        pipes->plane_res.scl_data.lb_params.depth,
                        &stream->bit_depth_params);
        }

        pipes->stream_res.opp->funcs->
                opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
        int i;
        bool ret = false;
        struct pipe_ctx *pipes;

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
                        pipes = &dc->current_state->res_ctx.pipe_ctx[i];
                        dc->hwss.program_gamut_remap(pipes);
                        ret = true;
                }
        }

        return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
        int i;
        bool ret = false;
        struct pipe_ctx *pipes;

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream
                                == stream) {
                        pipes = &dc->current_state->res_ctx.pipe_ctx[i];
                        dc->hwss.program_output_csc(dc,
                                        pipes,
                                        stream->output_color_space,
                                        stream->csc_color_matrix.matrix,
                                        pipes->stream_res.opp->inst);
                        ret = true;
                }
        }

        return ret;
}

void dc_stream_set_static_screen_params(struct dc *dc,
                struct dc_stream_state **streams,
                int num_streams,
                const struct dc_static_screen_params *params)
{
        int i, j;
        struct pipe_ctx *pipes_affected[MAX_PIPES];
        int num_pipes_affected = 0;

        for (i = 0; i < num_streams; i++) {
                struct dc_stream_state *stream = streams[i];

                for (j = 0; j < MAX_PIPES; j++) {
                        if (dc->current_state->res_ctx.pipe_ctx[j].stream
                                        == stream) {
                                pipes_affected[num_pipes_affected++] =
                                                &dc->current_state->res_ctx.pipe_ctx[j];
                        }
                }
        }

        dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}

static void dc_destruct(struct dc *dc)
{
        if (dc->current_state) {
                dc_release_state(dc->current_state);
                dc->current_state = NULL;
        }

        destroy_links(dc);

        destroy_link_encoders(dc);

        if (dc->clk_mgr) {
                dc_destroy_clk_mgr(dc->clk_mgr);
                dc->clk_mgr = NULL;
        }

        dc_destroy_resource_pool(dc);

        if (dc->ctx->gpio_service)
                dal_gpio_service_destroy(&dc->ctx->gpio_service);

        if (dc->ctx->created_bios)
                dal_bios_parser_destroy(&dc->ctx->dc_bios);

        dc_perf_trace_destroy(&dc->ctx->perf_trace);

        kfree(dc->ctx);
        dc->ctx = NULL;

        kfree(dc->bw_vbios);
        dc->bw_vbios = NULL;

        kfree(dc->bw_dceip);
        dc->bw_dceip = NULL;

#ifdef CONFIG_DRM_AMD_DC_DCN
        kfree(dc->dcn_soc);
        dc->dcn_soc = NULL;

        kfree(dc->dcn_ip);
        dc->dcn_ip = NULL;

#endif
        kfree(dc->vm_helper);
        dc->vm_helper = NULL;
}

static bool dc_construct_ctx(struct dc *dc,
                const struct dc_init_data *init_params)
{
        struct dc_context *dc_ctx;
        enum dce_version dc_version = DCE_VERSION_UNKNOWN;

        dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
        if (!dc_ctx)
                return false;

        dc_ctx->cgs_device = init_params->cgs_device;
        dc_ctx->driver_context = init_params->driver;
        dc_ctx->dc = dc;
        dc_ctx->asic_id = init_params->asic_id;
        dc_ctx->dc_sink_id_count = 0;
        dc_ctx->dc_stream_id_count = 0;
        dc_ctx->dce_environment = init_params->dce_environment;

        /* Create logger */

        dc_version = resource_parse_asic_id(init_params->asic_id);
        dc_ctx->dce_version = dc_version;

        dc_ctx->perf_trace = dc_perf_trace_create();
        if (!dc_ctx->perf_trace) {
                ASSERT_CRITICAL(false);
                return false;
        }

        dc->ctx = dc_ctx;

        return true;
}

static bool dc_construct(struct dc *dc,
                const struct dc_init_data *init_params)
{
        struct dc_context *dc_ctx;
        struct bw_calcs_dceip *dc_dceip;
        struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN
        struct dcn_soc_bounding_box *dcn_soc;
        struct dcn_ip_params *dcn_ip;
#endif

        dc->config = init_params->flags;

        /* Allocate memory for the vm_helper */
        dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
        if (!dc->vm_helper) {
                dm_error("%s: failed to create dc->vm_helper\n", __func__);
                goto fail;
        }

        memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

        dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
        if (!dc_dceip) {
                dm_error("%s: failed to create dceip\n", __func__);
                goto fail;
        }

        dc->bw_dceip = dc_dceip;

        dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
        if (!dc_vbios) {
                dm_error("%s: failed to create vbios\n", __func__);
                goto fail;
        }

        dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN
        dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
        if (!dcn_soc) {
                dm_error("%s: failed to create dcn_soc\n", __func__);
                goto fail;
        }

        dc->dcn_soc = dcn_soc;

        dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
        if (!dcn_ip) {
                dm_error("%s: failed to create dcn_ip\n", __func__);
                goto fail;
        }

        dc->dcn_ip = dcn_ip;
#endif

        if (!dc_construct_ctx(dc, init_params)) {
                dm_error("%s: failed to create ctx\n", __func__);
                goto fail;
        }

        dc_ctx = dc->ctx;

        /* Resource should construct all asic specific resources.
         * This should be the only place where we need to parse the asic id
         */
        if (init_params->vbios_override) {
                dc_ctx->dc_bios = init_params->vbios_override;
        } else {
                /* Create BIOS parser */
                struct bp_init_data bp_init_data;

                bp_init_data.ctx = dc_ctx;
                bp_init_data.bios = init_params->asic_id.atombios_base_address;

                dc_ctx->dc_bios = dal_bios_parser_create(
                                &bp_init_data, dc_ctx->dce_version);

                if (!dc_ctx->dc_bios) {
                        ASSERT_CRITICAL(false);
                        goto fail;
                }

                dc_ctx->created_bios = true;
        }

        dc->vendor_signature = init_params->vendor_signature;

        /* Create GPIO service */
        dc_ctx->gpio_service = dal_gpio_service_create(
                        dc_ctx->dce_version,
                        dc_ctx->dce_environment,
                        dc_ctx);

        if (!dc_ctx->gpio_service) {
                ASSERT_CRITICAL(false);
                goto fail;
        }

        dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
        if (!dc->res_pool)
                goto fail;

        /* set i2c speed if not done by the respective dcnxxx_resource.c */
        if (dc->caps.i2c_speed_in_khz_hdcp == 0)
                dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;

        dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
        if (!dc->clk_mgr)
                goto fail;
#ifdef CONFIG_DRM_AMD_DC_DCN
        dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
#endif

        if (dc->res_pool->funcs->update_bw_bounding_box)
                dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);

        /* Creation of current_state must occur after dc->dml
         * is initialized in dc_create_resource_pool because
         * on creation it copies the contents of dc->dml
         */

        dc->current_state = dc_create_state(dc);

        if (!dc->current_state) {
                dm_error("%s: failed to create validate ctx\n", __func__);
                goto fail;
        }

        dc_resource_state_construct(dc, dc->current_state);

        if (!create_links(dc, init_params->num_virtual_links))
                goto fail;

        /* Create additional DIG link encoder objects if fewer than the platform
         * supports were created during link construction.
         */
        if (!create_link_encoders(dc))
                goto fail;

        /* Initialise DIG link encoder resource tracking variables. */
        link_enc_cfg_init(dc, dc->current_state);

        return true;

fail:
        return false;
}

static void disable_all_writeback_pipes_for_stream(
                const struct dc *dc,
                struct dc_stream_state *stream,
                struct dc_state *context)
{
        int i;

        for (i = 0; i < stream->num_wb_info; i++)
                stream->writeback_info[i].wb_enabled = false;
}

static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
                                          struct dc_stream_state *stream, bool lock)
{
        int i;

        /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
        if (dc->hwss.interdependent_update_lock) {
                dc->hwss.interdependent_update_lock(dc, context, lock);
        } else {
                for (i = 0; i < dc->res_pool->pipe_count; i++) {
                        struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
                        struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

                        /* Copied conditions that were previously in dce110_apply_ctx_for_surface */
                        if (stream == pipe_ctx->stream) {
                                if (!pipe_ctx->top_pipe &&
                                        (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
                                        dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
                        }
                }
        }
}

static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
        int i, j;
        struct dc_state *dangling_context = dc_create_state(dc);
        struct dc_state *current_ctx;

        if (dangling_context == NULL)
                return;

        dc_resource_state_copy_construct(dc->current_state, dangling_context);

        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct dc_stream_state *old_stream =
                                dc->current_state->res_ctx.pipe_ctx[i].stream;
                bool should_disable = true;
                bool pipe_split_change =
                        context->res_ctx.pipe_ctx[i].top_pipe != dc->current_state->res_ctx.pipe_ctx[i].top_pipe;

                for (j = 0; j < context->stream_count; j++) {
                        if (old_stream == context->streams[j]) {
                                should_disable = false;
                                break;
                        }
                }
                if (!should_disable && pipe_split_change)
                        should_disable = true;

                if (should_disable && old_stream) {
                        dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
                        disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

                        if (dc->hwss.apply_ctx_for_surface) {
                                apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
                                dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
                                apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
                                dc->hwss.post_unlock_program_front_end(dc, dangling_context);
                        }
                        if (dc->hwss.program_front_end_for_ctx) {
                                dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
                                dc->hwss.program_front_end_for_ctx(dc, dangling_context);
                                dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
                                dc->hwss.post_unlock_program_front_end(dc, dangling_context);
                        }
                }
        }

        current_ctx = dc->current_state;
        dc->current_state = dangling_context;
        dc_release_state(current_ctx);
}

static void disable_vbios_mode_if_required(
                struct dc *dc,
                struct dc_state *context)
{
        unsigned int i, j;

        /* check if timing changed, and if so disable the stream */
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct dc_stream_state *stream = NULL;
                struct dc_link *link = NULL;
                struct pipe_ctx *pipe = NULL;

                pipe = &context->res_ctx.pipe_ctx[i];
                stream = pipe->stream;
                if (stream == NULL)
                        continue;

                /* only looking for first odm pipe */
                if (pipe->prev_odm_pipe)
                        continue;

                if (stream->link->local_sink &&
                        stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
                        link = stream->link;
                }

                if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
                        unsigned int enc_inst, tg_inst = 0;
                        unsigned int pix_clk_100hz;

                        enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
                        if (enc_inst != ENGINE_ID_UNKNOWN) {
                                for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
                                        if (dc->res_pool->stream_enc[j]->id == enc_inst) {
                                                tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
                                                        dc->res_pool->stream_enc[j]);
                                                break;
                                        }
                                }

                                dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
                                        dc->res_pool->dp_clock_source,
                                        tg_inst, &pix_clk_100hz);

                                if (link->link_status.link_active) {
                                        uint32_t requested_pix_clk_100hz =
                                                pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;

                                        if (pix_clk_100hz != requested_pix_clk_100hz) {
                                                core_link_disable_stream(pipe);
                                                pipe->stream->dpms_off = false;
                                        }
                                }
                        }
                }
        }
}

static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
        int i;

        PERF_TRACE();
        for (i = 0; i < MAX_PIPES; i++) {
                int count = 0;
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

                if (!pipe->plane_state)
                        continue;

                /* Timeout 100 ms */
                while (count < 100000) {
                        /* Must set to false to start with, due to OR in update function */
                        pipe->plane_state->status.is_flip_pending = false;
                        dc->hwss.update_pending_status(pipe);
                        if (!pipe->plane_state->status.is_flip_pending)
                                break;
                        udelay(1);
                        count++;
                }
                ASSERT(!pipe->plane_state->status.is_flip_pending);
        }
        PERF_TRACE();
}

/*******************************************************************************
 * Public functions
 ******************************************************************************/

struct dc *dc_create(const struct dc_init_data *init_params)
{
        struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
        unsigned int full_pipe_count;

        if (!dc)
                return NULL;

        if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
                if (!dc_construct_ctx(dc, init_params))
                        goto destruct_dc;
        } else {
                if (!dc_construct(dc, init_params))
                        goto destruct_dc;

                full_pipe_count = dc->res_pool->pipe_count;
                if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
                        full_pipe_count--;
                dc->caps.max_streams = min(
                                full_pipe_count,
                                dc->res_pool->stream_enc_count);

                dc->caps.max_links = dc->link_count;
                dc->caps.max_audios = dc->res_pool->audio_count;
                dc->caps.linear_pitch_alignment = 64;

                dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

                if (dc->res_pool->dmcu != NULL)
                        dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
        }

        /* Populate versioning information */
        dc->versions.dc_ver = DC_VER;

        dc->build_id = DC_BUILD_ID;

        DC_LOG_DC("Display Core initialized\n");

        return dc;

destruct_dc:
        dc_destruct(dc);
        kfree(dc);
        return NULL;
}
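
/*
 * Illustrative usage sketch (not upstream documentation): the DM layer
 * creates exactly one dc instance at driver load. Only fields consumed in
 * this file are shown; real callers such as amdgpu_dm fill in many more,
 * and the values below are placeholders:
 *
 *      struct dc_init_data init_params = { 0 };
 *
 *      init_params.driver = dm_private_ctx;    // hypothetical DM handle
 *      init_params.asic_id = asic_id;
 *      init_params.dce_environment = DCE_ENV_PRODUCTION_DRV;
 *      init_params.num_virtual_links = 0;
 *      dc = dc_create(&init_params);
 */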
1260
1261static void detect_edp_presence(struct dc *dc)
1262{
1263        struct dc_link *edp_links[MAX_NUM_EDP];
1264        struct dc_link *edp_link = NULL;
1265        enum dc_connection_type type;
1266        int i;
1267        int edp_num;
1268
1269        get_edp_links(dc, edp_links, &edp_num);
1270        if (!edp_num)
1271                return;
1272
1273        for (i = 0; i < edp_num; i++) {
1274                edp_link = edp_links[i];
1275                if (dc->config.edp_not_connected) {
1276                        edp_link->edp_sink_present = false;
1277                } else {
1278                        dc_link_detect_sink(edp_link, &type);
1279                        edp_link->edp_sink_present = (type != dc_connection_none);
1280                }
1281        }
1282}
1283
1284void dc_hardware_init(struct dc *dc)
1285{
1286
1287        detect_edp_presence(dc);
1288        if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
1289                dc->hwss.init_hw(dc);
1290}
1291
1292void dc_init_callbacks(struct dc *dc,
1293                const struct dc_callback_init *init_params)
1294{
1295#ifdef CONFIG_DRM_AMD_DC_HDCP
1296        dc->ctx->cp_psp = init_params->cp_psp;
1297#endif
1298}
1299
1300void dc_deinit_callbacks(struct dc *dc)
1301{
1302#ifdef CONFIG_DRM_AMD_DC_HDCP
1303        memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
1304#endif
1305}
1306
1307void dc_destroy(struct dc **dc)
1308{
1309        dc_destruct(*dc);
1310        kfree(*dc);
1311        *dc = NULL;
1312}
1313
1314static void enable_timing_multisync(
1315                struct dc *dc,
1316                struct dc_state *ctx)
1317{
1318        int i, multisync_count = 0;
1319        int pipe_count = dc->res_pool->pipe_count;
1320        struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
1321
1322        for (i = 0; i < pipe_count; i++) {
1323                if (!ctx->res_ctx.pipe_ctx[i].stream ||
1324                                !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
1325                        continue;
1326                if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
1327                        continue;
1328                multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
1329                multisync_count++;
1330        }
1331
1332        if (multisync_count > 0) {
1333                dc->hwss.enable_per_frame_crtc_position_reset(
1334                        dc, multisync_count, multisync_pipes);
1335        }
1336}
1337
1338static void program_timing_sync(
1339                struct dc *dc,
1340                struct dc_state *ctx)
1341{
1342        int i, j, k;
1343        int group_index = 0;
1344        int num_group = 0;
1345        int pipe_count = dc->res_pool->pipe_count;
1346        struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
1347
1348        for (i = 0; i < pipe_count; i++) {
1349                if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
1350                        continue;
1351
1352                unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
1353        }
1354
1355        for (i = 0; i < pipe_count; i++) {
1356                int group_size = 1;
1357                enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
1358                struct pipe_ctx *pipe_set[MAX_PIPES];
1359
1360                if (!unsynced_pipes[i])
1361                        continue;
1362
1363                pipe_set[0] = unsynced_pipes[i];
1364                unsynced_pipes[i] = NULL;
1365
1366                /* Add tg to the set, search rest of the tg's for ones with
1367                 * same timing, add all tgs with same timing to the group
1368                 */
1369                for (j = i + 1; j < pipe_count; j++) {
1370                        if (!unsynced_pipes[j])
1371                                continue;
1372                        if (sync_type != TIMING_SYNCHRONIZABLE &&
1373                                dc->hwss.enable_vblanks_synchronization &&
1374                                unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
1375                                resource_are_vblanks_synchronizable(
1376                                        unsynced_pipes[j]->stream,
1377                                        pipe_set[0]->stream)) {
1378                                sync_type = VBLANK_SYNCHRONIZABLE;
1379                                pipe_set[group_size] = unsynced_pipes[j];
1380                                unsynced_pipes[j] = NULL;
1381                                group_size++;
1382                        } else
1383                        if (sync_type != VBLANK_SYNCHRONIZABLE &&
1384                                resource_are_streams_timing_synchronizable(
1385                                        unsynced_pipes[j]->stream,
1386                                        pipe_set[0]->stream)) {
1387                                sync_type = TIMING_SYNCHRONIZABLE;
1388                                pipe_set[group_size] = unsynced_pipes[j];
1389                                unsynced_pipes[j] = NULL;
1390                                group_size++;
1391                        }
1392                }
1393
1394                /* set first unblanked pipe as master */
1395                for (j = 0; j < group_size; j++) {
1396                        bool is_blanked;
1397
1398                        if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1399                                is_blanked =
1400                                        pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1401                        else
1402                                is_blanked =
1403                                        pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1404                        if (!is_blanked) {
1405                                if (j == 0)
1406                                        break;
1407
1408                                swap(pipe_set[0], pipe_set[j]);
1409                                break;
1410                        }
1411                }
1412
1413                for (k = 0; k < group_size; k++) {
1414                        struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);
1415
1416                        status->timing_sync_info.group_id = num_group;
1417                        status->timing_sync_info.group_size = group_size;
1418                        if (k == 0)
1419                                status->timing_sync_info.master = true;
1420                        else
1421                                status->timing_sync_info.master = false;
1422
1423                }
1424                /* remove any other unblanked pipes as they have already been synced */
1425                for (j = j + 1; j < group_size; j++) {
1426                        bool is_blanked;
1427
1428                        if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1429                                is_blanked =
1430                                        pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1431                        else
1432                                is_blanked =
1433                                        pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1434                        if (!is_blanked) {
1435                                group_size--;
1436                                pipe_set[j] = pipe_set[group_size];
1437                                j--;
1438                        }
1439                }
1440
1441                if (group_size > 1) {
1442                        if (sync_type == TIMING_SYNCHRONIZABLE) {
1443                                dc->hwss.enable_timing_synchronization(
1444                                        dc, group_index, group_size, pipe_set);
                        } else if (sync_type == VBLANK_SYNCHRONIZABLE) {
                                dc->hwss.enable_vblanks_synchronization(
                                        dc, group_index, group_size, pipe_set);
                        }
1450                        group_index++;
1451                }
1452                num_group++;
1453        }
1454}
1455
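/*
 * Shallow comparison only: streams are matched by pointer, so in-place
 * mutations of a stream object do not register as a context change.
 */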
1456static bool context_changed(
1457                struct dc *dc,
1458                struct dc_state *context)
1459{
1460        uint8_t i;
1461
1462        if (context->stream_count != dc->current_state->stream_count)
1463                return true;
1464
1465        for (i = 0; i < dc->current_state->stream_count; i++) {
1466                if (dc->current_state->streams[i] != context->streams[i])
1467                        return true;
1468        }
1469
1470        return false;
1471}
1472
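/*
 * Checks whether the timing that VBIOS left programmed in hardware (enabled
 * DIG, OTG timing and, for DP signals, pixel clock and pixel format) matches
 * crtc_timing closely enough to keep the boot display up without a full
 * mode set.
 */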
1473bool dc_validate_seamless_boot_timing(const struct dc *dc,
1474                                const struct dc_sink *sink,
1475                                struct dc_crtc_timing *crtc_timing)
1476{
1477        struct timing_generator *tg;
1478        struct stream_encoder *se = NULL;
1479
1480        struct dc_crtc_timing hw_crtc_timing = {0};
1481
1482        struct dc_link *link = sink->link;
1483        unsigned int i, enc_inst, tg_inst = 0;
1484
        /* Support seamless boot on eDP displays only */
        if (sink->sink_signal != SIGNAL_TYPE_EDP)
                return false;
1489
1490        /* Check for enabled DIG to identify enabled display */
1491        if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
1492                return false;
1493
1494        enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1495
1496        if (enc_inst == ENGINE_ID_UNKNOWN)
1497                return false;
1498
1499        for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
                if (dc->res_pool->stream_enc[i]->id == enc_inst) {
                        se = dc->res_pool->stream_enc[i];
1503
1504                        tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
1505                                dc->res_pool->stream_enc[i]);
1506                        break;
1507                }
1508        }
1509
1510        // tg_inst not found
1511        if (i == dc->res_pool->stream_enc_count)
1512                return false;
1513
1514        if (tg_inst >= dc->res_pool->timing_generator_count)
1515                return false;
1516
1517        tg = dc->res_pool->timing_generators[tg_inst];
1518
1519        if (!tg->funcs->get_hw_timing)
1520                return false;
1521
1522        if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
1523                return false;
1524
1525        if (crtc_timing->h_total != hw_crtc_timing.h_total)
1526                return false;
1527
1528        if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
1529                return false;
1530
1531        if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
1532                return false;
1533
1534        if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
1535                return false;
1536
1537        if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
1538                return false;
1539
1540        if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
1541                return false;
1542
1543        if (crtc_timing->v_total != hw_crtc_timing.v_total)
1544                return false;
1545
1546        if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
1547                return false;
1548
1549        if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
1550                return false;
1551
1552        if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
1553                return false;
1554
1555        if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
1556                return false;
1557
1558        if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
1559                return false;
1560
1561        /* block DSC for now, as VBIOS does not currently support DSC timings */
1562        if (crtc_timing->flags.DSC)
1563                return false;
1564
1565        if (dc_is_dp_signal(link->connector_signal)) {
1566                unsigned int pix_clk_100hz;
1567
1568                dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1569                        dc->res_pool->dp_clock_source,
1570                        tg_inst, &pix_clk_100hz);
1571
1572                if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
1573                        return false;
1574
1575                if (!se->funcs->dp_get_pixel_format)
1576                        return false;
1577
1578                if (!se->funcs->dp_get_pixel_format(
1579                        se,
1580                        &hw_crtc_timing.pixel_encoding,
1581                        &hw_crtc_timing.display_color_depth))
1582                        return false;
1583
1584                if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
1585                        return false;
1586
1587                if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
1588                        return false;
1589        }
1590
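        /*
         * Assumption (not spelled out in the code): the driver will program
         * VSC SDP colorimetry itself when the receiver supports it, which
         * VBIOS does not set up, so seamless boot is declined here.
         */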
        if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
                return false;
1594
1595        if (is_edp_ilr_optimization_required(link, crtc_timing)) {
1596                DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
1597                return false;
1598        }
1599
1600        return true;
1601}
1602
1603static inline bool should_update_pipe_for_stream(
1604                struct dc_state *context,
1605                struct pipe_ctx *pipe_ctx,
1606                struct dc_stream_state *stream)
1607{
1608        return (pipe_ctx->stream && pipe_ctx->stream == stream);
1609}
1610
1611static inline bool should_update_pipe_for_plane(
1612                struct dc_state *context,
1613                struct pipe_ctx *pipe_ctx,
1614                struct dc_plane_state *plane_state)
1615{
1616        return (pipe_ctx->plane_state == plane_state);
1617}
1618
1619void dc_enable_stereo(
1620        struct dc *dc,
1621        struct dc_state *context,
1622        struct dc_stream_state *streams[],
1623        uint8_t stream_count)
1624{
1625        int i, j;
1626        struct pipe_ctx *pipe;
1627
1628        for (i = 0; i < MAX_PIPES; i++) {
1629                if (context != NULL) {
1630                        pipe = &context->res_ctx.pipe_ctx[i];
1631                } else {
1632                        context = dc->current_state;
1633                        pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1634                }
1635
                for (j = 0; pipe && j < stream_count; j++) {
1637                        if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
1638                                dc->hwss.setup_stereo)
1639                                dc->hwss.setup_stereo(pipe, dc);
1640                }
1641        }
1642}
1643
1644void dc_trigger_sync(struct dc *dc, struct dc_state *context)
1645{
1646        if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
1647                enable_timing_multisync(dc, context);
1648                program_timing_sync(dc, context);
1649        }
1650}
1651
1652static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
1653{
1654        int i;
1655        unsigned int stream_mask = 0;
1656
1657        for (i = 0; i < dc->res_pool->pipe_count; i++) {
1658                if (context->res_ctx.pipe_ctx[i].stream)
1659                        stream_mask |= 1 << i;
1660        }
1661
1662        return stream_mask;
1663}
1664
1665#if defined(CONFIG_DRM_AMD_DC_DCN)
1666void dc_z10_restore(const struct dc *dc)
1667{
1668        if (dc->hwss.z10_restore)
1669                dc->hwss.z10_restore(dc);
1670}
1671
1672void dc_z10_save_init(struct dc *dc)
1673{
1674        if (dc->hwss.z10_save_init)
1675                dc->hwss.z10_save_init(dc);
1676}
1677#endif
/*
 * Applies the given context to HW and copies it into the current context.
 * It's up to the user to release the src context afterwards.
 */
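/*
 * Illustrative caller sketch (not lifted from a real call site):
 *
 *      struct dc_state *ctx = dc_copy_state(dc->current_state);
 *
 *      if (ctx) {
 *              // ...modify ctx (streams, planes)...
 *              dc_commit_state_no_check(dc, ctx);
 *              dc_release_state(ctx);  // drop the caller's reference; DC
 *                                      // keeps its own via dc_retain_state()
 *      }
 */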
1682static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
1683{
1684        struct dc_bios *dcb = dc->ctx->dc_bios;
1685        enum dc_status result = DC_ERROR_UNEXPECTED;
1686        struct pipe_ctx *pipe;
1687        int i, k, l;
1688        struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
1689
1690#if defined(CONFIG_DRM_AMD_DC_DCN)
1691        dc_z10_restore(dc);
1692        dc_allow_idle_optimizations(dc, false);
1693#endif
1694
1695        for (i = 0; i < context->stream_count; i++)
                dc_streams[i] = context->streams[i];
1697
1698        if (!dcb->funcs->is_accelerated_mode(dcb)) {
1699                disable_vbios_mode_if_required(dc, context);
1700                dc->hwss.enable_accelerated_mode(dc, context);
1701        }
1702
1703        if (context->stream_count > get_seamless_boot_stream_count(context) ||
1704                context->stream_count == 0)
1705                dc->hwss.prepare_bandwidth(dc, context);
1706
1707        disable_dangling_plane(dc, context);
1708        /* re-program planes for existing stream, in case we need to
1709         * free up plane resource for later use
1710         */
1711        if (dc->hwss.apply_ctx_for_surface) {
1712                for (i = 0; i < context->stream_count; i++) {
1713                        if (context->streams[i]->mode_changed)
1714                                continue;
1715                        apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1716                        dc->hwss.apply_ctx_for_surface(
1717                                dc, context->streams[i],
1718                                context->stream_status[i].plane_count,
1719                                context); /* use new pipe config in new context */
1720                        apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1721                        dc->hwss.post_unlock_program_front_end(dc, context);
1722                }
1723        }
1724
1725        /* Program hardware */
1726        for (i = 0; i < dc->res_pool->pipe_count; i++) {
1727                pipe = &context->res_ctx.pipe_ctx[i];
1728                dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
1729        }
1730
1731        result = dc->hwss.apply_ctx_to_hw(dc, context);
1732
1733        if (result != DC_OK)
1734                return result;
1735
1736        dc_trigger_sync(dc, context);
1737
        /* Program all planes within new context */
1739        if (dc->hwss.program_front_end_for_ctx) {
1740                dc->hwss.interdependent_update_lock(dc, context, true);
1741                dc->hwss.program_front_end_for_ctx(dc, context);
1742                dc->hwss.interdependent_update_lock(dc, context, false);
1743                dc->hwss.post_unlock_program_front_end(dc, context);
1744        }
1745        for (i = 0; i < context->stream_count; i++) {
1746                const struct dc_link *link = context->streams[i]->link;
1747
1748                if (!context->streams[i]->mode_changed)
1749                        continue;
1750
1751                if (dc->hwss.apply_ctx_for_surface) {
1752                        apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1753                        dc->hwss.apply_ctx_for_surface(
1754                                        dc, context->streams[i],
1755                                        context->stream_status[i].plane_count,
1756                                        context);
1757                        apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1758                        dc->hwss.post_unlock_program_front_end(dc, context);
1759                }
1760
1761                /*
1762                 * enable stereo
1763                 * TODO rework dc_enable_stereo call to work with validation sets?
1764                 */
1765                for (k = 0; k < MAX_PIPES; k++) {
1766                        pipe = &context->res_ctx.pipe_ctx[k];
1767
                        for (l = 0; pipe && l < context->stream_count; l++) {
1769                                if (context->streams[l] &&
1770                                        context->streams[l] == pipe->stream &&
1771                                        dc->hwss.setup_stereo)
1772                                        dc->hwss.setup_stereo(pipe, dc);
1773                        }
1774                }
1775
                CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dkHz}",
1777                                context->streams[i]->timing.h_addressable,
1778                                context->streams[i]->timing.v_addressable,
1779                                context->streams[i]->timing.h_total,
1780                                context->streams[i]->timing.v_total,
1781                                context->streams[i]->timing.pix_clk_100hz / 10);
1782        }
1783
1784        dc_enable_stereo(dc, context, dc_streams, context->stream_count);
1785
1786        if (context->stream_count > get_seamless_boot_stream_count(context) ||
1787                context->stream_count == 0) {
1788                /* Must wait for no flips to be pending before doing optimize bw */
1789                wait_for_no_pipes_pending(dc, context);
1790                /* pplib is notified if disp_num changed */
1791                dc->hwss.optimize_bandwidth(dc, context);
1792        }
1793
1794        if (dc->ctx->dce_version >= DCE_VERSION_MAX)
1795                TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
1796        else
1797                TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
1798
1799        context->stream_mask = get_stream_mask(dc, context);
1800
1801        if (context->stream_mask != dc->current_state->stream_mask)
1802                dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
1803
1804        for (i = 0; i < context->stream_count; i++)
1805                context->streams[i]->mode_changed = false;
1806
1807        dc_release_state(dc->current_state);
1808
1809        dc->current_state = context;
1810
1811        dc_retain_state(dc->current_state);
1812
1813        return result;
1814}
1815
1816bool dc_commit_state(struct dc *dc, struct dc_state *context)
1817{
1818        enum dc_status result = DC_ERROR_UNEXPECTED;
1819        int i;
1820
1821        if (!context_changed(dc, context))
1822                return DC_OK;
1823
1824        DC_LOG_DC("%s: %d streams\n",
1825                                __func__, context->stream_count);
1826
1827        for (i = 0; i < context->stream_count; i++) {
1828                struct dc_stream_state *stream = context->streams[i];
1829
1830                dc_stream_log(dc, stream);
1831        }
1832
1833        result = dc_commit_state_no_check(dc, context);
1834
1835        return (result == DC_OK);
1836}
1837
1838#if defined(CONFIG_DRM_AMD_DC_DCN)
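/*
 * Acquires (acquire == true) or releases the post-blend MPC 3D LUT and
 * shaper for the given stream.  On acquire, the MPCC id is taken from the
 * HUBP instance of the pipe driving the stream; on release no pipe lookup
 * is needed.
 */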
1839bool dc_acquire_release_mpc_3dlut(
1840                struct dc *dc, bool acquire,
1841                struct dc_stream_state *stream,
1842                struct dc_3dlut **lut,
1843                struct dc_transfer_func **shaper)
1844{
1845        int pipe_idx;
1846        bool ret = false;
1847        bool found_pipe_idx = false;
1848        const struct resource_pool *pool = dc->res_pool;
1849        struct resource_context *res_ctx = &dc->current_state->res_ctx;
1850        int mpcc_id = 0;
1851
1852        if (pool && res_ctx) {
1853                if (acquire) {
                        /* find pipe idx for the given stream */
1855                        for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
1856                                if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
1857                                        found_pipe_idx = true;
1858                                        mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
1859                                        break;
1860                                }
1861                        }
                } else
                        found_pipe_idx = true; /* for release, pipe_idx is not required */
1864
1865                if (found_pipe_idx) {
1866                        if (acquire && pool->funcs->acquire_post_bldn_3dlut)
1867                                ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
1868                        else if (!acquire && pool->funcs->release_post_bldn_3dlut)
1869                                ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
1870                }
1871        }
1872        return ret;
1873}
1874#endif
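/* Returns true if any plane in the given context still has a flip in flight. */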
1875static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
1876{
1877        int i;
1878        struct pipe_ctx *pipe;
1879
1880        for (i = 0; i < MAX_PIPES; i++) {
1881                pipe = &context->res_ctx.pipe_ctx[i];
1882
1883                if (!pipe->plane_state)
1884                        continue;
1885
1886                /* Must set to false to start with, due to OR in update function */
1887                pipe->plane_state->status.is_flip_pending = false;
1888                dc->hwss.update_pending_status(pipe);
1889                if (pipe->plane_state->status.is_flip_pending)
1890                        return true;
1891        }
1892        return false;
1893}
1894
1895#ifdef CONFIG_DRM_AMD_DC_DCN
/* Perform updates here which need to be deferred until the next vupdate.
 *
 * e.g. the blend LUT, 3D LUT, and shaper LUT bypass registers are double
 * buffered, but forcing LUT memory into the shutdown state is immediate.
 * This causes single-frame corruption as the LUT gets disabled mid-frame,
 * unless the shutdown is deferred until after entering bypass.
 */
1903static void process_deferred_updates(struct dc *dc)
1904{
1905        int i = 0;
1906
1907        if (dc->debug.enable_mem_low_power.bits.cm) {
1908                ASSERT(dc->dcn_ip->max_num_dpp);
1909                for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
1910                        if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
1911                                dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
1912        }
1913}
1914#endif /* CONFIG_DRM_AMD_DC_DCN */
1915
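/*
 * Performs the optimizations deferred from an earlier commit: disables any
 * pipe that no longer carries a stream or plane and lowers bandwidth and
 * clocks.  Returns early while flips are still pending so that active
 * scanout is not disturbed.
 */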
1916void dc_post_update_surfaces_to_stream(struct dc *dc)
1917{
1918        int i;
1919        struct dc_state *context = dc->current_state;
1920
1921        if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
1922                return;
1923
1924        post_surface_trace(dc);
1925
1926        if (dc->ctx->dce_version >= DCE_VERSION_MAX)
1927                TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
1928        else
1929                TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
1930
1931        if (is_flip_pending_in_pipes(dc, context))
1932                return;
1933
1934        for (i = 0; i < dc->res_pool->pipe_count; i++)
1935                if (context->res_ctx.pipe_ctx[i].stream == NULL ||
1936                    context->res_ctx.pipe_ctx[i].plane_state == NULL) {
1937                        context->res_ctx.pipe_ctx[i].pipe_idx = i;
1938                        dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
1939                }
1940
1941#ifdef CONFIG_DRM_AMD_DC_DCN
1942        process_deferred_updates(dc);
1943#endif
1944
1945        dc->hwss.optimize_bandwidth(dc, context);
1946
1947        dc->optimized_required = false;
1948        dc->wm_optimized_required = false;
1949}
1950
1951static void init_state(struct dc *dc, struct dc_state *context)
1952{
        /* Each context must have its own instance of VBA, and in order to
         * initialize and obtain IP and SOC, the base DML instance from DC is
         * initially copied into every context.
         */
1957#ifdef CONFIG_DRM_AMD_DC_DCN
1958        memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
1959#endif
1960}
1961
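/*
 * Context refcounting sketch (illustrative; populating the new state via
 * dc_resource_state_copy_construct_current() is assumed, as DM callers do):
 *
 *      struct dc_state *state = dc_create_state(dc);   // refcount == 1
 *
 *      dc_retain_state(state);         // +1 for a second owner
 *      dc_release_state(state);        // -1
 *      dc_release_state(state);        // last put frees via dc_state_free()
 */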
1962struct dc_state *dc_create_state(struct dc *dc)
1963{
1964        struct dc_state *context = kvzalloc(sizeof(struct dc_state),
1965                                            GFP_KERNEL);
1966
1967        if (!context)
1968                return NULL;
1969
1970        init_state(dc, context);
1971
1972        kref_init(&context->refcount);
1973
1974        return context;
1975}
1976
1977struct dc_state *dc_copy_state(struct dc_state *src_ctx)
1978{
1979        int i, j;
1980        struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
1981
1982        if (!new_ctx)
1983                return NULL;
1984        memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
1985
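        /*
         * The memcpy() above copied the pipe linkage pointers verbatim, so
         * they still point into src_ctx; re-target them at new_ctx's array.
         */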
        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];

                if (cur_pipe->top_pipe)
                        cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];

                if (cur_pipe->bottom_pipe)
                        cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];

                if (cur_pipe->prev_odm_pipe)
                        cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];

                if (cur_pipe->next_odm_pipe)
                        cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
        }
2002
        for (i = 0; i < new_ctx->stream_count; i++) {
                dc_stream_retain(new_ctx->streams[i]);
                for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
                        dc_plane_state_retain(
                                new_ctx->stream_status[i].plane_states[j]);
        }
2009
2010        kref_init(&new_ctx->refcount);
2011
2012        return new_ctx;
2013}
2014
2015void dc_retain_state(struct dc_state *context)
2016{
2017        kref_get(&context->refcount);
2018}
2019
static void dc_state_free(struct kref *kref)
{
        struct dc_state *context = container_of(kref, struct dc_state, refcount);

        dc_resource_state_destruct(context);
        kvfree(context);
}
2026
2027void dc_release_state(struct dc_state *context)
2028{
2029        kref_put(&context->refcount, dc_state_free);
2030}
2031
2032bool dc_set_generic_gpio_for_stereo(bool enable,
2033                struct gpio_service *gpio_service)
2034{
2035        enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
2036        struct gpio_pin_info pin_info;
2037        struct gpio *generic;
2038        struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
2039                           GFP_KERNEL);
2040
2041        if (!config)
2042                return false;
2043        pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
2044
2045        if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
2046                kfree(config);
2047                return false;
2048        } else {
2049                generic = dal_gpio_service_create_generic_mux(
2050                        gpio_service,
2051                        pin_info.offset,
2052                        pin_info.mask);
2053        }
2054
2055        if (!generic) {
2056                kfree(config);
2057                return false;
2058        }
2059
2060        gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
2061
2062        config->enable_output_from_mux = enable;
2063        config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
2064
2065        if (gpio_result == GPIO_RESULT_OK)
2066                gpio_result = dal_mux_setup_config(generic, config);
2067
        dal_gpio_close(generic);
        dal_gpio_destroy_generic_mux(&generic);
        kfree(config);

        return gpio_result == GPIO_RESULT_OK;
2079}
2080
2081static bool is_surface_in_context(
2082                const struct dc_state *context,
2083                const struct dc_plane_state *plane_state)
2084{
2085        int j;
2086
2087        for (j = 0; j < MAX_PIPES; j++) {
2088                const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2089
2090                if (plane_state == pipe_ctx->plane_state) {
2091                        return true;
2092                }
2093        }
2094
2095        return false;
2096}
2097
2098static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
2099{
2100        union surface_update_flags *update_flags = &u->surface->update_flags;
2101        enum surface_update_type update_type = UPDATE_TYPE_FAST;
2102
2103        if (!u->plane_info)
2104                return UPDATE_TYPE_FAST;
2105
2106        if (u->plane_info->color_space != u->surface->color_space) {
2107                update_flags->bits.color_space_change = 1;
2108                elevate_update_type(&update_type, UPDATE_TYPE_MED);
2109        }
2110
2111        if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
2112                update_flags->bits.horizontal_mirror_change = 1;
2113                elevate_update_type(&update_type, UPDATE_TYPE_MED);
2114        }
2115
2116        if (u->plane_info->rotation != u->surface->rotation) {
2117                update_flags->bits.rotation_change = 1;
2118                elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2119        }
2120
2121        if (u->plane_info->format != u->surface->format) {
2122                update_flags->bits.pixel_format_change = 1;
2123                elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2124        }
2125
2126        if (u->plane_info->stereo_format != u->surface->stereo_format) {
2127                update_flags->bits.stereo_format_change = 1;
2128                elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2129        }
2130
2131        if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
2132                update_flags->bits.per_pixel_alpha_change = 1;
2133                elevate_update_type(&update_type, UPDATE_TYPE_MED);
2134        }
2135
2136        if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
2137                update_flags->bits.global_alpha_change = 1;
2138                elevate_update_type(&update_type, UPDATE_TYPE_MED);
2139        }
2140
2141        if (u->plane_info->dcc.enable != u->surface->dcc.enable
2142                        || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
2143                        || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
2144                /* During DCC on/off, stutter period is calculated before
2145                 * DCC has fully transitioned. This results in incorrect
2146                 * stutter period calculation. Triggering a full update will
2147                 * recalculate stutter period.
2148                 */
2149                update_flags->bits.dcc_change = 1;
2150                elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2151        }
2152
2153        if (resource_pixel_format_to_bpp(u->plane_info->format) !=
2154                        resource_pixel_format_to_bpp(u->surface->format)) {
2155                /* different bytes per element will require full bandwidth
2156                 * and DML calculation
2157                 */
2158                update_flags->bits.bpp_change = 1;
2159                elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2160        }
2161
2162        if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
2163                        || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
2164                update_flags->bits.plane_size_change = 1;
2165                elevate_update_type(&update_type, UPDATE_TYPE_MED);
        }

2169        if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
2170                        sizeof(union dc_tiling_info)) != 0) {
2171                update_flags->bits.swizzle_change = 1;
2172                elevate_update_type(&update_type, UPDATE_TYPE_MED);
2173
                /* todo: below is HW dependent, we should add a hook to
                 * DCE/N resource and validate it there.
                 */
2177                if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
2178                        /* swizzled mode requires RQ to be setup properly,
2179                         * thus need to run DML to calculate RQ settings
2180                         */
2181                        update_flags->bits.bandwidth_change = 1;
2182                        elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2183                }
2184        }
2185
2186        /* This should be UPDATE_TYPE_FAST if nothing has changed. */
2187        return update_type;
2188}
2189
2190static enum surface_update_type get_scaling_info_update_type(
2191                const struct dc_surface_update *u)
2192{
2193        union surface_update_flags *update_flags = &u->surface->update_flags;
2194
2195        if (!u->scaling_info)
2196                return UPDATE_TYPE_FAST;
2197
2198        if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
2199                        || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
2200                        || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
2201                        || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
2202                        || u->scaling_info->scaling_quality.integer_scaling !=
2203                                u->surface->scaling_quality.integer_scaling
2204                        ) {
2205                update_flags->bits.scaling_change = 1;
2206
2207                if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
2208                        || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
2209                                && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
2210                                        || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
2211                        /* Making dst rect smaller requires a bandwidth change */
2212                        update_flags->bits.bandwidth_change = 1;
2213        }
2214
2215        if (u->scaling_info->src_rect.width != u->surface->src_rect.width
2216                || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
2217
2218                update_flags->bits.scaling_change = 1;
2219                if (u->scaling_info->src_rect.width > u->surface->src_rect.width
2220                                || u->scaling_info->src_rect.height > u->surface->src_rect.height)
                        /* Making src rect bigger requires a clock change */
2222                        update_flags->bits.clock_change = 1;
2223        }
2224
2225        if (u->scaling_info->src_rect.x != u->surface->src_rect.x
2226                        || u->scaling_info->src_rect.y != u->surface->src_rect.y
2227                        || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
2228                        || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
2229                        || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
2230                        || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
2231                update_flags->bits.position_change = 1;
2232
2233        if (update_flags->bits.clock_change
2234                        || update_flags->bits.bandwidth_change
2235                        || update_flags->bits.scaling_change)
2236                return UPDATE_TYPE_FULL;
2237
2238        if (update_flags->bits.position_change)
2239                return UPDATE_TYPE_MED;
2240
2241        return UPDATE_TYPE_FAST;
2242}
2243
2244static enum surface_update_type det_surface_update(const struct dc *dc,
2245                const struct dc_surface_update *u)
2246{
2247        const struct dc_state *context = dc->current_state;
2248        enum surface_update_type type;
2249        enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2250        union surface_update_flags *update_flags = &u->surface->update_flags;
2251
2252        if (u->flip_addr)
2253                update_flags->bits.addr_update = 1;
2254
2255        if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
2256                update_flags->raw = 0xFFFFFFFF;
2257                return UPDATE_TYPE_FULL;
2258        }
2259
2260        update_flags->raw = 0; // Reset all flags
2261
2262        type = get_plane_info_update_type(u);
2263        elevate_update_type(&overall_type, type);
2264
2265        type = get_scaling_info_update_type(u);
2266        elevate_update_type(&overall_type, type);
2267
2268        if (u->flip_addr)
2269                update_flags->bits.addr_update = 1;
2270
2271        if (u->in_transfer_func)
2272                update_flags->bits.in_transfer_func_change = 1;
2273
2274        if (u->input_csc_color_matrix)
2275                update_flags->bits.input_csc_change = 1;
2276
2277        if (u->coeff_reduction_factor)
2278                update_flags->bits.coeff_reduction_change = 1;
2279
2280        if (u->gamut_remap_matrix)
2281                update_flags->bits.gamut_remap_change = 1;
2282
2283        if (u->gamma) {
2284                enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
2285
2286                if (u->plane_info)
2287                        format = u->plane_info->format;
2288                else if (u->surface)
2289                        format = u->surface->format;
2290
2291                if (dce_use_lut(format))
2292                        update_flags->bits.gamma_change = 1;
2293        }
2294
2295        if (u->lut3d_func || u->func_shaper)
2296                update_flags->bits.lut_3d = 1;
2297
        if (u->hdr_mult.value &&
                        u->hdr_mult.value != u->surface->hdr_mult.value) {
                update_flags->bits.hdr_mult = 1;
                elevate_update_type(&overall_type, UPDATE_TYPE_MED);
        }
2303
2304        if (update_flags->bits.in_transfer_func_change) {
2305                type = UPDATE_TYPE_MED;
2306                elevate_update_type(&overall_type, type);
2307        }
2308
2309        if (update_flags->bits.input_csc_change
2310                        || update_flags->bits.coeff_reduction_change
2311                        || update_flags->bits.lut_3d
2312                        || update_flags->bits.gamma_change
2313                        || update_flags->bits.gamut_remap_change) {
2314                type = UPDATE_TYPE_FULL;
2315                elevate_update_type(&overall_type, type);
2316        }
2317
2318        return overall_type;
2319}
2320
2321static enum surface_update_type check_update_surfaces_for_stream(
2322                struct dc *dc,
2323                struct dc_surface_update *updates,
2324                int surface_count,
2325                struct dc_stream_update *stream_update,
2326                const struct dc_stream_status *stream_status)
2327{
2328        int i;
2329        enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2330
2331#if defined(CONFIG_DRM_AMD_DC_DCN)
2332        if (dc->idle_optimizations_allowed)
2333                overall_type = UPDATE_TYPE_FULL;
2334
2335#endif
2336        if (stream_status == NULL || stream_status->plane_count != surface_count)
2337                overall_type = UPDATE_TYPE_FULL;
2338
2339        if (stream_update && stream_update->pending_test_pattern) {
2340                overall_type = UPDATE_TYPE_FULL;
2341        }
2342
2343        /* some stream updates require passive update */
2344        if (stream_update) {
2345                union stream_update_flags *su_flags = &stream_update->stream->update_flags;
2346
2347                if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
2348                        (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
2349                        stream_update->integer_scaling_update)
2350                        su_flags->bits.scaling = 1;
2351
2352                if (stream_update->out_transfer_func)
2353                        su_flags->bits.out_tf = 1;
2354
2355                if (stream_update->abm_level)
2356                        su_flags->bits.abm_level = 1;
2357
2358                if (stream_update->dpms_off)
2359                        su_flags->bits.dpms_off = 1;
2360
2361                if (stream_update->gamut_remap)
2362                        su_flags->bits.gamut_remap = 1;
2363
2364                if (stream_update->wb_update)
2365                        su_flags->bits.wb_update = 1;
2366
2367                if (stream_update->dsc_config)
2368                        su_flags->bits.dsc_changed = 1;
2369
2370#if defined(CONFIG_DRM_AMD_DC_DCN)
2371                if (stream_update->mst_bw_update)
2372                        su_flags->bits.mst_bw = 1;
2373#endif
2374
2375                if (su_flags->raw != 0)
2376                        overall_type = UPDATE_TYPE_FULL;
2377
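                /*
                 * Note: out_csc is raised after the su_flags->raw check
                 * above, so an output CSC / color-space change alone does
                 * not force a full update here.
                 */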
2378                if (stream_update->output_csc_transform || stream_update->output_color_space)
2379                        su_flags->bits.out_csc = 1;
2380        }
2381
2382        for (i = 0 ; i < surface_count; i++) {
2383                enum surface_update_type type =
2384                                det_surface_update(dc, &updates[i]);
2385
2386                elevate_update_type(&overall_type, type);
2387        }
2388
2389        return overall_type;
2390}
2391
2392/*
2393 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
2394 *
2395 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
2396 */
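/*
 * Illustrative fast-path check (assumed caller, not from the tree;
 * dc_stream_get_status() is the public status lookup):
 *
 *      struct dc_surface_update upd = {
 *              .surface = plane_state,
 *              .flip_addr = &flip,
 *      };
 *
 *      if (dc_check_update_surfaces_for_stream(dc, &upd, 1, NULL,
 *                      dc_stream_get_status(stream)) == UPDATE_TYPE_FAST)
 *              ; // address-only flip: no global revalidation needed
 */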
2397enum surface_update_type dc_check_update_surfaces_for_stream(
2398                struct dc *dc,
2399                struct dc_surface_update *updates,
2400                int surface_count,
2401                struct dc_stream_update *stream_update,
2402                const struct dc_stream_status *stream_status)
2403{
2404        int i;
2405        enum surface_update_type type;
2406
2407        if (stream_update)
2408                stream_update->stream->update_flags.raw = 0;
2409        for (i = 0; i < surface_count; i++)
2410                updates[i].surface->update_flags.raw = 0;
2411
2412        type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
2413        if (type == UPDATE_TYPE_FULL) {
2414                if (stream_update) {
2415                        uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
2416                        stream_update->stream->update_flags.raw = 0xFFFFFFFF;
2417                        stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
2418                }
2419                for (i = 0; i < surface_count; i++)
2420                        updates[i].surface->update_flags.raw = 0xFFFFFFFF;
2421        }
2422
2423        if (type == UPDATE_TYPE_FAST) {
2424                // If there's an available clock comparator, we use that.
2425                if (dc->clk_mgr->funcs->are_clock_states_equal) {
2426                        if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
2427                                dc->optimized_required = true;
                // Else we fall back to a mem compare.
2429                } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
2430                        dc->optimized_required = true;
2431                }
2432
2433                dc->optimized_required |= dc->wm_optimized_required;
2434        }
2435
2436        return type;
2437}
2438
2439static struct dc_stream_status *stream_get_status(
2440        struct dc_state *ctx,
2441        struct dc_stream_state *stream)
2442{
2443        uint8_t i;
2444
2445        for (i = 0; i < ctx->stream_count; i++) {
2446                if (stream == ctx->streams[i]) {
2447                        return &ctx->stream_status[i];
2448                }
2449        }
2450
2451        return NULL;
2452}
2453
2454static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
2455
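/*
 * Applies the optional members of srf_update onto the plane state; only
 * non-NULL (or, for hdr_mult, non-zero) members are copied, so a sparse
 * update leaves the rest of the plane state untouched.
 */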
2456static void copy_surface_update_to_plane(
2457                struct dc_plane_state *surface,
2458                struct dc_surface_update *srf_update)
2459{
2460        if (srf_update->flip_addr) {
2461                surface->address = srf_update->flip_addr->address;
2462                surface->flip_immediate =
2463                        srf_update->flip_addr->flip_immediate;
2464                surface->time.time_elapsed_in_us[surface->time.index] =
2465                        srf_update->flip_addr->flip_timestamp_in_us -
2466                                surface->time.prev_update_time_in_us;
2467                surface->time.prev_update_time_in_us =
2468                        srf_update->flip_addr->flip_timestamp_in_us;
2469                surface->time.index++;
2470                if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
2471                        surface->time.index = 0;
2472
2473                surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
2474        }
2475
2476        if (srf_update->scaling_info) {
2477                surface->scaling_quality =
2478                                srf_update->scaling_info->scaling_quality;
2479                surface->dst_rect =
2480                                srf_update->scaling_info->dst_rect;
2481                surface->src_rect =
2482                                srf_update->scaling_info->src_rect;
2483                surface->clip_rect =
2484                                srf_update->scaling_info->clip_rect;
2485        }
2486
2487        if (srf_update->plane_info) {
2488                surface->color_space =
2489                                srf_update->plane_info->color_space;
2490                surface->format =
2491                                srf_update->plane_info->format;
2492                surface->plane_size =
2493                                srf_update->plane_info->plane_size;
2494                surface->rotation =
2495                                srf_update->plane_info->rotation;
2496                surface->horizontal_mirror =
2497                                srf_update->plane_info->horizontal_mirror;
2498                surface->stereo_format =
2499                                srf_update->plane_info->stereo_format;
2500                surface->tiling_info =
2501                                srf_update->plane_info->tiling_info;
2502                surface->visible =
2503                                srf_update->plane_info->visible;
2504                surface->per_pixel_alpha =
2505                                srf_update->plane_info->per_pixel_alpha;
2506                surface->global_alpha =
2507                                srf_update->plane_info->global_alpha;
2508                surface->global_alpha_value =
2509                                srf_update->plane_info->global_alpha_value;
2510                surface->dcc =
2511                                srf_update->plane_info->dcc;
2512                surface->layer_index =
2513                                srf_update->plane_info->layer_index;
2514        }
2515
2516        if (srf_update->gamma &&
2517                        (surface->gamma_correction !=
2518                                        srf_update->gamma)) {
2519                memcpy(&surface->gamma_correction->entries,
2520                        &srf_update->gamma->entries,
2521                        sizeof(struct dc_gamma_entries));
2522                surface->gamma_correction->is_identity =
2523                        srf_update->gamma->is_identity;
2524                surface->gamma_correction->num_entries =
2525                        srf_update->gamma->num_entries;
2526                surface->gamma_correction->type =
2527                        srf_update->gamma->type;
2528        }
2529
2530        if (srf_update->in_transfer_func &&
2531                        (surface->in_transfer_func !=
2532                                srf_update->in_transfer_func)) {
2533                surface->in_transfer_func->sdr_ref_white_level =
2534                        srf_update->in_transfer_func->sdr_ref_white_level;
2535                surface->in_transfer_func->tf =
2536                        srf_update->in_transfer_func->tf;
2537                surface->in_transfer_func->type =
2538                        srf_update->in_transfer_func->type;
2539                memcpy(&surface->in_transfer_func->tf_pts,
2540                        &srf_update->in_transfer_func->tf_pts,
2541                        sizeof(struct dc_transfer_func_distributed_points));
2542        }
2543
2544        if (srf_update->func_shaper &&
2545                        (surface->in_shaper_func !=
2546                        srf_update->func_shaper))
2547                memcpy(surface->in_shaper_func, srf_update->func_shaper,
2548                sizeof(*surface->in_shaper_func));
2549
2550        if (srf_update->lut3d_func &&
2551                        (surface->lut3d_func !=
2552                        srf_update->lut3d_func))
2553                memcpy(surface->lut3d_func, srf_update->lut3d_func,
2554                sizeof(*surface->lut3d_func));
2555
2556        if (srf_update->hdr_mult.value)
2557                surface->hdr_mult =
2558                                srf_update->hdr_mult;
2559
2560        if (srf_update->blend_tf &&
2561                        (surface->blend_tf !=
2562                        srf_update->blend_tf))
2563                memcpy(surface->blend_tf, srf_update->blend_tf,
2564                sizeof(*surface->blend_tf));
2565
2566        if (srf_update->input_csc_color_matrix)
2567                surface->input_csc_color_matrix =
2568                        *srf_update->input_csc_color_matrix;
2569
2570        if (srf_update->coeff_reduction_factor)
2571                surface->coeff_reduction_factor =
2572                        *srf_update->coeff_reduction_factor;
2573
2574        if (srf_update->gamut_remap_matrix)
2575                surface->gamut_remap_matrix =
2576                        *srf_update->gamut_remap_matrix;
2577}
2578
2579static void copy_stream_update_to_stream(struct dc *dc,
2580                                         struct dc_state *context,
2581                                         struct dc_stream_state *stream,
2582                                         struct dc_stream_update *update)
2583{
2584        struct dc_context *dc_ctx = dc->ctx;
2585
2586        if (update == NULL || stream == NULL)
2587                return;
2588
2589        if (update->src.height && update->src.width)
2590                stream->src = update->src;
2591
2592        if (update->dst.height && update->dst.width)
2593                stream->dst = update->dst;
2594
2595        if (update->out_transfer_func &&
2596            stream->out_transfer_func != update->out_transfer_func) {
2597                stream->out_transfer_func->sdr_ref_white_level =
2598                        update->out_transfer_func->sdr_ref_white_level;
2599                stream->out_transfer_func->tf = update->out_transfer_func->tf;
2600                stream->out_transfer_func->type =
2601                        update->out_transfer_func->type;
2602                memcpy(&stream->out_transfer_func->tf_pts,
2603                       &update->out_transfer_func->tf_pts,
2604                       sizeof(struct dc_transfer_func_distributed_points));
2605        }
2606
2607        if (update->hdr_static_metadata)
2608                stream->hdr_static_metadata = *update->hdr_static_metadata;
2609
2610        if (update->abm_level)
2611                stream->abm_level = *update->abm_level;
2612
2613        if (update->periodic_interrupt0)
2614                stream->periodic_interrupt0 = *update->periodic_interrupt0;
2615
2616        if (update->periodic_interrupt1)
2617                stream->periodic_interrupt1 = *update->periodic_interrupt1;
2618
2619        if (update->gamut_remap)
2620                stream->gamut_remap_matrix = *update->gamut_remap;
2621
        /* Note: this being updated after mode set is currently not a use
         * case. However, if it arises, OCSC would need to be reprogrammed
         * at a minimum.
         */
2626        if (update->output_color_space)
2627                stream->output_color_space = *update->output_color_space;
2628
2629        if (update->output_csc_transform)
2630                stream->csc_color_matrix = *update->output_csc_transform;
2631
2632        if (update->vrr_infopacket)
2633                stream->vrr_infopacket = *update->vrr_infopacket;
2634
2635        if (update->dpms_off)
2636                stream->dpms_off = *update->dpms_off;
2637
2638        if (update->vsc_infopacket)
2639                stream->vsc_infopacket = *update->vsc_infopacket;
2640
2641        if (update->vsp_infopacket)
2642                stream->vsp_infopacket = *update->vsp_infopacket;
2643
2644        if (update->dither_option)
2645                stream->dither_option = *update->dither_option;
2646
2647        if (update->pending_test_pattern)
2648                stream->test_pattern = *update->pending_test_pattern;
2649        /* update current stream with writeback info */
2650        if (update->wb_update) {
2651                int i;
2652
2653                stream->num_wb_info = update->wb_update->num_wb_info;
2654                ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
2655                for (i = 0; i < stream->num_wb_info; i++)
2656                        stream->writeback_info[i] =
2657                                update->wb_update->writeback_info[i];
2658        }
2659        if (update->dsc_config) {
2660                struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
2661                uint32_t old_dsc_enabled = stream->timing.flags.DSC;
2662                uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
2663                                       update->dsc_config->num_slices_v != 0);
2664
                /* Use a temporary context for validating the new DSC config */
2666                struct dc_state *dsc_validate_context = dc_create_state(dc);
2667
2668                if (dsc_validate_context) {
2669                        dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
2670
2671                        stream->timing.dsc_cfg = *update->dsc_config;
2672                        stream->timing.flags.DSC = enable_dsc;
2673                        if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
2674                                stream->timing.dsc_cfg = old_dsc_cfg;
2675                                stream->timing.flags.DSC = old_dsc_enabled;
2676                                update->dsc_config = NULL;
2677                        }
2678
2679                        dc_release_state(dsc_validate_context);
2680                } else {
2681                        DC_ERROR("Failed to allocate new validate context for DSC change\n");
2682                        update->dsc_config = NULL;
2683                }
2684        }
2685}
2686
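/*
 * Programs the stream-level pieces of stream_update on every head pipe
 * (no top_pipe, no prev_odm_pipe) that drives the stream.  Fast updates
 * stop after the info-frame/FMT programming; full updates also handle DSC,
 * test patterns, DPMS, the link, and ABM.
 */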
2687static void commit_planes_do_stream_update(struct dc *dc,
2688                struct dc_stream_state *stream,
2689                struct dc_stream_update *stream_update,
2690                enum surface_update_type update_type,
2691                struct dc_state *context)
2692{
2693        int j;
2694
2695        // Stream updates
2696        for (j = 0; j < dc->res_pool->pipe_count; j++) {
2697                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2698
                if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
2701                        if (stream_update->periodic_interrupt0 &&
2702                                        dc->hwss.setup_periodic_interrupt)
2703                                dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);
2704
2705                        if (stream_update->periodic_interrupt1 &&
2706                                        dc->hwss.setup_periodic_interrupt)
2707                                dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);
2708
2709                        if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
2710                                        stream_update->vrr_infopacket ||
2711                                        stream_update->vsc_infopacket ||
2712                                        stream_update->vsp_infopacket) {
2713                                resource_build_info_frame(pipe_ctx);
2714                                dc->hwss.update_info_frame(pipe_ctx);
2715                        }
2716
2717                        if (stream_update->hdr_static_metadata &&
2718                                        stream->use_dynamic_meta &&
2719                                        dc->hwss.set_dmdata_attributes &&
2720                                        pipe_ctx->stream->dmdata_address.quad_part != 0)
2721                                dc->hwss.set_dmdata_attributes(pipe_ctx);
2722
2723                        if (stream_update->gamut_remap)
2724                                dc_stream_set_gamut_remap(dc, stream);
2725
2726                        if (stream_update->output_csc_transform)
2727                                dc_stream_program_csc_matrix(dc, stream);
2728
2729                        if (stream_update->dither_option) {
2730                                struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
2731                                resource_build_bit_depth_reduction_params(pipe_ctx->stream,
2732                                                                        &pipe_ctx->stream->bit_depth_params);
2733                                pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
2734                                                &stream->bit_depth_params,
2735                                                &stream->clamping);
2736                                while (odm_pipe) {
2737                                        odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
2738                                                        &stream->bit_depth_params,
2739                                                        &stream->clamping);
2740                                        odm_pipe = odm_pipe->next_odm_pipe;
2741                                }
2742                        }

                        /* Full FE update */
2746                        if (update_type == UPDATE_TYPE_FAST)
2747                                continue;
2748
2749                        if (stream_update->dsc_config)
2750                                dp_update_dsc_config(pipe_ctx);
2751
2752#if defined(CONFIG_DRM_AMD_DC_DCN)
2753                        if (stream_update->mst_bw_update) {
2754                                if (stream_update->mst_bw_update->is_increase)
2755                                        dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
2756                                else
2757                                        dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
2758                        }
2759#endif
2760
2761                        if (stream_update->pending_test_pattern) {
2762                                dc_link_dp_set_test_pattern(stream->link,
2763                                        stream->test_pattern.type,
2764                                        stream->test_pattern.color_space,
2765                                        stream->test_pattern.p_link_settings,
2766                                        stream->test_pattern.p_custom_pattern,
2767                                        stream->test_pattern.cust_pattern_size);
2768                        }
2769
2770                        if (stream_update->dpms_off) {
2771                                if (*stream_update->dpms_off) {
2772                                        core_link_disable_stream(pipe_ctx);
					/* for dpms, keep acquired resources */
2774                                        if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
2775                                                pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
2776
2777                                        dc->optimized_required = true;
2778
2779                                } else {
2780                                        if (get_seamless_boot_stream_count(context) == 0)
2781                                                dc->hwss.prepare_bandwidth(dc, dc->current_state);
2782
2783                                        core_link_enable_stream(dc->current_state, pipe_ctx);
2784                                }
2785                        }
2786
2787                        if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
2788                                bool should_program_abm = true;
2789
2790                                // if otg funcs defined check if blanked before programming
2791                                if (pipe_ctx->stream_res.tg->funcs->is_blanked)
2792                                        if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
2793                                                should_program_abm = false;
2794
2795                                if (should_program_abm) {
2796                                        if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
2797                                                dc->hwss.set_abm_immediate_disable(pipe_ctx);
2798                                        } else {
2799                                                pipe_ctx->stream_res.abm->funcs->set_abm_level(
2800                                                        pipe_ctx->stream_res.abm, stream->abm_level);
2801                                        }
2802                                }
2803                        }
2804                }
2805        }
2806}
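
/*
 * A minimal sketch (not part of this file; field names taken from the stream
 * update paths above): an update that stays on the FAST path touches only
 * infopacket state, so commit_planes_do_stream_update() rebuilds and re-sends
 * the infoframes without reprogramming the front end:
 *
 *	struct dc_stream_update update = {0};
 *
 *	update.vrr_infopacket = &stream->vrr_infopacket;
 *	update.vsc_infopacket = &stream->vsc_infopacket;
 *
 * Anything handled after the UPDATE_TYPE_FAST check (DSC config, test
 * patterns, DPMS) implies a full update and is skipped on the fast path.
 */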
2807
2808static void commit_planes_for_stream(struct dc *dc,
2809                struct dc_surface_update *srf_updates,
2810                int surface_count,
2811                struct dc_stream_state *stream,
2812                struct dc_stream_update *stream_update,
2813                enum surface_update_type update_type,
2814                struct dc_state *context)
2815{
2816        int i, j;
2817        struct pipe_ctx *top_pipe_to_program = NULL;
2818        bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
2819
2820#if defined(CONFIG_DRM_AMD_DC_DCN)
2821        dc_z10_restore(dc);
2822#endif
2823
2824        if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
		/* The seamless boot optimization keeps clocks and watermarks high
		 * until the first flip; after that flip an optimization pass is
		 * required to lower bandwidth. Note that UEFI is expected to light
		 * up only a single display on POST, so only one stream should have
		 * the seamless boot flag set.
		 */
2831                if (stream->apply_seamless_boot_optimization) {
2832                        stream->apply_seamless_boot_optimization = false;
2833
2834                        if (get_seamless_boot_stream_count(context) == 0)
2835                                dc->optimized_required = true;
2836                }
2837        }
2838
2839        if (update_type == UPDATE_TYPE_FULL) {
2840#if defined(CONFIG_DRM_AMD_DC_DCN)
2841                dc_allow_idle_optimizations(dc, false);
2843#endif
2844                if (get_seamless_boot_stream_count(context) == 0)
2845                        dc->hwss.prepare_bandwidth(dc, context);
2846
2847                context_clock_trace(dc, context);
2848        }
2849
2850        for (j = 0; j < dc->res_pool->pipe_count; j++) {
2851                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2852
2853                if (!pipe_ctx->top_pipe &&
2854                        !pipe_ctx->prev_odm_pipe &&
2855                        pipe_ctx->stream &&
2856                        pipe_ctx->stream == stream) {
2857                        top_pipe_to_program = pipe_ctx;
2858                }
2859        }
2860
2861#ifdef CONFIG_DRM_AMD_DC_DCN
2862        if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
2863                struct pipe_ctx *mpcc_pipe;
2864                struct pipe_ctx *odm_pipe;
2865
2866                for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
2867                        for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
2868                                odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
2869        }
2870#endif
2871
2872        if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
2873                if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
2874                        if (should_use_dmub_lock(stream->link)) {
2875                                union dmub_hw_lock_flags hw_locks = { 0 };
2876                                struct dmub_hw_lock_inst_flags inst_flags = { 0 };
2877
2878                                hw_locks.bits.lock_dig = 1;
2879                                inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
2880
2881                                dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
2882                                                        true,
2883                                                        &hw_locks,
2884                                                        &inst_flags);
2885                        } else
2886                                top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
2887                                                top_pipe_to_program->stream_res.tg);
2888                }
2889
2890        if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
2891                dc->hwss.interdependent_update_lock(dc, context, true);
2892        else
		/* Lock the top pipe while updating plane addrs, since freesync
		 * requires plane addr update event triggers to be synchronized.
		 * top_pipe_to_program is expected to never be NULL here.
		 */
2897                dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
2898
2899        // Stream updates
2900        if (stream_update)
2901                commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
2902
2903        if (surface_count == 0) {
		/*
		 * When turning off the screen, there is no need to program the
		 * front end a second time; just return after programming blank.
		 */
2908                if (dc->hwss.apply_ctx_for_surface)
2909                        dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
2910                if (dc->hwss.program_front_end_for_ctx)
2911                        dc->hwss.program_front_end_for_ctx(dc, context);
2912
2913                if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
2914                        dc->hwss.interdependent_update_lock(dc, context, false);
2915                else
2916                        dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
2917                dc->hwss.post_unlock_program_front_end(dc, context);
2918                return;
2919        }
2920
2921        if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
2922                for (i = 0; i < surface_count; i++) {
2923                        struct dc_plane_state *plane_state = srf_updates[i].surface;
			/* set logical flag for lock/unlock use */
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (!pipe_ctx->plane_state)
					continue;
				if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
					continue;
				pipe_ctx->plane_state->triplebuffer_flips = false;
				if (update_type == UPDATE_TYPE_FAST &&
					dc->hwss.program_triplebuffer != NULL &&
					!pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
					/* triple buffer for VUpdate only */
					pipe_ctx->plane_state->triplebuffer_flips = true;
				}
2938                        }
2939                        if (update_type == UPDATE_TYPE_FULL) {
2940                                /* force vsync flip when reconfiguring pipes to prevent underflow */
2941                                plane_state->flip_immediate = false;
2942                        }
2943                }
2944        }
2945
2946        // Update Type FULL, Surface updates
2947        for (j = 0; j < dc->res_pool->pipe_count; j++) {
2948                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2949
2950                if (!pipe_ctx->top_pipe &&
2951                        !pipe_ctx->prev_odm_pipe &&
2952                        should_update_pipe_for_stream(context, pipe_ctx, stream)) {
2953                        struct dc_stream_status *stream_status = NULL;
2954
2955                        if (!pipe_ctx->plane_state)
2956                                continue;
2957
			/* Full front end update */
2959                        if (update_type == UPDATE_TYPE_FAST)
2960                                continue;
2961
2962                        ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
2963
2964                        if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
				/* turn off triple buffer for full update */
2966                                dc->hwss.program_triplebuffer(
2967                                        dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
2968                        }
2969                        stream_status =
2970                                stream_get_status(context, pipe_ctx->stream);
2971
2972                        if (dc->hwss.apply_ctx_for_surface)
2973                                dc->hwss.apply_ctx_for_surface(
2974                                        dc, pipe_ctx->stream, stream_status->plane_count, context);
2975                }
2976        }
2977        if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
2978                dc->hwss.program_front_end_for_ctx(dc, context);
2979#ifdef CONFIG_DRM_AMD_DC_DCN
2980                if (dc->debug.validate_dml_output) {
2981                        for (i = 0; i < dc->res_pool->pipe_count; i++) {
2982                                struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i];
2983                                if (cur_pipe.stream == NULL)
2984                                        continue;
2985
2986                                cur_pipe.plane_res.hubp->funcs->validate_dml_output(
2987                                                cur_pipe.plane_res.hubp, dc->ctx,
2988                                                &context->res_ctx.pipe_ctx[i].rq_regs,
2989                                                &context->res_ctx.pipe_ctx[i].dlg_regs,
2990                                                &context->res_ctx.pipe_ctx[i].ttu_regs);
2991                        }
2992                }
2993#endif
2994        }
2995
2996        // Update Type FAST, Surface updates
2997        if (update_type == UPDATE_TYPE_FAST) {
2998                if (dc->hwss.set_flip_control_gsl)
2999                        for (i = 0; i < surface_count; i++) {
3000                                struct dc_plane_state *plane_state = srf_updates[i].surface;
3001
3002                                for (j = 0; j < dc->res_pool->pipe_count; j++) {
3003                                        struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3004
3005                                        if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3006                                                continue;
3007
3008                                        if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3009                                                continue;
3010
3011                                        // GSL has to be used for flip immediate
3012                                        dc->hwss.set_flip_control_gsl(pipe_ctx,
3013                                                        pipe_ctx->plane_state->flip_immediate);
3014                                }
3015                        }
3016
		/* Perform requested updates */
3018                for (i = 0; i < surface_count; i++) {
3019                        struct dc_plane_state *plane_state = srf_updates[i].surface;
3020
3021                        for (j = 0; j < dc->res_pool->pipe_count; j++) {
3022                                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3023
3024                                if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3025                                        continue;
3026
3027                                if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3028                                        continue;
3029
				/* program triple buffer after lock based on flip type */
				if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
					/* only enable triple buffer for fast updates */
3033                                        dc->hwss.program_triplebuffer(
3034                                                dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3035                                }
3036                                if (pipe_ctx->plane_state->update_flags.bits.addr_update)
3037                                        dc->hwss.update_plane_addr(dc, pipe_ctx);
3038                        }
3039                }
3040
3041        }
3042
3043        if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
3044                dc->hwss.interdependent_update_lock(dc, context, false);
3045        else
3046                dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3047
3048        if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3049                if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3050                        top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3051                                        top_pipe_to_program->stream_res.tg,
3052                                        CRTC_STATE_VACTIVE);
3053                        top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3054                                        top_pipe_to_program->stream_res.tg,
3055                                        CRTC_STATE_VBLANK);
3056                        top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3057                                        top_pipe_to_program->stream_res.tg,
3058                                        CRTC_STATE_VACTIVE);
3059
3060                        if (stream && should_use_dmub_lock(stream->link)) {
3061                                union dmub_hw_lock_flags hw_locks = { 0 };
3062                                struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3063
3064                                hw_locks.bits.lock_dig = 1;
3065                                inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3066
3067                                dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3068                                                        false,
3069                                                        &hw_locks,
3070                                                        &inst_flags);
3071                        } else
3072                                top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
3073                                        top_pipe_to_program->stream_res.tg);
3074                }
3075
3076        if (update_type != UPDATE_TYPE_FAST)
3077                dc->hwss.post_unlock_program_front_end(dc, context);
3078
3079        // Fire manual trigger only when bottom plane is flipped
3080        for (j = 0; j < dc->res_pool->pipe_count; j++) {
3081                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3082
3083                if (!pipe_ctx->plane_state)
3084                        continue;
3085
3086                if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
3087                                !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
3088                                !pipe_ctx->plane_state->update_flags.bits.addr_update ||
3089                                pipe_ctx->plane_state->skip_manual_trigger)
3090                        continue;
3091
3092                if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
3093                        pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
3094        }
3095}
3096
3097void dc_commit_updates_for_stream(struct dc *dc,
3098                struct dc_surface_update *srf_updates,
3099                int surface_count,
3100                struct dc_stream_state *stream,
3101                struct dc_stream_update *stream_update,
3102                struct dc_state *state)
3103{
3104        const struct dc_stream_status *stream_status;
3105        enum surface_update_type update_type;
3106        struct dc_state *context;
3107        struct dc_context *dc_ctx = dc->ctx;
3108        int i, j;
3109
3110        stream_status = dc_stream_get_status(stream);
3111        context = dc->current_state;
3112
3113        update_type = dc_check_update_surfaces_for_stream(
3114                                dc, srf_updates, surface_count, stream_update, stream_status);
3115
3116        if (update_type >= update_surface_trace_level)
3117                update_surface_trace(dc, srf_updates, surface_count);
3118
3120        if (update_type >= UPDATE_TYPE_FULL) {
3121
3122                /* initialize scratch memory for building context */
3123                context = dc_create_state(dc);
3124                if (context == NULL) {
3125                        DC_ERROR("Failed to allocate new validate context!\n");
3126                        return;
3127                }
3128
3129                dc_resource_state_copy_construct(state, context);
3130
3131                for (i = 0; i < dc->res_pool->pipe_count; i++) {
3132                        struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
3133                        struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3134
3135                        if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
3136                                new_pipe->plane_state->force_full_update = true;
3137                }
3138        } else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
3139                /*
3140                 * Previous frame finished and HW is ready for optimization.
3141                 *
3142                 * Only relevant for DCN behavior where we can guarantee the optimization
3143                 * is safe to apply - retain the legacy behavior for DCE.
3144                 */
3145                dc_post_update_surfaces_to_stream(dc);
3146        }
3147
3149        for (i = 0; i < surface_count; i++) {
3150                struct dc_plane_state *surface = srf_updates[i].surface;
3151
3152                copy_surface_update_to_plane(surface, &srf_updates[i]);
3153
3154                if (update_type >= UPDATE_TYPE_MED) {
3155                        for (j = 0; j < dc->res_pool->pipe_count; j++) {
3156                                struct pipe_ctx *pipe_ctx =
3157                                        &context->res_ctx.pipe_ctx[j];
3158
3159                                if (pipe_ctx->plane_state != surface)
3160                                        continue;
3161
3162                                resource_build_scaling_params(pipe_ctx);
3163                        }
3164                }
3165        }
3166
3167        copy_stream_update_to_stream(dc, context, stream, stream_update);
3168
3169        if (update_type >= UPDATE_TYPE_FULL) {
3170                if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
3171                        DC_ERROR("Mode validation failed for stream update!\n");
3172                        dc_release_state(context);
3173                        return;
3174                }
3175        }
3176
3177        TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
3178
3179        commit_planes_for_stream(
3180                                dc,
3181                                srf_updates,
3182                                surface_count,
3183                                stream,
3184                                stream_update,
3185                                update_type,
3186                                context);
	/* update current_state */
3188        if (dc->current_state != context) {
3190                struct dc_state *old = dc->current_state;
3191
3192                dc->current_state = context;
3193                dc_release_state(old);
3194
3195                for (i = 0; i < dc->res_pool->pipe_count; i++) {
3196                        struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3197
3198                        if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
3199                                pipe_ctx->plane_state->force_full_update = false;
3200                }
3201        }
3202
3203        /* Legacy optimization path for DCE. */
3204        if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
3205                dc_post_update_surfaces_to_stream(dc);
3206                TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
3207        }
3208
}
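
/*
 * A sketch of how a DM layer might drive dc_commit_updates_for_stream() for a
 * stream-only DPMS-off commit (assumption; the state argument shown is
 * illustrative). Passing no surfaces takes the surface_count == 0 blanking
 * path in commit_planes_for_stream():
 *
 *	bool dpms_off = true;
 *	struct dc_stream_update update = {0};
 *
 *	update.dpms_off = &dpms_off;
 *	dc_commit_updates_for_stream(dc, NULL, 0, stream, &update,
 *				     dc->current_state);
 */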
3212
3213uint8_t dc_get_current_stream_count(struct dc *dc)
3214{
3215        return dc->current_state->stream_count;
3216}
3217
3218struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
3219{
3220        if (i < dc->current_state->stream_count)
3221                return dc->current_state->streams[i];
3222        return NULL;
3223}
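
/*
 * The two accessors above are typically paired to walk the streams of the
 * current state, e.g. (sketch):
 *
 *	uint8_t i;
 *
 *	for (i = 0; i < dc_get_current_stream_count(dc); i++) {
 *		struct dc_stream_state *stream = dc_get_stream_at_index(dc, i);
 *
 *		if (stream)
 *			... inspect stream ...
 *	}
 */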
3224
3225struct dc_stream_state *dc_stream_find_from_link(const struct dc_link *link)
3226{
3227        uint8_t i;
3228        struct dc_context *ctx = link->ctx;
3229
3230        for (i = 0; i < ctx->dc->current_state->stream_count; i++) {
3231                if (ctx->dc->current_state->streams[i]->link == link)
3232                        return ctx->dc->current_state->streams[i];
3233        }
3234
3235        return NULL;
3236}
3237
3238enum dc_irq_source dc_interrupt_to_irq_source(
3239                struct dc *dc,
3240                uint32_t src_id,
3241                uint32_t ext_id)
3242{
3243        return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
3244}
3245
3246/*
3247 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
3248 */
3249bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{
3252        if (dc == NULL)
3253                return false;
3254
3255        return dal_irq_service_set(dc->res_pool->irqs, src, enable);
3256}
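
/*
 * Sketch of a typical caller: map a raw interrupt source id to a
 * dc_irq_source first, then enable and later acknowledge it (the src_id
 * value and call site are assumptions, not taken from this file):
 *
 *	enum dc_irq_source src = dc_interrupt_to_irq_source(dc, src_id, 0);
 *
 *	if (!dc_interrupt_set(dc, src, true))
 *		... enabling failed, e.g. unknown source ...
 *	...
 *	dc_interrupt_ack(dc, src);
 */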
3257
3258void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
3259{
3260        dal_irq_service_ack(dc->res_pool->irqs, src);
3261}
3262
3263void dc_power_down_on_boot(struct dc *dc)
3264{
3265        if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
3266                        dc->hwss.power_down_on_boot)
3267                dc->hwss.power_down_on_boot(dc);
3268}
3269
3270void dc_set_power_state(
3271        struct dc *dc,
3272        enum dc_acpi_cm_power_state power_state)
3273{
3274        struct kref refcount;
3275        struct display_mode_lib *dml;
3276
3277        if (!dc->current_state)
3278                return;
3279
3280        switch (power_state) {
3281        case DC_ACPI_CM_POWER_STATE_D0:
3282                dc_resource_state_construct(dc, dc->current_state);
3283
3284#if defined(CONFIG_DRM_AMD_DC_DCN)
3285                dc_z10_restore(dc);
3286#endif
3287                if (dc->ctx->dmub_srv)
3288                        dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
3289
3290                dc->hwss.init_hw(dc);
3291
3292                if (dc->hwss.init_sys_ctx != NULL &&
3293                        dc->vm_pa_config.valid) {
3294                        dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
3295                }
3296
3297                break;
3298        default:
3299                ASSERT(dc->current_state->stream_count == 0);
3300                /* Zero out the current context so that on resume we start with
3301                 * clean state, and dc hw programming optimizations will not
3302                 * cause any trouble.
3303                 */
3304                dml = kzalloc(sizeof(struct display_mode_lib),
3305                                GFP_KERNEL);
3306
3307                ASSERT(dml);
3308                if (!dml)
3309                        return;
3310
3311                /* Preserve refcount */
3312                refcount = dc->current_state->refcount;
3313                /* Preserve display mode lib */
3314                memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
3315
3316                dc_resource_state_destruct(dc->current_state);
3317                memset(dc->current_state, 0,
3318                                sizeof(*dc->current_state));
3319
3320                dc->current_state->refcount = refcount;
3321                dc->current_state->bw_ctx.dml = *dml;
3322
3323                kfree(dml);
3324
3325                break;
3326        }
3327}
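
/*
 * Sketch of the expected suspend/resume ordering (an assumption based on the
 * D0/default split above): the non-D0 path tears the current state down to a
 * clean slate, D0 reconstructs it, and links are resumed afterwards:
 *
 *	dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D3);	// suspend
 *	...
 *	dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D0);	// resume
 *	dc_resume(dc);
 */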
3328
3329void dc_resume(struct dc *dc)
3330{
3331        uint32_t i;
3332
3333        for (i = 0; i < dc->link_count; i++)
3334                core_link_resume(dc->links[i]);
3335}
3336
3337bool dc_is_dmcu_initialized(struct dc *dc)
3338{
3339        struct dmcu *dmcu = dc->res_pool->dmcu;
3340
3341        if (dmcu)
3342                return dmcu->funcs->is_dmcu_initialized(dmcu);
3343        return false;
3344}
3345
3346bool dc_submit_i2c(
3347                struct dc *dc,
3348                uint32_t link_index,
3349                struct i2c_command *cmd)
3350{
	struct dc_link *link = dc->links[link_index];
	struct ddc_service *ddc = link->ddc;

	return dce_i2c_submit_command(
3355                dc->res_pool,
3356                ddc->ddc_pin,
3357                cmd);
3358}
3359
3360bool dc_submit_i2c_oem(
3361                struct dc *dc,
3362                struct i2c_command *cmd)
3363{
	struct ddc_service *ddc = dc->res_pool->oem_device;

	return dce_i2c_submit_command(
3366                dc->res_pool,
3367                ddc->ddc_pin,
3368                cmd);
3369}
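
/*
 * Sketch of a single-payload I2C write submitted through dc_submit_i2c()
 * (struct and enum names assumed from dc_ddc_types.h, not defined here):
 *
 *	uint8_t buf[2] = { 0x00, 0x10 };
 *	struct i2c_payload payload = {
 *		.write = true,
 *		.address = 0x37,
 *		.length = sizeof(buf),
 *		.data = buf,
 *	};
 *	struct i2c_command cmd = {
 *		.payloads = &payload,
 *		.number_of_payloads = 1,
 *		.engine = I2C_COMMAND_ENGINE_DEFAULT,
 *		.speed = 100,
 *	};
 *
 *	dc_submit_i2c(dc, link_index, &cmd);
 */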
3370
3371static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
3372{
3373        if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
3374                BREAK_TO_DEBUGGER();
3375                return false;
3376        }
3377
3378        dc_sink_retain(sink);
3379
3380        dc_link->remote_sinks[dc_link->sink_count] = sink;
3381        dc_link->sink_count++;
3382
3383        return true;
3384}
3385
3386/*
3387 * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
3388 *
3389 * EDID length is in bytes
3390 */
3391struct dc_sink *dc_link_add_remote_sink(
3392                struct dc_link *link,
3393                const uint8_t *edid,
3394                int len,
3395                struct dc_sink_init_data *init_data)
3396{
3397        struct dc_sink *dc_sink;
3398        enum dc_edid_status edid_status;
3399
3400        if (len > DC_MAX_EDID_BUFFER_SIZE) {
3401                dm_error("Max EDID buffer size breached!\n");
3402                return NULL;
3403        }
3404
3405        if (!init_data) {
3406                BREAK_TO_DEBUGGER();
3407                return NULL;
3408        }
3409
3410        if (!init_data->link) {
3411                BREAK_TO_DEBUGGER();
3412                return NULL;
3413        }
3414
3415        dc_sink = dc_sink_create(init_data);
3416
3417        if (!dc_sink)
3418                return NULL;
3419
3420        memmove(dc_sink->dc_edid.raw_edid, edid, len);
3421        dc_sink->dc_edid.length = len;
3422
3423        if (!link_add_remote_sink_helper(
3424                        link,
3425                        dc_sink))
3426                goto fail_add_sink;
3427
3428        edid_status = dm_helpers_parse_edid_caps(
3429                        link->ctx,
3430                        &dc_sink->dc_edid,
3431                        &dc_sink->edid_caps);
3432
	/*
	 * Treat the device as having no EDID if EDID parsing fails.
	 */
	if (edid_status != EDID_OK) {
		dc_sink->dc_edid.length = 0;
		dm_error("Bad EDID, status %d!\n", edid_status);
3440        }
3441
3442        return dc_sink;
3443
3444fail_add_sink:
3445        dc_sink_release(dc_sink);
3446        return NULL;
3447}
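
/*
 * Sketch of the intended usage (an assumption modelled on MST handling in a
 * DM layer): create a remote sink from a downstream device's EDID, and tear
 * it down again when the branch device disappears:
 *
 *	struct dc_sink_init_data init_data = {
 *		.link = link,
 *		.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST,
 *	};
 *	struct dc_sink *sink =
 *		dc_link_add_remote_sink(link, edid_buf, edid_len, &init_data);
 *
 *	...
 *	dc_link_remove_remote_sink(link, sink);
 *	dc_sink_release(sink);
 */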
3448
3449/*
3450 * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
3451 *
3452 * Note that this just removes the struct dc_sink - it doesn't
3453 * program hardware or alter other members of dc_link
3454 */
3455void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
3456{
3457        int i;
3458
3459        if (!link->sink_count) {
3460                BREAK_TO_DEBUGGER();
3461                return;
3462        }
3463
3464        for (i = 0; i < link->sink_count; i++) {
3465                if (link->remote_sinks[i] == sink) {
3466                        dc_sink_release(sink);
3467                        link->remote_sinks[i] = NULL;
3468
3469                        /* shrink array to remove empty place */
3470                        while (i < link->sink_count - 1) {
3471                                link->remote_sinks[i] = link->remote_sinks[i+1];
3472                                i++;
3473                        }
3474                        link->remote_sinks[i] = NULL;
3475                        link->sink_count--;
3476                        return;
3477                }
3478        }
3479}
3480
3481void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
3482{
3483        info->displayClock                              = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
3484        info->engineClock                               = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
3485        info->memoryClock                               = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
3486        info->maxSupportedDppClock              = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
3487        info->dppClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
3488        info->socClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
3489        info->dcfClockDeepSleep                 = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
3490        info->fClock                                    = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
3491        info->phyClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
3492}
3493enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
3494{
3495        if (dc->hwss.set_clock)
3496                return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
3497        return DC_ERROR_UNEXPECTED;
3498}
3499void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
3500{
3501        if (dc->hwss.get_clock)
3502                dc->hwss.get_clock(dc, clock_type, clock_cfg);
3503}
3504
/* enable/disable eDP PSR without specifying a stream */
3506bool dc_set_psr_allow_active(struct dc *dc, bool enable)
3507{
3508        int i;
3509        bool allow_active;
3510
	for (i = 0; i < dc->current_state->stream_count; i++) {
3512                struct dc_link *link;
3513                struct dc_stream_state *stream = dc->current_state->streams[i];
3514
3515                link = stream->link;
3516                if (!link)
3517                        continue;
3518
3519                if (link->psr_settings.psr_feature_enabled) {
3520                        if (enable && !link->psr_settings.psr_allow_active) {
3521                                allow_active = true;
3522                                if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
3523                                        return false;
3524                        } else if (!enable && link->psr_settings.psr_allow_active) {
3525                                allow_active = false;
3526                                if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
3527                                        return false;
3528                        }
3529                }
3530        }
3531
3532        return true;
3533}
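
/*
 * Sketch: a DM power-management hook would toggle PSR across all eDP links
 * in one call, e.g. around a system idle state (caller is an assumption):
 *
 *	dc_set_psr_allow_active(dc, true);	// display idle, allow PSR
 *	...
 *	dc_set_psr_allow_active(dc, false);	// activity resumed
 */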
3534
3535#if defined(CONFIG_DRM_AMD_DC_DCN)
3536
3537void dc_allow_idle_optimizations(struct dc *dc, bool allow)
3538{
3539        if (dc->debug.disable_idle_power_optimizations)
3540                return;
3541
3542        if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
3543                if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
3544                        return;
3545
3546        if (allow == dc->idle_optimizations_allowed)
3547                return;
3548
3549        if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
3550                dc->idle_optimizations_allowed = allow;
3551}
3552
3553/*
3554 * blank all streams, and set min and max memory clock to
3555 * lowest and highest DPM level, respectively
3556 */
3557void dc_unlock_memory_clock_frequency(struct dc *dc)
3558{
3559        unsigned int i;
3560
3561        for (i = 0; i < MAX_PIPES; i++)
3562                if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
3563                        core_link_disable_stream(&dc->current_state->res_ctx.pipe_ctx[i]);
3564
3565        dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
3566        dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
3567}
3568
3569/*
3570 * set min memory clock to the min required for current mode,
3571 * max to maxDPM, and unblank streams
3572 */
3573void dc_lock_memory_clock_frequency(struct dc *dc)
3574{
3575        unsigned int i;
3576
3577        dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
3578        dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
3579        dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
3580
3581        for (i = 0; i < MAX_PIPES; i++)
3582                if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
3583                        core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
3584}
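
/*
 * The two helpers above are meant to bracket an operation that needs the
 * full memory-clock DPM range (sketch; the exact caller is an assumption):
 *
 *	dc_unlock_memory_clock_frequency(dc);	// blank streams, full range
 *	... retrain / reclock ...
 *	dc_lock_memory_clock_frequency(dc);	// pin min clock, unblank
 */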
3585
3586bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
3587                struct dc_cursor_attributes *cursor_attr)
3588{
3589        if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
3590                return true;
3591        return false;
3592}
3593
3594/* cleanup on driver unload */
3595void dc_hardware_release(struct dc *dc)
3596{
3597        if (dc->hwss.hardware_release)
3598                dc->hwss.hardware_release(dc);
3599}
3600#endif
3601
3602/**
3603 * dc_enable_dmub_notifications - Returns whether dmub notification can be enabled
3604 * @dc: dc structure
3605 *
3606 * Returns: True to enable dmub notifications, False otherwise
3607 */
3608bool dc_enable_dmub_notifications(struct dc *dc)
3609{
3610#if defined(CONFIG_DRM_AMD_DC_DCN)
3611        /* YELLOW_CARP B0 USB4 DPIA needs dmub notifications for interrupts */
3612        if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
3613            dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
3614            !dc->debug.dpia_debug.bits.disable_dpia)
3615                return true;
3616#endif
3617        /* dmub aux needs dmub notifications to be enabled */
3618        return dc->debug.enable_dmub_aux_for_legacy_ddc;
3619}
3620
3621/**
3622 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
3623 *                                      Sets port index appropriately for legacy DDC
3624 * @dc: dc structure
3625 * @link_index: link index
3626 * @payload: aux payload
3627 *
3628 * Returns: True if successful, False if failure
3629 */
3630bool dc_process_dmub_aux_transfer_async(struct dc *dc,
3631                                uint32_t link_index,
3632                                struct aux_payload *payload)
3633{
3634        uint8_t action;
3635        union dmub_rb_cmd cmd = {0};
3636        struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
3637
3638        ASSERT(payload->length <= 16);
3639
3640        cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
3641        cmd.dp_aux_access.header.payload_bytes = 0;
3642        /* For dpia, ddc_pin is set to NULL */
3643        if (!dc->links[link_index]->ddc->ddc_pin)
3644                cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
3645        else
3646                cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
3647
3648        cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
3649        cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
3650        cmd.dp_aux_access.aux_control.timeout = 0;
3651        cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
3652        cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
3653        cmd.dp_aux_access.aux_control.dpaux.length = payload->length;
3654
3655        /* set aux action */
3656        if (payload->i2c_over_aux) {
3657                if (payload->write) {
3658                        if (payload->mot)
3659                                action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
3660                        else
3661                                action = DP_AUX_REQ_ACTION_I2C_WRITE;
3662                } else {
3663                        if (payload->mot)
3664                                action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
3665                        else
3666                                action = DP_AUX_REQ_ACTION_I2C_READ;
		}
3668        } else {
3669                if (payload->write)
3670                        action = DP_AUX_REQ_ACTION_DPCD_WRITE;
3671                else
3672                        action = DP_AUX_REQ_ACTION_DPCD_READ;
3673        }
3674
3675        cmd.dp_aux_access.aux_control.dpaux.action = action;
3676
3677        if (payload->length && payload->write) {
3678                memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
3679                        payload->data,
3680                        payload->length
3681                        );
3682        }
3683
3684        dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
3685        dc_dmub_srv_cmd_execute(dmub_srv);
3686        dc_dmub_srv_wait_idle(dmub_srv);
3687
3688        return true;
3689}
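
/*
 * Sketch of a native-AUX DPCD read request as the function above expects it
 * (field values illustrative; all fields shown are consumed by the function).
 * Note the read data is delivered later via a DMUB AUX reply notification,
 * not through this call:
 *
 *	struct aux_payload payload = {
 *		.i2c_over_aux = false,	// native AUX
 *		.write = false,		// DPCD read
 *		.mot = false,
 *		.address = 0x0,		// DPCD_REV
 *		.length = 1,
 *	};
 *
 *	dc_process_dmub_aux_transfer_async(dc, link_index, &payload);
 */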
3690
3691uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
3692                                            uint8_t dpia_port_index)
3693{
3694        uint8_t index, link_index = 0xFF;
3695
3696        for (index = 0; index < dc->link_count; index++) {
3697                /* ddc_hw_inst has dpia port index for dpia links
3698                 * and ddc instance for legacy links
3699                 */
3700                if (!dc->links[index]->ddc->ddc_pin) {
3701                        if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
3702                                link_index = index;
3703                                break;
3704                        }
3705                }
3706        }
3707        ASSERT(link_index != 0xFF);
3708        return link_index;
3709}
3710
/**
 * dc_process_dmub_set_config_async - Submits set_config command to dmub via inbox message
 * @dc: dc structure
 * @link_index: link index
 * @payload: set_config command payload
 * @notify: set_config immediate reply
 *
 * Returns: True if successful, False if failure
 */
3728bool dc_process_dmub_set_config_async(struct dc *dc,
3729                                uint32_t link_index,
3730                                struct set_config_cmd_payload *payload,
3731                                struct dmub_notification *notify)
3732{
3733        union dmub_rb_cmd cmd = {0};
3734        struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
3735        bool is_cmd_complete = true;
3736
3737        /* prepare SET_CONFIG command */
3738        cmd.set_config_access.header.type = DMUB_CMD__DPIA;
3739        cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;
3740
3741        cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
3742        cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
3743        cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
3744
3745        if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) {
3746                /* command is not processed by dmub */
3747                notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
3748                return is_cmd_complete;
3749        }
3750
3751        /* command processed by dmub, if ret_status is 1, it is completed instantly */
3752        if (cmd.set_config_access.header.ret_status == 1)
3753                notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
3754        else
3755                /* cmd pending, will receive notification via outbox */
3756                is_cmd_complete = false;
3757
3758        return is_cmd_complete;
3759}
3760
/**
 * dc_process_dmub_set_mst_slots - Submits mst slot allocation command to dmub via inbox message
 * @dc: dc structure
 * @link_index: link index
 * @mst_alloc_slots: mst slots to be allotted
 * @mst_slots_in_use: mst slots in use returned in failure case
 *
 * Returns: DC_OK if successful, DC_ERROR if failure
 */
3778enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
3779                                uint32_t link_index,
3780                                uint8_t mst_alloc_slots,
3781                                uint8_t *mst_slots_in_use)
3782{
3783        union dmub_rb_cmd cmd = {0};
3784        struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
3785
3786        /* prepare MST_ALLOC_SLOTS command */
3787        cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
3788        cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
3789
3790        cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
3791        cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
3792
3793        if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd))
3794                /* command is not processed by dmub */
3795                return DC_ERROR_UNEXPECTED;
3796
	/* command processed by dmub; any ret_status other than 1 is a processing error */
3798        if (cmd.set_config_access.header.ret_status != 1)
3799                /* command processing error */
3800                return DC_ERROR_UNEXPECTED;
3801
3802        /* command processed and we have a status of 2, mst not enabled in dpia */
3803        if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
3804                return DC_FAIL_UNSUPPORTED_1;
3805
3806        /* previously configured mst alloc and used slots did not match */
3807        if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
3808                *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
3809                return DC_NOT_SUPPORTED;
3810        }
3811
3812        return DC_OK;
3813}
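
/*
 * Sketch of handling the status codes documented above (the caller shape is
 * an assumption):
 *
 *	uint8_t slots_in_use = 0;
 *	enum dc_status status = dc_process_dmub_set_mst_slots(dc, link_index,
 *					mst_alloc_slots, &slots_in_use);
 *
 *	if (status == DC_NOT_SUPPORTED)
 *		// allocation mismatch: slots_in_use now holds the slot
 *		// count still in use from the previous configuration
 *		...
 */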
3814
3815/**
3816 * dc_disable_accelerated_mode - disable accelerated mode
3817 * @dc: dc structure
3818 */
3819void dc_disable_accelerated_mode(struct dc *dc)
3820{
3821        bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
3822}
3823
3824
/**
 * dc_notify_vsync_int_state - notifies vsync enable/disable state
 * @dc: dc structure
 * @stream: stream where vsync int state changed
 * @enable: whether vsync is enabled or disabled
 *
 * Called when vsync is enabled/disabled. Will notify DMUB to start/stop
 * ABM interrupts after steady state is reached.
 */
3837void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
3838{
3839        int i;
3840        int edp_num;
3841        struct pipe_ctx *pipe = NULL;
3842        struct dc_link *link = stream->sink->link;
3843        struct dc_link *edp_links[MAX_NUM_EDP];
3844
3846        if (link->psr_settings.psr_feature_enabled)
3847                return;
3848
3849        /*find primary pipe associated with stream*/
3850        for (i = 0; i < MAX_PIPES; i++) {
3851                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3852
3853                if (pipe->stream == stream && pipe->stream_res.tg)
3854                        break;
3855        }
3856
3857        if (i == MAX_PIPES) {
3858                ASSERT(0);
3859                return;
3860        }
3861
3862        get_edp_links(dc, edp_links, &edp_num);
3863
3864        /* Determine panel inst */
3865        for (i = 0; i < edp_num; i++) {
3866                if (edp_links[i] == link)
3867                        break;
3868        }
3869
	if (i == edp_num)
		return;
3873
3874        if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
3875                pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
3876}
3877