linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
<<
>>
Prefs
   1/*
   2 * Copyright 2015 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: AMD
  23 *
  24 */
  25
  26/* The caprices of the preprocessor require that this be declared right here */
  27#define CREATE_TRACE_POINTS
  28
  29#include "dm_services_types.h"
  30#include "dc.h"
  31#include "dc/inc/core_types.h"
  32
  33#include "vid.h"
  34#include "amdgpu.h"
  35#include "amdgpu_display.h"
  36#include "amdgpu_ucode.h"
  37#include "atom.h"
  38#include "amdgpu_dm.h"
  39#include "amdgpu_pm.h"
  40
  41#include "amd_shared.h"
  42#include "amdgpu_dm_irq.h"
  43#include "dm_helpers.h"
  44#include "amdgpu_dm_mst_types.h"
  45#if defined(CONFIG_DEBUG_FS)
  46#include "amdgpu_dm_debugfs.h"
  47#endif
  48
  49#include "ivsrcid/ivsrcid_vislands30.h"
  50
  51#include <linux/module.h>
  52#include <linux/moduleparam.h>
  53#include <linux/version.h>
  54#include <linux/types.h>
  55#include <linux/pm_runtime.h>
  56#include <linux/firmware.h>
  57
  58#include <drm/drmP.h>
  59#include <drm/drm_atomic.h>
  60#include <drm/drm_atomic_uapi.h>
  61#include <drm/drm_atomic_helper.h>
  62#include <drm/drm_dp_mst_helper.h>
  63#include <drm/drm_fb_helper.h>
  64#include <drm/drm_edid.h>
  65
  66#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
  67#include "ivsrcid/irqsrcs_dcn_1_0.h"
  68
  69#include "dcn/dcn_1_0_offset.h"
  70#include "dcn/dcn_1_0_sh_mask.h"
  71#include "soc15_hw_ip.h"
  72#include "vega10_ip_offset.h"
  73
  74#include "soc15_common.h"
  75#endif
  76
  77#include "modules/inc/mod_freesync.h"
  78#include "modules/power/power_helpers.h"
  79#include "modules/inc/mod_info_packet.h"
  80
  81#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
  82MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
  83
  84/**
  85 * DOC: overview
  86 *
  87 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
   88 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
  89 * requests into DC requests, and DC responses into DRM responses.
  90 *
  91 * The root control structure is &struct amdgpu_display_manager.
  92 */
  93
  94/* basic init/fini API */
  95static int amdgpu_dm_init(struct amdgpu_device *adev);
  96static void amdgpu_dm_fini(struct amdgpu_device *adev);
  97
  98/*
  99 * initializes drm_device display related structures, based on the information
  100 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 101 * drm_encoder, drm_mode_config
 102 *
 103 * Returns 0 on success
 104 */
 105static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
 106/* removes and deallocates the drm structures, created by the above function */
 107static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
 108
 109static void
 110amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
 111
 112static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
 113                                struct drm_plane *plane,
 114                                unsigned long possible_crtcs);
 115static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
 116                               struct drm_plane *plane,
 117                               uint32_t link_index);
 118static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
 119                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
 120                                    uint32_t link_index,
 121                                    struct amdgpu_encoder *amdgpu_encoder);
 122static int amdgpu_dm_encoder_init(struct drm_device *dev,
 123                                  struct amdgpu_encoder *aencoder,
 124                                  uint32_t link_index);
 125
 126static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
 127
 128static int amdgpu_dm_atomic_commit(struct drm_device *dev,
 129                                   struct drm_atomic_state *state,
 130                                   bool nonblock);
 131
 132static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
 133
 134static int amdgpu_dm_atomic_check(struct drm_device *dev,
 135                                  struct drm_atomic_state *state);
 136
 137static void handle_cursor_update(struct drm_plane *plane,
 138                                 struct drm_plane_state *old_plane_state);
 139
 140
 141
/* Default plane set: every hardware plane is exposed to DRM as a primary. */
static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
};
 150
/* Carrizo plane set: three primaries plus one YUV-capable underlay overlay.
 * (Identifier keeps the historical "carizzo" spelling used elsewhere.) */
static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
};
 157
/* Stoney plane set: two primaries plus one YUV-capable underlay overlay. */
static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
};
 163
 164/*
 165 * dm_vblank_get_counter
 166 *
 167 * @brief
 168 * Get counter for number of vertical blanks
 169 *
 170 * @param
 171 * struct amdgpu_device *adev - [in] desired amdgpu device
 172 * int disp_idx - [in] which CRTC to get the counter from
 173 *
 174 * @return
 175 * Counter for vertical blanks
 176 */
 177static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 178{
 179        if (crtc >= adev->mode_info.num_crtc)
 180                return 0;
 181        else {
 182                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
 183                struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
 184                                acrtc->base.state);
 185
 186
 187                if (acrtc_state->stream == NULL) {
 188                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
 189                                  crtc);
 190                        return 0;
 191                }
 192
 193                return dc_stream_get_vblank_counter(acrtc_state->stream);
 194        }
 195}
 196
 197static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
 198                                  u32 *vbl, u32 *position)
 199{
 200        uint32_t v_blank_start, v_blank_end, h_position, v_position;
 201
 202        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
 203                return -EINVAL;
 204        else {
 205                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
 206                struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
 207                                                acrtc->base.state);
 208
 209                if (acrtc_state->stream ==  NULL) {
 210                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
 211                                  crtc);
 212                        return 0;
 213                }
 214
 215                /*
 216                 * TODO rework base driver to use values directly.
 217                 * for now parse it back into reg-format
 218                 */
 219                dc_stream_get_scanoutpos(acrtc_state->stream,
 220                                         &v_blank_start,
 221                                         &v_blank_end,
 222                                         &h_position,
 223                                         &v_position);
 224
 225                *position = v_position | (h_position << 16);
 226                *vbl = v_blank_start | (v_blank_end << 16);
 227        }
 228
 229        return 0;
 230}
 231
/* amd_ip_funcs .is_idle hook — stubbed; DM always reports idle. */
static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}
 237
/* amd_ip_funcs .wait_for_idle hook — stubbed; returns success immediately. */
static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}
 243
/* amd_ip_funcs .check_soft_reset hook — DM never requests a soft reset. */
static bool dm_check_soft_reset(void *handle)
{
        return false;
}
 248
/* amd_ip_funcs .soft_reset hook — stubbed; nothing to reset. */
static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}
 254
 255static struct amdgpu_crtc *
 256get_crtc_by_otg_inst(struct amdgpu_device *adev,
 257                     int otg_inst)
 258{
 259        struct drm_device *dev = adev->ddev;
 260        struct drm_crtc *crtc;
 261        struct amdgpu_crtc *amdgpu_crtc;
 262
 263        if (otg_inst == -1) {
 264                WARN_ON(1);
 265                return adev->mode_info.crtcs[0];
 266        }
 267
 268        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 269                amdgpu_crtc = to_amdgpu_crtc(crtc);
 270
 271                if (amdgpu_crtc->otg_inst == otg_inst)
 272                        return amdgpu_crtc;
 273        }
 274
 275        return NULL;
 276}
 277
/*
 * dm_pflip_high_irq - page-flip-done high-priority interrupt handler.
 * @interrupt_params: struct common_irq_params registered for this source;
 *                    carries the amdgpu device and the IRQ source id.
 *
 * Resolves the IRQ source back to its CRTC and, if a flip was submitted,
 * completes it: records the vblank count, sends the pending DRM event to
 * userspace, clears the flip status, and drops a vblank reference
 * (presumably taken when the flip was armed — outside this view).
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;

        /* IRQ source ids are offset from the pflip base; recover the OTG instance. */
        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
                return;
        }

        /* event_lock serializes flip completion against the commit path. */
        spin_lock_irqsave(&adev->ddev->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
                                                 amdgpu_crtc->pflip_status,
                                                 AMDGPU_FLIP_SUBMITTED,
                                                 amdgpu_crtc->crtc_id,
                                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
                return;
        }

        /* Update to correct count(s) if racing with vblank irq */
        amdgpu_crtc->last_flip_vblank = drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

        /* wake up userspace */
        if (amdgpu_crtc->event) {
                drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);

                /* page flip completed. clean up */
                amdgpu_crtc->event = NULL;

        } else
                WARN_ON(1);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

        DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
                                        __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);

        drm_crtc_vblank_put(&amdgpu_crtc->base);
}
 327
/*
 * dm_crtc_high_irq - vblank high-priority interrupt handler.
 * @interrupt_params: struct common_irq_params for this IRQ source.
 *
 * Forwards the vblank to DRM, services CRC capture, and, when variable
 * refresh is active on the stream, lets the freesync module process the
 * v_update and reprograms the stream's vmin/vmax adjustment.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct dm_crtc_state *acrtc_state;

        /* IRQ source ids are offset from the vblank base; recover the OTG instance. */
        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

        if (acrtc) {
                drm_crtc_handle_vblank(&acrtc->base);
                amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

                acrtc_state = to_dm_crtc_state(acrtc->base.state);

                /* Only touch VRR state when a stream exists and VRR is both
                 * supported and actively variable. */
                if (acrtc_state->stream &&
                    acrtc_state->vrr_params.supported &&
                    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
                        mod_freesync_handle_v_update(
                                adev->dm.freesync_module,
                                acrtc_state->stream,
                                &acrtc_state->vrr_params);

                        dc_stream_adjust_vmin_vmax(
                                adev->dm.dc,
                                acrtc_state->stream,
                                &acrtc_state->vrr_params.adjust);
                }
        }
}
 358
/* amd_ip_funcs .set_clockgating_state hook — no-op for DM. */
static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}
 364
/* amd_ip_funcs .set_powergating_state hook — no-op for DM. */
static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}
 370
 371/* Prototypes of private functions */
 372static int dm_early_init(void* handle);
 373
 374/* Allocate memory for FBC compressed data  */
 375static void amdgpu_dm_fbc_init(struct drm_connector *connector)
 376{
 377        struct drm_device *dev = connector->dev;
 378        struct amdgpu_device *adev = dev->dev_private;
 379        struct dm_comressor_info *compressor = &adev->dm.compressor;
 380        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
 381        struct drm_display_mode *mode;
 382        unsigned long max_size = 0;
 383
 384        if (adev->dm.dc->fbc_compressor == NULL)
 385                return;
 386
 387        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
 388                return;
 389
 390        if (compressor->bo_ptr)
 391                return;
 392
 393
 394        list_for_each_entry(mode, &connector->modes, head) {
 395                if (max_size < mode->htotal * mode->vtotal)
 396                        max_size = mode->htotal * mode->vtotal;
 397        }
 398
 399        if (max_size) {
 400                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
 401                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
 402                            &compressor->gpu_addr, &compressor->cpu_addr);
 403
 404                if (r)
 405                        DRM_ERROR("DM: Failed to initialize FBC\n");
 406                else {
 407                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
 408                        DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
 409                }
 410
 411        }
 412
 413}
 414
/*
 * amdgpu_dm_init - create and initialize the display manager.
 * @adev: amdgpu device
 *
 * Builds dc_init_data from @adev, creates the CGS device, brings up DC,
 * the freesync module, color management and the DRM-facing structures,
 * then vblank support and (if enabled) debugfs.
 *
 * Return: 0 on success; -EINVAL on any failure, after tearing everything
 * down again via amdgpu_dm_fini().
 */
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
        adev->dm.ddev = adev->ddev;
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));

        mutex_init(&adev->dm.dc_lock);

        if(amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->rev_id;
        /* NOTE(review): hw_internal_rev is populated from external_rev_id —
         * presumably DC's asic_id convention; confirm against DC headers. */
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        /*
         * TODO debug why this doesn't work on Raven
         */
        if (adev->flags & AMD_IS_APU &&
            adev->asic_type >= CHIP_CARRIZO &&
            adev->asic_type < CHIP_RAVEN)
                init_data.flags.gpu_vm_support = true;

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        /* Freesync creation failure is logged but deliberately non-fatal. */
        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR(
                "amdgpu: failed to initialize freesync_module.\n");
        } else
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                adev->dm.freesync_module);

        amdgpu_dm_init_color_mod();

        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* Update the actual used number of crtc */
        adev->mode_info.num_crtc = adev->dm.display_indexes_num;

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

#if defined(CONFIG_DEBUG_FS)
        /* debugfs failure is non-fatal. */
        if (dtn_debugfs_init(adev))
                DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
#endif

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}
 519
 520static void amdgpu_dm_fini(struct amdgpu_device *adev)
 521{
 522        amdgpu_dm_destroy_drm_device(&adev->dm);
 523        /*
 524         * TODO: pageflip, vlank interrupt
 525         *
 526         * amdgpu_dm_irq_fini(adev);
 527         */
 528
 529        if (adev->dm.cgs_device) {
 530                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
 531                adev->dm.cgs_device = NULL;
 532        }
 533        if (adev->dm.freesync_module) {
 534                mod_freesync_destroy(adev->dm.freesync_module);
 535                adev->dm.freesync_module = NULL;
 536        }
 537        /* DC Destroy TODO: Replace destroy DAL */
 538        if (adev->dm.dc)
 539                dc_destroy(&adev->dm.dc);
 540
 541        mutex_destroy(&adev->dm.dc_lock);
 542
 543        return;
 544}
 545
/*
 * load_dmcu_fw - request the DMCU firmware and register it for PSP loading.
 * @adev: amdgpu device
 *
 * Only Raven carries a loadable DMCU firmware; all earlier ASICs return
 * success with nothing to do. The firmware image is split into two ucode
 * table entries (ERAM and interrupt-vector regions) for the PSP.
 *
 * Return: 0 on success or when DMCU firmware is not applicable/found,
 * negative errno on request/validation failure.
 */
static int load_dmcu_fw(struct amdgpu_device *adev)
{
        const char *fw_name_dmcu;
        int r;
        const struct dmcu_firmware_header_v1_0 *hdr;

        switch(adev->asic_type) {
        /* ASICs without a loadable DMCU firmware: nothing to do. */
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
                return 0;
        case CHIP_RAVEN:
                fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        /* DMCU firmware can only be loaded through the PSP. */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
                return 0;
        }

        r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
        if (r == -ENOENT) {
                /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
                DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
                adev->dm.fw_dmcu = NULL;
                return 0;
        }
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
                        fw_name_dmcu);
                return r;
        }

        r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
                        fw_name_dmcu);
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
                return r;
        }

        /* Register the ERAM region: everything except the interrupt vectors. */
        hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        /* Register the interrupt-vector region separately. */
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

        DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

        return 0;
}
 622
 623static int dm_sw_init(void *handle)
 624{
 625        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 626
 627        return load_dmcu_fw(adev);
 628}
 629
 630static int dm_sw_fini(void *handle)
 631{
 632        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 633
 634        if(adev->dm.fw_dmcu) {
 635                release_firmware(adev->dm.fw_dmcu);
 636                adev->dm.fw_dmcu = NULL;
 637        }
 638
 639        return 0;
 640}
 641
 642static int detect_mst_link_for_all_connectors(struct drm_device *dev)
 643{
 644        struct amdgpu_dm_connector *aconnector;
 645        struct drm_connector *connector;
 646        int ret = 0;
 647
 648        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 649
 650        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 651                aconnector = to_amdgpu_dm_connector(connector);
 652                if (aconnector->dc_link->type == dc_connection_mst_branch &&
 653                    aconnector->mst_mgr.aux) {
 654                        DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
 655                                        aconnector, aconnector->base.base.id);
 656
 657                        ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
 658                        if (ret < 0) {
 659                                DRM_ERROR("DM_MST: Failed to start MST\n");
 660                                ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
 661                                return ret;
 662                                }
 663                        }
 664        }
 665
 666        drm_modeset_unlock(&dev->mode_config.connection_mutex);
 667        return ret;
 668}
 669
/*
 * amd_ip_funcs .late_init hook: program the DMCU IRAM with backlight
 * ramping parameters and a linear backlight LUT, then probe MST links.
 *
 * Return: 0 on success, -EINVAL if the IRAM load fails, or the error
 * from detect_mst_link_for_all_connectors().
 */
static int dm_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        struct dmcu_iram_parameters params;
        unsigned int linear_lut[16];
        int i;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        bool ret;

        /* Identity (linear) backlight LUT: 16 evenly spaced 16-bit entries. */
        for (i = 0; i < 16; i++)
                linear_lut[i] = 0xFFFF * i / 15;

        params.set = 0;
        params.backlight_ramping_start = 0xCCCC;
        params.backlight_ramping_reduction = 0xCCCCCCCC;
        params.backlight_lut_array_size = 16;
        params.backlight_lut_array = linear_lut;

        /* NOTE(review): dmcu may be NULL on ASICs without a DMCU; this
         * assumes dmcu_load_iram() handles that gracefully — confirm,
         * otherwise late init would fail (-EINVAL) on those parts. */
        ret = dmcu_load_iram(dmcu, params);

        if (!ret)
                return -EINVAL;

        return detect_mst_link_for_all_connectors(adev->ddev);
}
 696
/*
 * s3_handle_mst - suspend or resume MST topology managers across S3.
 * @dev:     DRM device whose connectors are walked
 * @suspend: true to suspend each MST manager, false to resume it
 *
 * Only top-level MST branch connectors are handled (ports hanging off a
 * branch are skipped). If a manager fails to resume, MST is disabled on
 * it and a hotplug event is sent afterwards so userspace re-probes.
 */
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_dp_mst_topology_mgr *mgr;
        int ret;
        bool need_hotplug = false;

        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

        list_for_each_entry(connector, &dev->mode_config.connector_list,
                            head) {
                aconnector = to_amdgpu_dm_connector(connector);
                /* Skip non-MST links and downstream MST ports. */
                if (aconnector->dc_link->type != dc_connection_mst_branch ||
                    aconnector->mst_port)
                        continue;

                mgr = &aconnector->mst_mgr;

                if (suspend) {
                        drm_dp_mst_topology_mgr_suspend(mgr);
                } else {
                        ret = drm_dp_mst_topology_mgr_resume(mgr);
                        if (ret < 0) {
                                /* Topology gone across suspend; drop MST and re-probe. */
                                drm_dp_mst_topology_mgr_set_mst(mgr, false);
                                need_hotplug = true;
                        }
                }
        }

        drm_modeset_unlock(&dev->mode_config.connection_mutex);

        /* Fire hotplug outside the lock. */
        if (need_hotplug)
                drm_kms_helper_hotplug_event(dev);
}
 732
 733/**
 734 * dm_hw_init() - Initialize DC device
 735 * @handle: The base driver device containing the amdpgu_dm device.
 736 *
 737 * Initialize the &struct amdgpu_display_manager device. This involves calling
 738 * the initializers of each DM component, then populating the struct with them.
 739 *
 740 * Although the function implies hardware initialization, both hardware and
 741 * software are initialized here. Splitting them out to their relevant init
 742 * hooks is a future TODO item.
 743 *
 744 * Some notable things that are initialized here:
 745 *
 746 * - Display Core, both software and hardware
 747 * - DC modules that we need (freesync and color management)
 748 * - DRM software states
 749 * - Interrupt sources and handlers
 750 * - Vblank support
 751 * - Debug FS entries, if enabled
 752 */
 753static int dm_hw_init(void *handle)
 754{
 755        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 756        /* Create DAL display manager */
 757        amdgpu_dm_init(adev);
 758        amdgpu_dm_hpd_init(adev);
 759
 760        return 0;
 761}
 762
/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdpgu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 *
 * Return: always 0.
 */
static int dm_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* Stop hotplug detection first so no new work is queued. */
        amdgpu_dm_hpd_fini(adev);

        amdgpu_dm_irq_fini(adev);
        amdgpu_dm_fini(adev);
        return 0;
}
 781
/*
 * amd_ip_funcs .suspend hook: cache the atomic state, quiesce MST and
 * DM interrupts, then put DC into the D3 power state.
 *
 * Return: always 0.
 */
static int dm_suspend(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct amdgpu_display_manager *dm = &adev->dm;
        int ret = 0;

        /* cached_state must be empty here; dm_resume() consumes and clears it. */
        WARN_ON(adev->dm.cached_state);
        adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

        s3_handle_mst(adev->ddev, true);

        amdgpu_dm_irq_suspend(adev);


        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

        return ret;
}
 800
 801static struct amdgpu_dm_connector *
 802amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
 803                                             struct drm_crtc *crtc)
 804{
 805        uint32_t i;
 806        struct drm_connector_state *new_con_state;
 807        struct drm_connector *connector;
 808        struct drm_crtc *crtc_from_state;
 809
 810        for_each_new_connector_in_state(state, connector, new_con_state, i) {
 811                crtc_from_state = new_con_state->crtc;
 812
 813                if (crtc_from_state == crtc)
 814                        return to_amdgpu_dm_connector(connector);
 815        }
 816
 817        return NULL;
 818}
 819
/*
 * emulated_link_detect - fabricate a sink for a link without doing real
 * detection (used when the physical connection state cannot be queried).
 * @link: DC link to populate with an emulated local sink
 *
 * Marks the link disconnected, creates a dc_sink matching the connector's
 * signal type, installs it as the local sink, and tries to read a local
 * EDID for it.
 */
static void emulated_link_detect(struct dc_link *link)
{
        struct dc_sink_init_data sink_init_data = { 0 };
        struct display_sink_capability sink_caps = { 0 };
        enum dc_edid_status edid_status;
        struct dc_context *dc_ctx = link->ctx;
        struct dc_sink *sink = NULL;
        struct dc_sink *prev_sink = NULL;

        link->type = dc_connection_none;
        prev_sink = link->local_sink;

        /* NOTE(review): prev_sink is retained here but never released or
         * otherwise used in this function — looks like a possible sink
         * refcount leak; confirm against dc_sink lifetime rules. */
        if (prev_sink != NULL)
                dc_sink_retain(prev_sink);

        /* Map the connector signal to DDC transaction type + sink signal. */
        switch (link->connector_signal) {
        case SIGNAL_TYPE_HDMI_TYPE_A: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
                break;
        }

        case SIGNAL_TYPE_DVI_SINGLE_LINK: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
                break;
        }

        case SIGNAL_TYPE_DVI_DUAL_LINK: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
                break;
        }

        case SIGNAL_TYPE_LVDS: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_LVDS;
                break;
        }

        case SIGNAL_TYPE_EDP: {
                sink_caps.transaction_type =
                        DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
                sink_caps.signal = SIGNAL_TYPE_EDP;
                break;
        }

        case SIGNAL_TYPE_DISPLAY_PORT: {
                sink_caps.transaction_type =
                        DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
                /* NOTE(review): DP is deliberately reported as a VIRTUAL
                 * sink here (unlike the other cases) — presumably required
                 * for the emulated path; confirm before changing. */
                sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
                break;
        }

        default:
                DC_ERROR("Invalid connector type! signal:%d\n",
                        link->connector_signal);
                return;
        }

        sink_init_data.link = link;
        sink_init_data.sink_signal = sink_caps.signal;

        sink = dc_sink_create(&sink_init_data);
        if (!sink) {
                DC_ERROR("Failed to create sink!\n");
                return;
        }

        /* dc_sink_create returns a new reference */
        link->local_sink = sink;

        edid_status = dm_helpers_read_local_edid(
                        link->ctx,
                        link,
                        sink);

        /* EDID read failure is logged but non-fatal for the emulated sink. */
        if (edid_status != EDID_OK)
                DC_ERROR("Failed to read EDID");

}
 901
/*
 * dm_resume() - amd_ip_funcs.resume hook for the DM IP block.
 * @handle: struct amdgpu_device * passed as void * by the base driver.
 *
 * Powers DC back on, re-runs sink detection on every non-MST connector,
 * then replays the atomic state that was cached on suspend, forcing a
 * full modeset on each CRTC.
 *
 * Return: always 0.
 */
static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	enum dc_connection_type new_connection_type = dc_connection_none;
	int i;

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* Do detection */
	list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		/* Forced connector with nothing attached: emulate a sink. */
		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		/* Drop the pre-suspend sink; detection above refreshed the link. */
		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	/* Replay the display configuration that was live before suspend. */
	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	return 0;
}
 998
 999/**
1000 * DOC: DM Lifecycle
1001 *
1002 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1003 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1004 * the base driver's device list to be initialized and torn down accordingly.
1005 *
1006 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1007 */
1008
/*
 * IP-block hooks the amdgpu base driver invokes for the DM/DC block
 * (referenced by dm_ip_block below).
 */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};
1026
/* DM registers itself with the base driver as the DCE IP block, v1.0.0. */
const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
1035
1036
1037/**
1038 * DOC: atomic
1039 *
1040 * *WIP*
1041 */
1042
/* drm_mode_config vtable: route atomic check/commit through DM. */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};
1049
1050static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1051        .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1052};
1053
/*
 * amdgpu_dm_update_connector_after_detect() - sync the drm connector's
 * sink/EDID/freesync state with the dc_link after a (possibly emulated)
 * detection.
 * @aconnector: connector whose dc_sink and EDID property are refreshed.
 *
 * Refcounting contract: a reference is taken on the link's local_sink
 * for the duration of this function and released on every exit path;
 * aconnector->dc_sink always holds its own reference.
 * NOTE(review): most callers hold aconnector->hpd_lock; the MST-branch
 * hpd_rx path does not — confirm locking expectations before changing.
 */
static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;


	sink = aconnector->dc_link->local_sink;
	/* Pin the sink while we juggle connector state below. */
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			/* No physical sink: fall back to the emulated one. */
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			/* Connected but no EDID available. */
			aconnector->edid = NULL;
			drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		} else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;


			drm_connector_update_edid_property(connector,
					aconnector->edid);
			drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
					    aconnector->edid);
		}
		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);

	} else {
		/* Disconnected: clear EDID, CEC, freesync and sink state. */
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);

	if (sink)
		dc_sink_release(sink);
}
1184
/*
 * handle_hpd_irq() - deferred handler for HPD long-pulse interrupts.
 * @param: the amdgpu_dm_connector registered in register_hpd_handlers().
 *
 * Re-detects the link (or emulates detection for a forced connector with
 * nothing attached), restores the connector's drm state and notifies
 * userspace of the hotplug for unforced connectors.
 */
static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;

	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		/* Forced connector, nothing attached: fake the detection. */
		emulated_link_detect(aconnector->dc_link);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		/* Only unforced connectors generate a userspace uevent. */
		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}
1229
/*
 * dm_handle_hpd_rx_irq() - service DP short-pulse (ESI) interrupts for MST.
 * @aconnector: DP connector whose AUX channel carries the DPCD traffic.
 *
 * Reads the downstream-IRQ DPCD block (legacy 0x200 range for DPCD < 1.2,
 * ESI 0x2002 range otherwise), hands it to the MST manager, then ACKs the
 * IRQ back to the sink and re-reads — looping while the sink keeps
 * raising new IRQs, bounded by max_process_count.
 */
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	/* Bail-out bound so a chatty sink cannot stall us forever. */
	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
		process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			/* The ACK write itself is retried up to 3 times. */
			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
1307
/*
 * handle_hpd_rx_irq() - deferred handler for DP short-pulse interrupts.
 * @param: the amdgpu_dm_connector registered in register_hpd_handlers().
 *
 * Lets DC process the RX IRQ first; if the downstream port status changed
 * (and this is not an MST root), re-detects the link and notifies
 * userspace.  MST ESI handling is delegated to dm_handle_hpd_rx_irq().
 */
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	enum dc_connection_type new_connection_type = dc_connection_none;

	/*
	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
	 * conflict, after implement i2c helper, this mutex should be
	 * retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			/* Forced connector, nothing attached: fake detection. */
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	/* MST branches (and trained links) may have pending ESI to service. */
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch) {
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}
1369
1370static void register_hpd_handlers(struct amdgpu_device *adev)
1371{
1372        struct drm_device *dev = adev->ddev;
1373        struct drm_connector *connector;
1374        struct amdgpu_dm_connector *aconnector;
1375        const struct dc_link *dc_link;
1376        struct dc_interrupt_params int_params = {0};
1377
1378        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1379        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1380
1381        list_for_each_entry(connector,
1382                        &dev->mode_config.connector_list, head) {
1383
1384                aconnector = to_amdgpu_dm_connector(connector);
1385                dc_link = aconnector->dc_link;
1386
1387                if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
1388                        int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1389                        int_params.irq_source = dc_link->irq_source_hpd;
1390
1391                        amdgpu_dm_irq_register_interrupt(adev, &int_params,
1392                                        handle_hpd_irq,
1393                                        (void *) aconnector);
1394                }
1395
1396                if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
1397
1398                        /* Also register for DP short pulse (hpd_rx). */
1399                        int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1400                        int_params.irq_source = dc_link->irq_source_hpd_rx;
1401
1402                        amdgpu_dm_irq_register_interrupt(adev, &int_params,
1403                                        handle_hpd_rx_irq,
1404                                        (void *) aconnector);
1405                }
1406        }
1407}
1408
/*
 * dce110_register_irq_handlers() - register IRQ sources and initialize IRQ
 * callbacks for pre-DCN (DCE) ASICs.
 * @adev: amdgpu device to register vblank, pageflip and HPD interrupts for.
 *
 * Return: 0 on success, or the error from amdgpu_irq_add_id().
 */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	/* SOC15-based ASICs route DCE interrupts through their own client. */
	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_VEGA12 ||
	    adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_RAVEN)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
1495
1496#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/*
 * dcn10_register_irq_handlers() - register IRQ sources and initialize IRQ
 * callbacks for DCN 1.0 ASICs.
 * @adev: amdgpu device to register vstartup, pageflip and HPD interrupts for.
 *
 * Return: 0 on success, or the error from amdgpu_irq_add_id().
 */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt (DCN's vblank equivalent), one per CRTC. */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
1581#endif
1582
1583/*
1584 * Acquires the lock for the atomic state object and returns
1585 * the new atomic state.
1586 *
1587 * This should only be called during atomic check.
1588 */
1589static int dm_atomic_get_state(struct drm_atomic_state *state,
1590                               struct dm_atomic_state **dm_state)
1591{
1592        struct drm_device *dev = state->dev;
1593        struct amdgpu_device *adev = dev->dev_private;
1594        struct amdgpu_display_manager *dm = &adev->dm;
1595        struct drm_private_state *priv_state;
1596        int ret;
1597
1598        if (*dm_state)
1599                return 0;
1600
1601        ret = drm_modeset_lock(&dm->atomic_obj_lock, state->acquire_ctx);
1602        if (ret)
1603                return ret;
1604
1605        priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
1606        if (IS_ERR(priv_state))
1607                return PTR_ERR(priv_state);
1608
1609        *dm_state = to_dm_atomic_state(priv_state);
1610
1611        return 0;
1612}
1613
1614struct dm_atomic_state *
1615dm_atomic_get_new_state(struct drm_atomic_state *state)
1616{
1617        struct drm_device *dev = state->dev;
1618        struct amdgpu_device *adev = dev->dev_private;
1619        struct amdgpu_display_manager *dm = &adev->dm;
1620        struct drm_private_obj *obj;
1621        struct drm_private_state *new_obj_state;
1622        int i;
1623
1624        for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
1625                if (obj->funcs == dm->atomic_obj.funcs)
1626                        return to_dm_atomic_state(new_obj_state);
1627        }
1628
1629        return NULL;
1630}
1631
1632struct dm_atomic_state *
1633dm_atomic_get_old_state(struct drm_atomic_state *state)
1634{
1635        struct drm_device *dev = state->dev;
1636        struct amdgpu_device *adev = dev->dev_private;
1637        struct amdgpu_display_manager *dm = &adev->dm;
1638        struct drm_private_obj *obj;
1639        struct drm_private_state *old_obj_state;
1640        int i;
1641
1642        for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
1643                if (obj->funcs == dm->atomic_obj.funcs)
1644                        return to_dm_atomic_state(old_obj_state);
1645        }
1646
1647        return NULL;
1648}
1649
1650static struct drm_private_state *
1651dm_atomic_duplicate_state(struct drm_private_obj *obj)
1652{
1653        struct dm_atomic_state *old_state, *new_state;
1654
1655        new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
1656        if (!new_state)
1657                return NULL;
1658
1659        __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
1660
1661        new_state->context = dc_create_state();
1662        if (!new_state->context) {
1663                kfree(new_state);
1664                return NULL;
1665        }
1666
1667        old_state = to_dm_atomic_state(obj->state);
1668        if (old_state && old_state->context)
1669                dc_resource_state_copy_construct(old_state->context,
1670                                                 new_state->context);
1671
1672        return &new_state->base;
1673}
1674
1675static void dm_atomic_destroy_state(struct drm_private_obj *obj,
1676                                    struct drm_private_state *state)
1677{
1678        struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
1679
1680        if (dm_state && dm_state->context)
1681                dc_release_state(dm_state->context);
1682
1683        kfree(dm_state);
1684}
1685
1686static struct drm_private_state_funcs dm_atomic_state_funcs = {
1687        .atomic_duplicate_state = dm_atomic_duplicate_state,
1688        .atomic_destroy_state = dm_atomic_destroy_state,
1689};
1690
1691static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
1692{
1693        struct dm_atomic_state *state;
1694        int r;
1695
1696        adev->mode_info.mode_config_initialized = true;
1697
1698        adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
1699        adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
1700
1701        adev->ddev->mode_config.max_width = 16384;
1702        adev->ddev->mode_config.max_height = 16384;
1703
1704        adev->ddev->mode_config.preferred_depth = 24;
1705        adev->ddev->mode_config.prefer_shadow = 1;
1706        /* indicates support for immediate flip */
1707        adev->ddev->mode_config.async_page_flip = true;
1708
1709        adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
1710
1711        drm_modeset_lock_init(&adev->dm.atomic_obj_lock);
1712
1713        state = kzalloc(sizeof(*state), GFP_KERNEL);
1714        if (!state)
1715                return -ENOMEM;
1716
1717        state->context = dc_create_state();
1718        if (!state->context) {
1719                kfree(state);
1720                return -ENOMEM;
1721        }
1722
1723        dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
1724
1725        drm_atomic_private_obj_init(&adev->dm.atomic_obj,
1726                                    &state->base,
1727                                    &dm_atomic_state_funcs);
1728
1729        r = amdgpu_display_modeset_create_props(adev);
1730        if (r)
1731                return r;
1732
1733        return 0;
1734}
1735
1736#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
1737#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
1738
1739#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1740        defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1741
/*
 * Populate dm->backlight_caps with the panel's min/max input-signal range.
 * With CONFIG_ACPI the range is queried via amdgpu_acpi_get_backlight_caps();
 * when that query reports invalid caps, or ACPI is not built in, the driver
 * defaults are used.  The successful ACPI result is cached via caps_valid,
 * making subsequent calls no-ops.
 */
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
        struct amdgpu_dm_backlight_caps caps;

        /* Already have valid cached caps; nothing to refresh. */
        if (dm->backlight_caps.caps_valid)
                return;

        amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
        if (caps.caps_valid) {
                dm->backlight_caps.min_input_signal = caps.min_input_signal;
                dm->backlight_caps.max_input_signal = caps.max_input_signal;
                dm->backlight_caps.caps_valid = true;
        } else {
                /* ACPI gave nothing usable; fall back to driver defaults. */
                dm->backlight_caps.min_input_signal =
                                AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
                dm->backlight_caps.max_input_signal =
                                AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
        }
#else
        dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
        dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}
1766
1767static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1768{
1769        struct amdgpu_display_manager *dm = bl_get_data(bd);
1770        struct amdgpu_dm_backlight_caps caps;
1771        uint32_t brightness = bd->props.brightness;
1772
1773        amdgpu_dm_update_backlight_caps(dm);
1774        caps = dm->backlight_caps;
1775        /*
1776         * The brightness input is in the range 0-255
1777         * It needs to be rescaled to be between the
1778         * requested min and max input signal
1779         *
1780         * It also needs to be scaled up by 0x101 to
1781         * match the DC interface which has a range of
1782         * 0 to 0xffff
1783         */
1784        brightness =
1785                brightness
1786                * 0x101
1787                * (caps.max_input_signal - caps.min_input_signal)
1788                / AMDGPU_MAX_BL_LEVEL
1789                + caps.min_input_signal * 0x101;
1790
1791        if (dc_link_set_backlight_level(dm->backlight_link,
1792                        brightness, 0))
1793                return 0;
1794        else
1795                return 1;
1796}
1797
1798static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
1799{
1800        struct amdgpu_display_manager *dm = bl_get_data(bd);
1801        int ret = dc_link_get_backlight_level(dm->backlight_link);
1802
1803        if (ret == DC_ERROR_UNEXPECTED)
1804                return bd->props.brightness;
1805        return ret;
1806}
1807
/* Backlight class hooks: raw 0-255 brightness in, DC link level out. */
static const struct backlight_ops amdgpu_dm_backlight_ops = {
        .get_brightness = amdgpu_dm_backlight_get_brightness,
        .update_status  = amdgpu_dm_backlight_update_status,
};
1812
1813static void
1814amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1815{
1816        char bl_name[16];
1817        struct backlight_properties props = { 0 };
1818
1819        amdgpu_dm_update_backlight_caps(dm);
1820
1821        props.max_brightness = AMDGPU_MAX_BL_LEVEL;
1822        props.brightness = AMDGPU_MAX_BL_LEVEL;
1823        props.type = BACKLIGHT_RAW;
1824
1825        snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
1826                        dm->adev->ddev->primary->index);
1827
1828        dm->backlight_dev = backlight_device_register(bl_name,
1829                        dm->adev->ddev->dev,
1830                        dm,
1831                        &amdgpu_dm_backlight_ops,
1832                        &props);
1833
1834        if (IS_ERR(dm->backlight_dev))
1835                DRM_ERROR("DM: Backlight registration failed!\n");
1836        else
1837                DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
1838}
1839
1840#endif
1841
1842static int initialize_plane(struct amdgpu_display_manager *dm,
1843                             struct amdgpu_mode_info *mode_info,
1844                             int plane_id)
1845{
1846        struct drm_plane *plane;
1847        unsigned long possible_crtcs;
1848        int ret = 0;
1849
1850        plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
1851        mode_info->planes[plane_id] = plane;
1852
1853        if (!plane) {
1854                DRM_ERROR("KMS: Failed to allocate plane\n");
1855                return -ENOMEM;
1856        }
1857        plane->type = mode_info->plane_type[plane_id];
1858
1859        /*
1860         * HACK: IGT tests expect that each plane can only have
1861         * one possible CRTC. For now, set one CRTC for each
1862         * plane that is not an underlay, but still allow multiple
1863         * CRTCs for underlay planes.
1864         */
1865        possible_crtcs = 1 << plane_id;
1866        if (plane_id >= dm->dc->caps.max_streams)
1867                possible_crtcs = 0xff;
1868
1869        ret = amdgpu_dm_plane_init(dm, mode_info->planes[plane_id], possible_crtcs);
1870
1871        if (ret) {
1872                DRM_ERROR("KMS: Failed to initialize plane\n");
1873                return ret;
1874        }
1875
1876        return ret;
1877}
1878
1879
/*
 * If @link drives an internal panel (eDP/LVDS) and something is actually
 * connected, register the backlight device and remember which link it
 * controls.  Compiled out when no backlight class support is configured.
 */
static void register_backlight_device(struct amdgpu_display_manager *dm,
                                      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
        defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

        if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
            link->type != dc_connection_none) {
                /*
                 * Even if registration failed, we should continue with
                 * DM initialization because not having a backlight control
                 * is better than a black screen.
                 */
                amdgpu_dm_register_backlight_device(dm);

                /* Only remember the link when registration succeeded. */
                if (dm->backlight_dev)
                        dm->backlight_link = link;
        }
#endif
}
1900
1901
/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
        struct amdgpu_display_manager *dm = &adev->dm;
        int32_t i;
        struct amdgpu_dm_connector *aconnector = NULL;
        struct amdgpu_encoder *aencoder = NULL;
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        uint32_t link_cnt;
        int32_t total_overlay_planes, total_primary_planes;
        enum dc_connection_type new_connection_type = dc_connection_none;

        link_cnt = dm->dc->caps.max_links;
        if (amdgpu_dm_mode_config_init(dm->adev)) {
                DRM_ERROR("DM: Failed to initialize mode config\n");
                return -EINVAL;
        }

        /* Identify the number of planes to be initialized */
        total_overlay_planes = dm->dc->caps.max_slave_planes;
        total_primary_planes = dm->dc->caps.max_planes - dm->dc->caps.max_slave_planes;

        /* First initialize overlay planes, index starting after primary planes */
        for (i = (total_overlay_planes - 1); i >= 0; i--) {
                if (initialize_plane(dm, mode_info, (total_primary_planes + i))) {
                        DRM_ERROR("KMS: Failed to initialize overlay plane\n");
                        goto fail;
                }
        }

        /* Initialize primary planes */
        for (i = (total_primary_planes - 1); i >= 0; i--) {
                if (initialize_plane(dm, mode_info, i)) {
                        DRM_ERROR("KMS: Failed to initialize primary plane\n");
                        goto fail;
                }
        }

        /* One CRTC per DC stream, each bound to the matching primary plane. */
        for (i = 0; i < dm->dc->caps.max_streams; i++)
                if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
                        DRM_ERROR("KMS: Failed to initialize crtc\n");
                        goto fail;
                }

        dm->display_indexes_num = dm->dc->caps.max_streams;

        /* loops over all connectors on the board */
        for (i = 0; i < link_cnt; i++) {
                struct dc_link *link = NULL;

                if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
                        DRM_ERROR(
                                "KMS: Cannot support more than %d display indexes\n",
                                        AMDGPU_DM_MAX_DISPLAY_INDEX);
                        continue;
                }

                aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
                if (!aconnector)
                        goto fail;

                aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
                if (!aencoder)
                        goto fail;

                if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
                        DRM_ERROR("KMS: Failed to initialize encoder\n");
                        goto fail;
                }

                if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
                        DRM_ERROR("KMS: Failed to initialize connector\n");
                        goto fail;
                }

                link = dc_get_link_at_index(dm->dc, i);

                if (!dc_link_detect_sink(link, &new_connection_type))
                        DRM_ERROR("KMS: Failed to detect connector\n");

                /*
                 * A forced connector with nothing physically attached gets an
                 * emulated link so the forced mode can still be driven;
                 * otherwise a real boot-time detect runs and, on success, the
                 * backlight device is registered for internal panels.
                 */
                if (aconnector->base.force && new_connection_type == dc_connection_none) {
                        emulated_link_detect(link);
                        amdgpu_dm_update_connector_after_detect(aconnector);

                } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
                        amdgpu_dm_update_connector_after_detect(aconnector);
                        register_backlight_device(dm, link);
                }


        }

        /* Software is initialized. Now we can register interrupt handlers. */
        switch (adev->asic_type) {
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
                if (dce110_register_irq_handlers(dm->adev)) {
                        DRM_ERROR("DM: Failed to initialize IRQ\n");
                        goto fail;
                }
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
        case CHIP_RAVEN:
                if (dcn10_register_irq_handlers(dm->adev)) {
                        DRM_ERROR("DM: Failed to initialize IRQ\n");
                        goto fail;
                }
                break;
#endif
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                goto fail;
        }

        /* APUs (Carrizo/Stoney) keep stutter as-is; others follow the mask. */
        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

        return 0;
fail:
        /* kfree(NULL) is safe; frees only the not-yet-registered pair. */
        kfree(aencoder);
        kfree(aconnector);
        for (i = 0; i < dm->dc->caps.max_planes; i++)
                kfree(mode_info->planes[i]);
        return -EINVAL;
}
2048
2049static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
2050{
2051        drm_mode_config_cleanup(dm->ddev);
2052        drm_atomic_private_obj_fini(&dm->atomic_obj);
2053        return;
2054}
2055
2056/******************************************************************************
2057 * amdgpu_display_funcs functions
2058 *****************************************************************************/
2059
/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 * Currently a stub: DC handles bandwidth internally, so nothing is done here.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
        /* TODO: implement later */
}
2071
/* Display callbacks handed to the amdgpu core; NULL entries are services DC provides itself. */
static const struct amdgpu_display_funcs dm_display_funcs = {
        .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
        .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
        .backlight_set_level = NULL, /* never called for DC */
        .backlight_get_level = NULL, /* never called for DC */
        .hpd_sense = NULL,/* called unconditionally */
        .hpd_set_polarity = NULL, /* called unconditionally */
        .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
        .page_flip_get_scanoutpos =
                dm_crtc_get_scanoutpos,/* called unconditionally */
        .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
        .add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
2085
2086#if defined(CONFIG_DEBUG_KERNEL_DC)
2087
2088static ssize_t s3_debug_store(struct device *device,
2089                              struct device_attribute *attr,
2090                              const char *buf,
2091                              size_t count)
2092{
2093        int ret;
2094        int s3_state;
2095        struct pci_dev *pdev = to_pci_dev(device);
2096        struct drm_device *drm_dev = pci_get_drvdata(pdev);
2097        struct amdgpu_device *adev = drm_dev->dev_private;
2098
2099        ret = kstrtoint(buf, 0, &s3_state);
2100
2101        if (ret == 0) {
2102                if (s3_state) {
2103                        dm_resume(adev);
2104                        drm_kms_helper_hotplug_event(adev->ddev);
2105                } else
2106                        dm_suspend(adev);
2107        }
2108
2109        return ret == 0 ? count : 0;
2110}
2111
2112DEVICE_ATTR_WO(s3_debug);
2113
2114#endif
2115
/*
 * dm_early_init - per-ASIC display topology setup
 *
 * Fills adev->mode_info with the CRTC/HPD/DIG counts and plane-type table
 * for the detected ASIC, then installs the DM IRQ functions and (if not
 * already set) the display function table.  Returns -EINVAL for ASICs DC
 * does not support.
 */
static int dm_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        switch (adev->asic_type) {
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                adev->mode_info.plane_type = dm_plane_type_default;
                break;
        case CHIP_KAVERI:
                adev->mode_info.num_crtc = 4;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 7;
                adev->mode_info.plane_type = dm_plane_type_default;
                break;
        case CHIP_KABINI:
        case CHIP_MULLINS:
                adev->mode_info.num_crtc = 2;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                adev->mode_info.plane_type = dm_plane_type_default;
                break;
        case CHIP_FIJI:
        case CHIP_TONGA:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 7;
                adev->mode_info.plane_type = dm_plane_type_default;
                break;
        case CHIP_CARRIZO:
                adev->mode_info.num_crtc = 3;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 9;
                adev->mode_info.plane_type = dm_plane_type_carizzo;
                break;
        case CHIP_STONEY:
                adev->mode_info.num_crtc = 2;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 9;
                adev->mode_info.plane_type = dm_plane_type_stoney;
                break;
        case CHIP_POLARIS11:
        case CHIP_POLARIS12:
                adev->mode_info.num_crtc = 5;
                adev->mode_info.num_hpd = 5;
                adev->mode_info.num_dig = 5;
                adev->mode_info.plane_type = dm_plane_type_default;
                break;
        case CHIP_POLARIS10:
        case CHIP_VEGAM:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                adev->mode_info.plane_type = dm_plane_type_default;
                break;
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                adev->mode_info.plane_type = dm_plane_type_default;
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
        case CHIP_RAVEN:
                adev->mode_info.num_crtc = 4;
                adev->mode_info.num_hpd = 4;
                adev->mode_info.num_dig = 4;
                adev->mode_info.plane_type = dm_plane_type_default;
                break;
#endif
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        amdgpu_dm_set_irq_funcs(adev);

        if (adev->mode_info.funcs == NULL)
                adev->mode_info.funcs = &dm_display_funcs;

        /*
         * Note: Do NOT change adev->audio_endpt_rreg and
         * adev->audio_endpt_wreg because they are initialised in
         * amdgpu_device_init()
         */
#if defined(CONFIG_DEBUG_KERNEL_DC)
        device_create_file(
                adev->ddev->dev,
                &dev_attr_s3_debug);
#endif

        return 0;
}
2213
2214static bool modeset_required(struct drm_crtc_state *crtc_state,
2215                             struct dc_stream_state *new_stream,
2216                             struct dc_stream_state *old_stream)
2217{
2218        if (!drm_atomic_crtc_needs_modeset(crtc_state))
2219                return false;
2220
2221        if (!crtc_state->enable)
2222                return false;
2223
2224        return crtc_state->active;
2225}
2226
2227static bool modereset_required(struct drm_crtc_state *crtc_state)
2228{
2229        if (!drm_atomic_crtc_needs_modeset(crtc_state))
2230                return false;
2231
2232        return !crtc_state->enable || !crtc_state->active;
2233}
2234
/* drm_encoder_funcs.destroy: unregister the encoder and free its wrapper. */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
        drm_encoder_cleanup(encoder);
        kfree(encoder);
}
2240
/* Encoder vtable: only destruction is needed; the rest is DC-driven. */
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
        .destroy = amdgpu_dm_encoder_destroy,
};
2244
2245static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
2246                                        struct dc_plane_state *plane_state)
2247{
2248        plane_state->src_rect.x = state->src_x >> 16;
2249        plane_state->src_rect.y = state->src_y >> 16;
2250        /* we ignore the mantissa for now and do not deal with floating pixels :( */
2251        plane_state->src_rect.width = state->src_w >> 16;
2252
2253        if (plane_state->src_rect.width == 0)
2254                return false;
2255
2256        plane_state->src_rect.height = state->src_h >> 16;
2257        if (plane_state->src_rect.height == 0)
2258                return false;
2259
2260        plane_state->dst_rect.x = state->crtc_x;
2261        plane_state->dst_rect.y = state->crtc_y;
2262
2263        if (state->crtc_w == 0)
2264                return false;
2265
2266        plane_state->dst_rect.width = state->crtc_w;
2267
2268        if (state->crtc_h == 0)
2269                return false;
2270
2271        plane_state->dst_rect.height = state->crtc_h;
2272
2273        plane_state->clip_rect = plane_state->dst_rect;
2274
2275        switch (state->rotation & DRM_MODE_ROTATE_MASK) {
2276        case DRM_MODE_ROTATE_0:
2277                plane_state->rotation = ROTATION_ANGLE_0;
2278                break;
2279        case DRM_MODE_ROTATE_90:
2280                plane_state->rotation = ROTATION_ANGLE_90;
2281                break;
2282        case DRM_MODE_ROTATE_180:
2283                plane_state->rotation = ROTATION_ANGLE_180;
2284                break;
2285        case DRM_MODE_ROTATE_270:
2286                plane_state->rotation = ROTATION_ANGLE_270;
2287                break;
2288        default:
2289                plane_state->rotation = ROTATION_ANGLE_0;
2290                break;
2291        }
2292
2293        return true;
2294}
2295static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
2296                       uint64_t *tiling_flags)
2297{
2298        struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
2299        int r = amdgpu_bo_reserve(rbo, false);
2300
2301        if (unlikely(r)) {
2302                /* Don't show error message when returning -ERESTARTSYS */
2303                if (r != -ERESTARTSYS)
2304                        DRM_ERROR("Unable to reserve buffer: %d\n", r);
2305                return r;
2306        }
2307
2308        if (tiling_flags)
2309                amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
2310
2311        amdgpu_bo_unreserve(rbo);
2312
2313        return r;
2314}
2315
/*
 * Translate an amdgpu framebuffer into DC plane attributes: pixel format,
 * surface (or luma/chroma) geometry, and GFX8/GFX9 tiling parameters read
 * from the BO's tiling flags.  Returns 0 on success, -EINVAL for an
 * unsupported pixel format, or the error from reserving the BO.
 */
static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
                                         struct dc_plane_state *plane_state,
                                         const struct amdgpu_framebuffer *amdgpu_fb)
{
        uint64_t tiling_flags;
        unsigned int awidth;
        const struct drm_framebuffer *fb = &amdgpu_fb->base;
        int ret = 0;
        struct drm_format_name_buf format_name;

        /* Reads the tiling flags from the BO (reserves/unreserves it). */
        ret = get_fb_info(
                amdgpu_fb,
                &tiling_flags);

        if (ret)
                return ret;

        /* Map the DRM fourcc onto DC's surface pixel format enum. */
        switch (fb->format->format) {
        case DRM_FORMAT_C8:
                plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
                break;
        case DRM_FORMAT_RGB565:
                plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
                break;
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_ARGB8888:
                plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
                break;
        case DRM_FORMAT_XRGB2101010:
        case DRM_FORMAT_ARGB2101010:
                plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
                break;
        case DRM_FORMAT_XBGR2101010:
        case DRM_FORMAT_ABGR2101010:
                plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
                break;
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_ABGR8888:
                plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
                break;
        case DRM_FORMAT_NV21:
                plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
                break;
        case DRM_FORMAT_NV12:
                plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
                break;
        default:
                DRM_ERROR("Unsupported screen format %s\n",
                          drm_get_format_name(fb->format->format, &format_name));
                return -EINVAL;
        }

        /* Graphics (RGB/paletted) formats use a single surface plane... */
        if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
                plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
                plane_state->plane_size.grph.surface_size.x = 0;
                plane_state->plane_size.grph.surface_size.y = 0;
                plane_state->plane_size.grph.surface_size.width = fb->width;
                plane_state->plane_size.grph.surface_size.height = fb->height;
                /* Pitch is stored in pixels, hence the divide by bytes-per-pixel. */
                plane_state->plane_size.grph.surface_pitch =
                                fb->pitches[0] / fb->format->cpp[0];
                /* TODO: unhardcode */
                plane_state->color_space = COLOR_SPACE_SRGB;

        } else {
                /* ...video (NV12/NV21) formats use separate luma/chroma planes. */
                awidth = ALIGN(fb->width, 64);
                plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
                plane_state->plane_size.video.luma_size.x = 0;
                plane_state->plane_size.video.luma_size.y = 0;
                plane_state->plane_size.video.luma_size.width = awidth;
                plane_state->plane_size.video.luma_size.height = fb->height;
                /* TODO: unhardcode */
                plane_state->plane_size.video.luma_pitch = awidth;

                plane_state->plane_size.video.chroma_size.x = 0;
                plane_state->plane_size.video.chroma_size.y = 0;
                plane_state->plane_size.video.chroma_size.width = awidth;
                plane_state->plane_size.video.chroma_size.height = fb->height;
                plane_state->plane_size.video.chroma_pitch = awidth / 2;

                /* TODO: unhardcode */
                plane_state->color_space = COLOR_SPACE_YCBCR709;
        }

        memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));

        /* Fill GFX8 params */
        if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
                unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

                bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
                bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
                mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
                tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
                num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

                /* XXX fix me for VI */
                plane_state->tiling_info.gfx8.num_banks = num_banks;
                plane_state->tiling_info.gfx8.array_mode =
                                DC_ARRAY_2D_TILED_THIN1;
                plane_state->tiling_info.gfx8.tile_split = tile_split;
                plane_state->tiling_info.gfx8.bank_width = bankw;
                plane_state->tiling_info.gfx8.bank_height = bankh;
                plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
                plane_state->tiling_info.gfx8.tile_mode =
                                DC_ADDR_SURF_MICRO_TILING_DISPLAY;
        } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
                        == DC_ARRAY_1D_TILED_THIN1) {
                plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
        }

        plane_state->tiling_info.gfx8.pipe_config =
                        AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

        /* Vega/Raven parts additionally need the GFX9 addressing parameters. */
        if (adev->asic_type == CHIP_VEGA10 ||
            adev->asic_type == CHIP_VEGA12 ||
            adev->asic_type == CHIP_VEGA20 ||
            adev->asic_type == CHIP_RAVEN) {
                /* Fill GFX9 params */
                plane_state->tiling_info.gfx9.num_pipes =
                        adev->gfx.config.gb_addr_config_fields.num_pipes;
                plane_state->tiling_info.gfx9.num_banks =
                        adev->gfx.config.gb_addr_config_fields.num_banks;
                plane_state->tiling_info.gfx9.pipe_interleave =
                        adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
                plane_state->tiling_info.gfx9.num_shader_engines =
                        adev->gfx.config.gb_addr_config_fields.num_se;
                plane_state->tiling_info.gfx9.max_compressed_frags =
                        adev->gfx.config.gb_addr_config_fields.max_compress_frags;
                plane_state->tiling_info.gfx9.num_rb_per_se =
                        adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
                plane_state->tiling_info.gfx9.swizzle =
                        AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
                plane_state->tiling_info.gfx9.shaderEnable = 1;
        }

        plane_state->visible = true;
        plane_state->scaling_quality.h_taps_c = 0;
        plane_state->scaling_quality.v_taps_c = 0;

        /* is this needed? is plane_state zeroed at allocation? */
        plane_state->scaling_quality.h_taps = 0;
        plane_state->scaling_quality.v_taps = 0;
        plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;

        return ret;

}
2463
2464static int fill_plane_attributes(struct amdgpu_device *adev,
2465                                 struct dc_plane_state *dc_plane_state,
2466                                 struct drm_plane_state *plane_state,
2467                                 struct drm_crtc_state *crtc_state)
2468{
2469        const struct amdgpu_framebuffer *amdgpu_fb =
2470                to_amdgpu_framebuffer(plane_state->fb);
2471        const struct drm_crtc *crtc = plane_state->crtc;
2472        int ret = 0;
2473
2474        if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
2475                return -EINVAL;
2476
2477        ret = fill_plane_attributes_from_fb(
2478                crtc->dev->dev_private,
2479                dc_plane_state,
2480                amdgpu_fb);
2481
2482        if (ret)
2483                return ret;
2484
2485        /*
2486         * Always set input transfer function, since plane state is refreshed
2487         * every time.
2488         */
2489        ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state);
2490        if (ret) {
2491                dc_transfer_func_release(dc_plane_state->in_transfer_func);
2492                dc_plane_state->in_transfer_func = NULL;
2493        }
2494
2495        return ret;
2496}
2497
2498static void update_stream_scaling_settings(const struct drm_display_mode *mode,
2499                                           const struct dm_connector_state *dm_state,
2500                                           struct dc_stream_state *stream)
2501{
2502        enum amdgpu_rmx_type rmx_type;
2503
2504        struct rect src = { 0 }; /* viewport in composition space*/
2505        struct rect dst = { 0 }; /* stream addressable area */
2506
2507        /* no mode. nothing to be done */
2508        if (!mode)
2509                return;
2510
2511        /* Full screen scaling by default */
2512        src.width = mode->hdisplay;
2513        src.height = mode->vdisplay;
2514        dst.width = stream->timing.h_addressable;
2515        dst.height = stream->timing.v_addressable;
2516
2517        if (dm_state) {
2518                rmx_type = dm_state->scaling;
2519                if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
2520                        if (src.width * dst.height <
2521                                        src.height * dst.width) {
2522                                /* height needs less upscaling/more downscaling */
2523                                dst.width = src.width *
2524                                                dst.height / src.height;
2525                        } else {
2526                                /* width needs less upscaling/more downscaling */
2527                                dst.height = src.height *
2528                                                dst.width / src.width;
2529                        }
2530                } else if (rmx_type == RMX_CENTER) {
2531                        dst = src;
2532                }
2533
2534                dst.x = (stream->timing.h_addressable - dst.width) / 2;
2535                dst.y = (stream->timing.v_addressable - dst.height) / 2;
2536
2537                if (dm_state->underscan_enable) {
2538                        dst.x += dm_state->underscan_hborder / 2;
2539                        dst.y += dm_state->underscan_vborder / 2;
2540                        dst.width -= dm_state->underscan_hborder;
2541                        dst.height -= dm_state->underscan_vborder;
2542                }
2543        }
2544
2545        stream->src = src;
2546        stream->dst = dst;
2547
2548        DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
2549                        dst.x, dst.y, dst.width, dst.height);
2550
2551}
2552
2553static enum dc_color_depth
2554convert_color_depth_from_display_info(const struct drm_connector *connector)
2555{
2556        struct dm_connector_state *dm_conn_state =
2557                to_dm_connector_state(connector->state);
2558        uint32_t bpc = connector->display_info.bpc;
2559
2560        /* TODO: Remove this when there's support for max_bpc in drm */
2561        if (dm_conn_state && bpc > dm_conn_state->max_bpc)
2562                /* Round down to nearest even number. */
2563                bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1);
2564
2565        switch (bpc) {
2566        case 0:
2567                /*
2568                 * Temporary Work around, DRM doesn't parse color depth for
2569                 * EDID revision before 1.4
2570                 * TODO: Fix edid parsing
2571                 */
2572                return COLOR_DEPTH_888;
2573        case 6:
2574                return COLOR_DEPTH_666;
2575        case 8:
2576                return COLOR_DEPTH_888;
2577        case 10:
2578                return COLOR_DEPTH_101010;
2579        case 12:
2580                return COLOR_DEPTH_121212;
2581        case 14:
2582                return COLOR_DEPTH_141414;
2583        case 16:
2584                return COLOR_DEPTH_161616;
2585        default:
2586                return COLOR_DEPTH_UNDEFINED;
2587        }
2588}
2589
/*
 * Translate DRM's picture aspect ratio into DC's aspect-ratio enum.
 * 1-1 mapping, since both enums follow the HDMI spec.
 */
static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}
2596
2597static enum dc_color_space
2598get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
2599{
2600        enum dc_color_space color_space = COLOR_SPACE_SRGB;
2601
2602        switch (dc_crtc_timing->pixel_encoding) {
2603        case PIXEL_ENCODING_YCBCR422:
2604        case PIXEL_ENCODING_YCBCR444:
2605        case PIXEL_ENCODING_YCBCR420:
2606        {
2607                /*
2608                 * 27030khz is the separation point between HDTV and SDTV
2609                 * according to HDMI spec, we use YCbCr709 and YCbCr601
2610                 * respectively
2611                 */
2612                if (dc_crtc_timing->pix_clk_khz > 27030) {
2613                        if (dc_crtc_timing->flags.Y_ONLY)
2614                                color_space =
2615                                        COLOR_SPACE_YCBCR709_LIMITED;
2616                        else
2617                                color_space = COLOR_SPACE_YCBCR709;
2618                } else {
2619                        if (dc_crtc_timing->flags.Y_ONLY)
2620                                color_space =
2621                                        COLOR_SPACE_YCBCR601_LIMITED;
2622                        else
2623                                color_space = COLOR_SPACE_YCBCR601;
2624                }
2625
2626        }
2627        break;
2628        case PIXEL_ENCODING_RGB:
2629                color_space = COLOR_SPACE_SRGB;
2630                break;
2631
2632        default:
2633                WARN_ON(1);
2634                break;
2635        }
2636
2637        return color_space;
2638}
2639
2640static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
2641{
2642        if (timing_out->display_color_depth <= COLOR_DEPTH_888)
2643                return;
2644
2645        timing_out->display_color_depth--;
2646}
2647
2648static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
2649                                                const struct drm_display_info *info)
2650{
2651        int normalized_clk;
2652        if (timing_out->display_color_depth <= COLOR_DEPTH_888)
2653                return;
2654        do {
2655                normalized_clk = timing_out->pix_clk_khz;
2656                /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
2657                if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
2658                        normalized_clk /= 2;
2659                /* Adjusting pix clock following on HDMI spec based on colour depth */
2660                switch (timing_out->display_color_depth) {
2661                case COLOR_DEPTH_101010:
2662                        normalized_clk = (normalized_clk * 30) / 24;
2663                        break;
2664                case COLOR_DEPTH_121212:
2665                        normalized_clk = (normalized_clk * 36) / 24;
2666                        break;
2667                case COLOR_DEPTH_161616:
2668                        normalized_clk = (normalized_clk * 48) / 24;
2669                        break;
2670                default:
2671                        return;
2672                }
2673                if (normalized_clk <= info->max_tmds_clock)
2674                        return;
2675                reduce_mode_colour_depth(timing_out);
2676
2677        } while (timing_out->display_color_depth > COLOR_DEPTH_888);
2678
2679}
2680
/*
 * Populate stream->timing (and output color space / transfer function) from
 * a DRM display mode and the connector's display info.
 *
 * If @old_stream is given, its VIC and sync polarities are carried over
 * (used when only scaling changed and the refresh rate is unchanged).
 */
static void
fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
					     const struct drm_display_mode *mode_in,
					     const struct drm_connector *connector,
					     const struct dc_stream_state *old_stream)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;

	memset(timing_out, 0, sizeof(struct dc_crtc_timing));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	/* Pick the pixel encoding: 4:2:0 if the mode supports nothing else,
	 * 4:4:4 if the HDMI sink advertises it, RGB otherwise. */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
			connector);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	/* Reuse VIC and sync polarity from the old stream when available,
	 * otherwise derive them from the DRM mode. */
	if(old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	/* Copy the hardware (crtc_*) timing fields into DC's timing struct */
	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_khz = mode_in->crtc_clock;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	/* Default output transfer function is sRGB */
	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	/* HDMI sinks may force a lower colour depth via max TMDS clock */
	if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
		adjust_colour_depth_from_display_info(timing_out, info);
}
2746
2747static void fill_audio_info(struct audio_info *audio_info,
2748                            const struct drm_connector *drm_connector,
2749                            const struct dc_sink *dc_sink)
2750{
2751        int i = 0;
2752        int cea_revision = 0;
2753        const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
2754
2755        audio_info->manufacture_id = edid_caps->manufacturer_id;
2756        audio_info->product_id = edid_caps->product_id;
2757
2758        cea_revision = drm_connector->display_info.cea_rev;
2759
2760        strscpy(audio_info->display_name,
2761                edid_caps->display_name,
2762                AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
2763
2764        if (cea_revision >= 3) {
2765                audio_info->mode_count = edid_caps->audio_mode_count;
2766
2767                for (i = 0; i < audio_info->mode_count; ++i) {
2768                        audio_info->modes[i].format_code =
2769                                        (enum audio_format_code)
2770                                        (edid_caps->audio_modes[i].format_code);
2771                        audio_info->modes[i].channel_count =
2772                                        edid_caps->audio_modes[i].channel_count;
2773                        audio_info->modes[i].sample_rates.all =
2774                                        edid_caps->audio_modes[i].sample_rate;
2775                        audio_info->modes[i].sample_size =
2776                                        edid_caps->audio_modes[i].sample_size;
2777                }
2778        }
2779
2780        audio_info->flags.all = edid_caps->speaker_flags;
2781
2782        /* TODO: We only check for the progressive mode, check for interlace mode too */
2783        if (drm_connector->latency_present[0]) {
2784                audio_info->video_latency = drm_connector->video_latency[0];
2785                audio_info->audio_latency = drm_connector->audio_latency[0];
2786        }
2787
2788        /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
2789
2790}
2791
2792static void
2793copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
2794                                      struct drm_display_mode *dst_mode)
2795{
2796        dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
2797        dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
2798        dst_mode->crtc_clock = src_mode->crtc_clock;
2799        dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
2800        dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
2801        dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
2802        dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
2803        dst_mode->crtc_htotal = src_mode->crtc_htotal;
2804        dst_mode->crtc_hskew = src_mode->crtc_hskew;
2805        dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
2806        dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
2807        dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
2808        dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
2809        dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
2810}
2811
2812static void
2813decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
2814                                        const struct drm_display_mode *native_mode,
2815                                        bool scale_enabled)
2816{
2817        if (scale_enabled) {
2818                copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2819        } else if (native_mode->clock == drm_mode->clock &&
2820                        native_mode->htotal == drm_mode->htotal &&
2821                        native_mode->vtotal == drm_mode->vtotal) {
2822                copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2823        } else {
2824                /* no scaling nor amdgpu inserted, no need to patch */
2825        }
2826}
2827
2828static struct dc_sink *
2829create_fake_sink(struct amdgpu_dm_connector *aconnector)
2830{
2831        struct dc_sink_init_data sink_init_data = { 0 };
2832        struct dc_sink *sink = NULL;
2833        sink_init_data.link = aconnector->dc_link;
2834        sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
2835
2836        sink = dc_sink_create(&sink_init_data);
2837        if (!sink) {
2838                DRM_ERROR("Failed to create sink!\n");
2839                return NULL;
2840        }
2841        sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
2842
2843        return sink;
2844}
2845
2846static void set_multisync_trigger_params(
2847                struct dc_stream_state *stream)
2848{
2849        if (stream->triggered_crtc_reset.enabled) {
2850                stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
2851                stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
2852        }
2853}
2854
2855static void set_master_stream(struct dc_stream_state *stream_set[],
2856                              int stream_count)
2857{
2858        int j, highest_rfr = 0, master_stream = 0;
2859
2860        for (j = 0;  j < stream_count; j++) {
2861                if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
2862                        int refresh_rate = 0;
2863
2864                        refresh_rate = (stream_set[j]->timing.pix_clk_khz*1000)/
2865                                (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
2866                        if (refresh_rate > highest_rfr) {
2867                                highest_rfr = refresh_rate;
2868                                master_stream = j;
2869                        }
2870                }
2871        }
2872        for (j = 0;  j < stream_count; j++) {
2873                if (stream_set[j])
2874                        stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
2875        }
2876}
2877
2878static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
2879{
2880        int i = 0;
2881
2882        if (context->stream_count < 2)
2883                return;
2884        for (i = 0; i < context->stream_count ; i++) {
2885                if (!context->streams[i])
2886                        continue;
2887                /*
2888                 * TODO: add a function to read AMD VSDB bits and set
2889                 * crtc_sync_master.multi_sync_enabled flag
2890                 * For now it's set to false
2891                 */
2892                set_multisync_trigger_params(context->streams[i]);
2893        }
2894        set_master_stream(context->streams, context->stream_count);
2895}
2896
/*
 * Create and populate a dc_stream_state for the given connector and mode.
 *
 * If the connector has no DC sink (and is not an MST port), a fake virtual
 * sink is created so a stream can still be built. When scaling is active
 * and the refresh rate is unchanged, timing details (VIC, sync polarity)
 * are carried over from @old_stream.
 *
 * Returns the new stream, or NULL on failure. Caller owns the reference.
 */
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;

	struct dc_sink *sink = NULL;
	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		if (!aconnector->mst_port) {
			sink = create_fake_sink(aconnector);
			if (!sink)
				return stream;
		}
		/*
		 * NOTE(review): when mst_port is set, sink stays NULL here and
		 * is passed to dc_create_stream_for_sink()/dc_sink_release()
		 * below - verify both helpers tolerate a NULL sink.
		 */
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	/* No preferred mode flagged: fall back to the first listed mode */
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	* If scaling is enabled and refresh rate didn't change
	* we copy the vic and polarities of the old timings
	*/
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, NULL);
	else
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, old_stream);

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream);

	/* Freesync-capable sinks may ignore the MSA timing parameter */
	if (dm_state && dm_state->freesync_capable)
		stream->ignore_msa_timing_param = true;

finish:
	/* Drop our local reference; the stream holds its own */
	dc_sink_release(sink);

	return stream;
}
2999
/* drm_crtc_funcs.destroy hook: release DRM core CRTC state and free it */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
3005
3006static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3007                                  struct drm_crtc_state *state)
3008{
3009        struct dm_crtc_state *cur = to_dm_crtc_state(state);
3010
3011        /* TODO Destroy dc_stream objects are stream object is flattened */
3012        if (cur->stream)
3013                dc_stream_release(cur->stream);
3014
3015
3016        __drm_atomic_helper_crtc_destroy_state(state);
3017
3018
3019        kfree(state);
3020}
3021
3022static void dm_crtc_reset_state(struct drm_crtc *crtc)
3023{
3024        struct dm_crtc_state *state;
3025
3026        if (crtc->state)
3027                dm_crtc_destroy_state(crtc, crtc->state);
3028
3029        state = kzalloc(sizeof(*state), GFP_KERNEL);
3030        if (WARN_ON(!state))
3031                return;
3032
3033        crtc->state = &state->base;
3034        crtc->state->crtc = crtc;
3035
3036}
3037
3038static struct drm_crtc_state *
3039dm_crtc_duplicate_state(struct drm_crtc *crtc)
3040{
3041        struct dm_crtc_state *state, *cur;
3042
3043        cur = to_dm_crtc_state(crtc->state);
3044
3045        if (WARN_ON(!crtc->state))
3046                return NULL;
3047
3048        state = kzalloc(sizeof(*state), GFP_KERNEL);
3049        if (!state)
3050                return NULL;
3051
3052        __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
3053
3054        if (cur->stream) {
3055                state->stream = cur->stream;
3056                dc_stream_retain(state->stream);
3057        }
3058
3059        state->vrr_params = cur->vrr_params;
3060        state->vrr_infopacket = cur->vrr_infopacket;
3061        state->abm_level = cur->abm_level;
3062        state->vrr_supported = cur->vrr_supported;
3063        state->freesync_config = cur->freesync_config;
3064        state->crc_enabled = cur->crc_enabled;
3065
3066        /* TODO Duplicate dc_stream after objects are stream object is flattened */
3067
3068        return &state->base;
3069}
3070
3071
3072static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
3073{
3074        enum dc_irq_source irq_source;
3075        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3076        struct amdgpu_device *adev = crtc->dev->dev_private;
3077
3078        irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
3079        return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
3080}
3081
/* drm_crtc_funcs.enable_vblank hook */
static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}
3086
/* drm_crtc_funcs.disable_vblank hook */
static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}
3091
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
};
3106
3107static enum drm_connector_status
3108amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
3109{
3110        bool connected;
3111        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3112
3113        /*
3114         * Notes:
3115         * 1. This interface is NOT called in context of HPD irq.
3116         * 2. This interface *is called* in context of user-mode ioctl. Which
3117         * makes it a bad place for *any* MST-related activity.
3118         */
3119
3120        if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
3121            !aconnector->fake_enable)
3122                connected = (aconnector->dc_sink != NULL);
3123        else
3124                connected = (aconnector->base.force == DRM_FORCE_ON);
3125
3126        return (connected ? connector_status_connected :
3127                        connector_status_disconnected);
3128}
3129
3130int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
3131                                            struct drm_connector_state *connector_state,
3132                                            struct drm_property *property,
3133                                            uint64_t val)
3134{
3135        struct drm_device *dev = connector->dev;
3136        struct amdgpu_device *adev = dev->dev_private;
3137        struct dm_connector_state *dm_old_state =
3138                to_dm_connector_state(connector->state);
3139        struct dm_connector_state *dm_new_state =
3140                to_dm_connector_state(connector_state);
3141
3142        int ret = -EINVAL;
3143
3144        if (property == dev->mode_config.scaling_mode_property) {
3145                enum amdgpu_rmx_type rmx_type;
3146
3147                switch (val) {
3148                case DRM_MODE_SCALE_CENTER:
3149                        rmx_type = RMX_CENTER;
3150                        break;
3151                case DRM_MODE_SCALE_ASPECT:
3152                        rmx_type = RMX_ASPECT;
3153                        break;
3154                case DRM_MODE_SCALE_FULLSCREEN:
3155                        rmx_type = RMX_FULL;
3156                        break;
3157                case DRM_MODE_SCALE_NONE:
3158                default:
3159                        rmx_type = RMX_OFF;
3160                        break;
3161                }
3162
3163                if (dm_old_state->scaling == rmx_type)
3164                        return 0;
3165
3166                dm_new_state->scaling = rmx_type;
3167                ret = 0;
3168        } else if (property == adev->mode_info.underscan_hborder_property) {
3169                dm_new_state->underscan_hborder = val;
3170                ret = 0;
3171        } else if (property == adev->mode_info.underscan_vborder_property) {
3172                dm_new_state->underscan_vborder = val;
3173                ret = 0;
3174        } else if (property == adev->mode_info.underscan_property) {
3175                dm_new_state->underscan_enable = val;
3176                ret = 0;
3177        } else if (property == adev->mode_info.max_bpc_property) {
3178                dm_new_state->max_bpc = val;
3179                ret = 0;
3180        } else if (property == adev->mode_info.abm_level_property) {
3181                dm_new_state->abm_level = val;
3182                ret = 0;
3183        }
3184
3185        return ret;
3186}
3187
3188int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
3189                                            const struct drm_connector_state *state,
3190                                            struct drm_property *property,
3191                                            uint64_t *val)
3192{
3193        struct drm_device *dev = connector->dev;
3194        struct amdgpu_device *adev = dev->dev_private;
3195        struct dm_connector_state *dm_state =
3196                to_dm_connector_state(state);
3197        int ret = -EINVAL;
3198
3199        if (property == dev->mode_config.scaling_mode_property) {
3200                switch (dm_state->scaling) {
3201                case RMX_CENTER:
3202                        *val = DRM_MODE_SCALE_CENTER;
3203                        break;
3204                case RMX_ASPECT:
3205                        *val = DRM_MODE_SCALE_ASPECT;
3206                        break;
3207                case RMX_FULL:
3208                        *val = DRM_MODE_SCALE_FULLSCREEN;
3209                        break;
3210                case RMX_OFF:
3211                default:
3212                        *val = DRM_MODE_SCALE_NONE;
3213                        break;
3214                }
3215                ret = 0;
3216        } else if (property == adev->mode_info.underscan_hborder_property) {
3217                *val = dm_state->underscan_hborder;
3218                ret = 0;
3219        } else if (property == adev->mode_info.underscan_vborder_property) {
3220                *val = dm_state->underscan_vborder;
3221                ret = 0;
3222        } else if (property == adev->mode_info.underscan_property) {
3223                *val = dm_state->underscan_enable;
3224                ret = 0;
3225        } else if (property == adev->mode_info.max_bpc_property) {
3226                *val = dm_state->max_bpc;
3227                ret = 0;
3228        } else if (property == adev->mode_info.abm_level_property) {
3229                *val = dm_state->abm_level;
3230                ret = 0;
3231        }
3232
3233        return ret;
3234}
3235
/*
 * Tear down an amdgpu DM connector: unregister the backlight device when
 * this eDP/LVDS link owns it, drop the emulated and detected sink
 * references, and release the DRM connector object itself.
 */
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	/* Only a connected eDP/LVDS link can own the registered backlight. */
	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	/* Drop the emulated (forced-EDID) sink, if one was created. */
	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	/* Drop the currently detected sink. */
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}
3266
3267void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
3268{
3269        struct dm_connector_state *state =
3270                to_dm_connector_state(connector->state);
3271
3272        if (connector->state)
3273                __drm_atomic_helper_connector_destroy_state(connector->state);
3274
3275        kfree(state);
3276
3277        state = kzalloc(sizeof(*state), GFP_KERNEL);
3278
3279        if (state) {
3280                state->scaling = RMX_OFF;
3281                state->underscan_enable = false;
3282                state->underscan_hborder = 0;
3283                state->underscan_vborder = 0;
3284                state->max_bpc = 8;
3285
3286                __drm_atomic_helper_connector_reset(connector, &state->base);
3287        }
3288}
3289
3290struct drm_connector_state *
3291amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
3292{
3293        struct dm_connector_state *state =
3294                to_dm_connector_state(connector->state);
3295
3296        struct dm_connector_state *new_state =
3297                        kmemdup(state, sizeof(*state), GFP_KERNEL);
3298
3299        if (!new_state)
3300                return NULL;
3301
3302        __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
3303
3304        new_state->freesync_capable = state->freesync_capable;
3305        new_state->abm_level = state->abm_level;
3306        new_state->scaling = state->scaling;
3307        new_state->underscan_enable = state->underscan_enable;
3308        new_state->underscan_hborder = state->underscan_hborder;
3309        new_state->underscan_vborder = state->underscan_vborder;
3310        new_state->max_bpc = state->max_bpc;
3311
3312        return &new_state->base;
3313}
3314
/* DRM connector callbacks; atomic state handling uses the DM wrappers above. */
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property
};
3325
/* Trampoline for drm_connector_helper_funcs.get_modes. */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
3330
/*
 * Create an emulated DC sink from the EDID blob attached to the connector
 * (used for forced/override-EDID connectors).  If no EDID blob is present,
 * the connector is forced off instead.
 */
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
				aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	/* Register the EDID contents as a remote sink on the DC link. */
	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON) {
		/* Prefer a real local sink when one exists; otherwise fall
		 * back to the emulated sink just created. */
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
		aconnector->dc_link->local_sink :
		aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}
3365
3366static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
3367{
3368        struct dc_link *link = (struct dc_link *)aconnector->dc_link;
3369
3370        /*
3371         * In case of headless boot with force on for DP managed connector
3372         * Those settings have to be != 0 to get initial modeset
3373         */
3374        if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
3375                link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
3376                link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
3377        }
3378
3379
3380        aconnector->base.override_edid = true;
3381        create_eml_sink(aconnector);
3382}
3383
3384enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3385                                   struct drm_display_mode *mode)
3386{
3387        int result = MODE_ERROR;
3388        struct dc_sink *dc_sink;
3389        struct amdgpu_device *adev = connector->dev->dev_private;
3390        /* TODO: Unhardcode stream count */
3391        struct dc_stream_state *stream;
3392        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3393        enum dc_status dc_result = DC_OK;
3394
3395        if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
3396                        (mode->flags & DRM_MODE_FLAG_DBLSCAN))
3397                return result;
3398
3399        /*
3400         * Only run this the first time mode_valid is called to initilialize
3401         * EDID mgmt
3402         */
3403        if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
3404                !aconnector->dc_em_sink)
3405                handle_edid_mgmt(aconnector);
3406
3407        dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
3408
3409        if (dc_sink == NULL) {
3410                DRM_ERROR("dc_sink is NULL!\n");
3411                goto fail;
3412        }
3413
3414        stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
3415        if (stream == NULL) {
3416                DRM_ERROR("Failed to create stream for sink!\n");
3417                goto fail;
3418        }
3419
3420        dc_result = dc_validate_stream(adev->dm.dc, stream);
3421
3422        if (dc_result == DC_OK)
3423                result = MODE_OK;
3424        else
3425                DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
3426                              mode->vdisplay,
3427                              mode->hdisplay,
3428                              mode->clock,
3429                              dc_result);
3430
3431        dc_stream_release(stream);
3432
3433fail:
3434        /* TODO: error handling*/
3435        return result;
3436}
3437
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
	 * modes will be filtered by drm_mode_validate_size(), and those modes
	 * are missing after user start lightdm. So we need to renew modes list.
	 * in get_modes call back, not just return the modes count
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
};
3449
/* Intentionally a no-op; the helper table requires a .disable callback. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
3453
3454static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
3455                                       struct drm_crtc_state *state)
3456{
3457        struct amdgpu_device *adev = crtc->dev->dev_private;
3458        struct dc *dc = adev->dm.dc;
3459        struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
3460        int ret = -EINVAL;
3461
3462        if (unlikely(!dm_crtc_state->stream &&
3463                     modeset_required(state, NULL, dm_crtc_state->stream))) {
3464                WARN_ON(1);
3465                return ret;
3466        }
3467
3468        /* In some use cases, like reset, no stream is attached */
3469        if (!dm_crtc_state->stream)
3470                return 0;
3471
3472        if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
3473                return 0;
3474
3475        return ret;
3476}
3477
/* All modes are accepted as-is; DC performs its own validation elsewhere. */
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}
3484
/* CRTC atomic helper callbacks. */
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup
};
3490
/* Intentionally a no-op; the helper table requires a .disable callback. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}
3495
/* Encoder state needs no extra validation; always succeed. */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	return 0;
}
3502
/* Encoder atomic helper callbacks. */
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
3507
3508static void dm_drm_plane_reset(struct drm_plane *plane)
3509{
3510        struct dm_plane_state *amdgpu_state = NULL;
3511
3512        if (plane->state)
3513                plane->funcs->atomic_destroy_state(plane, plane->state);
3514
3515        amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
3516        WARN_ON(amdgpu_state == NULL);
3517
3518        if (amdgpu_state) {
3519                plane->state = &amdgpu_state->base;
3520                plane->state->plane = plane;
3521                plane->state->rotation = DRM_MODE_ROTATE_0;
3522        }
3523}
3524
3525static struct drm_plane_state *
3526dm_drm_plane_duplicate_state(struct drm_plane *plane)
3527{
3528        struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
3529
3530        old_dm_plane_state = to_dm_plane_state(plane->state);
3531        dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
3532        if (!dm_plane_state)
3533                return NULL;
3534
3535        __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
3536
3537        if (old_dm_plane_state->dc_state) {
3538                dm_plane_state->dc_state = old_dm_plane_state->dc_state;
3539                dc_plane_state_retain(dm_plane_state->dc_state);
3540        }
3541
3542        return &dm_plane_state->base;
3543}
3544
3545void dm_drm_plane_destroy_state(struct drm_plane *plane,
3546                                struct drm_plane_state *state)
3547{
3548        struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
3549
3550        if (dm_plane_state->dc_state)
3551                dc_plane_state_release(dm_plane_state->dc_state);
3552
3553        drm_atomic_helper_plane_destroy_state(plane, state);
3554}
3555
/* Plane callbacks; updates go through the atomic helpers. */
static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_primary_helper_destroy,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
};
3564
/*
 * Prepare a framebuffer for scanout: reserve and pin its BO in a
 * display-capable domain, bind it into GART, and program the resulting
 * GPU address into the DC plane state (graphics or video layout).
 * Returns 0 on success or a negative errno.
 */
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	uint64_t chroma_addr = 0;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	unsigned int awidth;
	uint32_t domain;
	int r;

	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	/* Nothing to pin when the plane has no framebuffer. */
	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;

	/* Cursors must live in VRAM; other planes may use any display domain. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		/* -ERESTARTSYS is an ordinary signal interruption, not an error. */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		amdgpu_bo_unreserve(rbo);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		/* Undo the pin before dropping the reservation. */
		amdgpu_bo_unpin(rbo);
		amdgpu_bo_unreserve(rbo);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}
	amdgpu_bo_unreserve(rbo);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	/* Hold a reference for the duration of the scanout; dropped in cleanup_fb. */
	amdgpu_bo_ref(rbo);

	/* Only (re)program addresses when this commit brings a new DC state. */
	if (dm_plane_state_new->dc_state &&
			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;

		if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
			/* Packed graphics format: single base address. */
			plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
			plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
		} else {
			/* Planar video format: luma plane followed by chroma,
			 * with the pitch aligned to 64 pixels. */
			awidth = ALIGN(new_state->fb->width, 64);
			plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
			plane_state->address.video_progressive.luma_addr.low_part
							= lower_32_bits(afb->address);
			plane_state->address.video_progressive.luma_addr.high_part
							= upper_32_bits(afb->address);
			chroma_addr = afb->address + (u64)awidth * new_state->fb->height;
			plane_state->address.video_progressive.chroma_addr.low_part
							= lower_32_bits(chroma_addr);
			plane_state->address.video_progressive.chroma_addr.high_part
							= upper_32_bits(chroma_addr);
		}
	}

	return 0;
}
3644
3645static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
3646                                       struct drm_plane_state *old_state)
3647{
3648        struct amdgpu_bo *rbo;
3649        int r;
3650
3651        if (!old_state->fb)
3652                return;
3653
3654        rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
3655        r = amdgpu_bo_reserve(rbo, false);
3656        if (unlikely(r)) {
3657                DRM_ERROR("failed to reserve rbo before unpin\n");
3658                return;
3659        }
3660
3661        amdgpu_bo_unpin(rbo);
3662        amdgpu_bo_unreserve(rbo);
3663        amdgpu_bo_unref(&rbo);
3664}
3665
3666static int dm_plane_atomic_check(struct drm_plane *plane,
3667                                 struct drm_plane_state *state)
3668{
3669        struct amdgpu_device *adev = plane->dev->dev_private;
3670        struct dc *dc = adev->dm.dc;
3671        struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
3672
3673        if (!dm_plane_state->dc_state)
3674                return 0;
3675
3676        if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
3677                return -EINVAL;
3678
3679        if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
3680                return 0;
3681
3682        return -EINVAL;
3683}
3684
3685static int dm_plane_atomic_async_check(struct drm_plane *plane,
3686                                       struct drm_plane_state *new_plane_state)
3687{
3688        struct drm_plane_state *old_plane_state =
3689                drm_atomic_get_old_plane_state(new_plane_state->state, plane);
3690
3691        /* Only support async updates on cursor planes. */
3692        if (plane->type != DRM_PLANE_TYPE_CURSOR)
3693                return -EINVAL;
3694
3695        /*
3696         * DRM calls prepare_fb and cleanup_fb on new_plane_state for
3697         * async commits so don't allow fb changes.
3698         */
3699        if (old_plane_state->fb != new_plane_state->fb)
3700                return -EINVAL;
3701
3702        return 0;
3703}
3704
/*
 * Apply an async (cursor) plane update: copy the new position/size into
 * the committed plane state and push the cursor change to hardware.
 */
static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_plane_state *new_state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(new_state->state, plane);

	if (plane->state->fb != new_state->fb)
		drm_atomic_set_fb_for_plane(plane->state, new_state->fb);

	/* Mirror the source and CRTC rectangles into the live state. */
	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}
3725
/* Plane atomic helper callbacks, including async (cursor) update support. */
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};
3733
/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so internal drm
 * check will succeed, and let DC implement proper check
 */
/* Pixel formats advertised for primary planes. */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_RGB888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};
3752
/* Pixel formats advertised for overlay planes. */
static const uint32_t yuv_formats[] = {
	DRM_FORMAT_NV12,
	DRM_FORMAT_NV21,
};
3757
/* Pixel formats advertised for cursor planes. */
static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};
3761
3762static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
3763                                struct drm_plane *plane,
3764                                unsigned long possible_crtcs)
3765{
3766        int res = -EPERM;
3767
3768        switch (plane->type) {
3769        case DRM_PLANE_TYPE_PRIMARY:
3770                res = drm_universal_plane_init(
3771                                dm->adev->ddev,
3772                                plane,
3773                                possible_crtcs,
3774                                &dm_plane_funcs,
3775                                rgb_formats,
3776                                ARRAY_SIZE(rgb_formats),
3777                                NULL, plane->type, NULL);
3778                break;
3779        case DRM_PLANE_TYPE_OVERLAY:
3780                res = drm_universal_plane_init(
3781                                dm->adev->ddev,
3782                                plane,
3783                                possible_crtcs,
3784                                &dm_plane_funcs,
3785                                yuv_formats,
3786                                ARRAY_SIZE(yuv_formats),
3787                                NULL, plane->type, NULL);
3788                break;
3789        case DRM_PLANE_TYPE_CURSOR:
3790                res = drm_universal_plane_init(
3791                                dm->adev->ddev,
3792                                plane,
3793                                possible_crtcs,
3794                                &dm_plane_funcs,
3795                                cursor_formats,
3796                                ARRAY_SIZE(cursor_formats),
3797                                NULL, plane->type, NULL);
3798                break;
3799        }
3800
3801        drm_plane_helper_add(plane, &dm_plane_helper_funcs);
3802
3803        /* Create (reset) the plane state */
3804        if (plane->funcs->reset)
3805                plane->funcs->reset(plane);
3806
3807
3808        return res;
3809}
3810
3811static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
3812                               struct drm_plane *plane,
3813                               uint32_t crtc_index)
3814{
3815        struct amdgpu_crtc *acrtc = NULL;
3816        struct drm_plane *cursor_plane;
3817
3818        int res = -ENOMEM;
3819
3820        cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
3821        if (!cursor_plane)
3822                goto fail;
3823
3824        cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
3825        res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
3826
3827        acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
3828        if (!acrtc)
3829                goto fail;
3830
3831        res = drm_crtc_init_with_planes(
3832                        dm->ddev,
3833                        &acrtc->base,
3834                        plane,
3835                        cursor_plane,
3836                        &amdgpu_dm_crtc_funcs, NULL);
3837
3838        if (res)
3839                goto fail;
3840
3841        drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
3842
3843        /* Create (reset) the plane state */
3844        if (acrtc->base.funcs->reset)
3845                acrtc->base.funcs->reset(&acrtc->base);
3846
3847        acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
3848        acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
3849
3850        acrtc->crtc_id = crtc_index;
3851        acrtc->base.enabled = false;
3852        acrtc->otg_inst = -1;
3853
3854        dm->adev->mode_info.crtcs[crtc_index] = acrtc;
3855        drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
3856                                   true, MAX_COLOR_LUT_ENTRIES);
3857        drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
3858
3859        return 0;
3860
3861fail:
3862        kfree(acrtc);
3863        kfree(cursor_plane);
3864        return res;
3865}
3866
3867
3868static int to_drm_connector_type(enum signal_type st)
3869{
3870        switch (st) {
3871        case SIGNAL_TYPE_HDMI_TYPE_A:
3872                return DRM_MODE_CONNECTOR_HDMIA;
3873        case SIGNAL_TYPE_EDP:
3874                return DRM_MODE_CONNECTOR_eDP;
3875        case SIGNAL_TYPE_LVDS:
3876                return DRM_MODE_CONNECTOR_LVDS;
3877        case SIGNAL_TYPE_RGB:
3878                return DRM_MODE_CONNECTOR_VGA;
3879        case SIGNAL_TYPE_DISPLAY_PORT:
3880        case SIGNAL_TYPE_DISPLAY_PORT_MST:
3881                return DRM_MODE_CONNECTOR_DisplayPort;
3882        case SIGNAL_TYPE_DVI_DUAL_LINK:
3883        case SIGNAL_TYPE_DVI_SINGLE_LINK:
3884                return DRM_MODE_CONNECTOR_DVID;
3885        case SIGNAL_TYPE_VIRTUAL:
3886                return DRM_MODE_CONNECTOR_VIRTUAL;
3887
3888        default:
3889                return DRM_MODE_CONNECTOR_Unknown;
3890        }
3891}
3892
/* Look up the first encoder bound to this connector (or NULL). */
static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
}
3897
/*
 * Cache the connector's preferred probed mode as the encoder's native
 * mode (used later to synthesize common scaled modes).
 */
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	/* clock == 0 marks the native mode as invalid until one is found. */
	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			/*
			 * NOTE(review): this break is unconditional, so only
			 * the first probed mode is ever examined - confirm it
			 * is intentional (i.e. the preferred mode is assumed
			 * to be sorted first in the list).
			 */
			break;
		}

	}
}
3926
3927static struct drm_display_mode *
3928amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
3929                             char *name,
3930                             int hdisplay, int vdisplay)
3931{
3932        struct drm_device *dev = encoder->dev;
3933        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3934        struct drm_display_mode *mode = NULL;
3935        struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3936
3937        mode = drm_mode_duplicate(dev, native_mode);
3938
3939        if (mode == NULL)
3940                return NULL;
3941
3942        mode->hdisplay = hdisplay;
3943        mode->vdisplay = vdisplay;
3944        mode->type &= ~DRM_MODE_TYPE_PREFERRED;
3945        strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
3946
3947        return mode;
3948
3949}
3950
3951static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3952                                                 struct drm_connector *connector)
3953{
3954        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3955        struct drm_display_mode *mode = NULL;
3956        struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3957        struct amdgpu_dm_connector *amdgpu_dm_connector =
3958                                to_amdgpu_dm_connector(connector);
3959        int i;
3960        int n;
3961        struct mode_size {
3962                char name[DRM_DISPLAY_MODE_LEN];
3963                int w;
3964                int h;
3965        } common_modes[] = {
3966                {  "640x480",  640,  480},
3967                {  "800x600",  800,  600},
3968                { "1024x768", 1024,  768},
3969                { "1280x720", 1280,  720},
3970                { "1280x800", 1280,  800},
3971                {"1280x1024", 1280, 1024},
3972                { "1440x900", 1440,  900},
3973                {"1680x1050", 1680, 1050},
3974                {"1600x1200", 1600, 1200},
3975                {"1920x1080", 1920, 1080},
3976                {"1920x1200", 1920, 1200}
3977        };
3978
3979        n = ARRAY_SIZE(common_modes);
3980
3981        for (i = 0; i < n; i++) {
3982                struct drm_display_mode *curmode = NULL;
3983                bool mode_existed = false;
3984
3985                if (common_modes[i].w > native_mode->hdisplay ||
3986                    common_modes[i].h > native_mode->vdisplay ||
3987                   (common_modes[i].w == native_mode->hdisplay &&
3988                    common_modes[i].h == native_mode->vdisplay))
3989                        continue;
3990
3991                list_for_each_entry(curmode, &connector->probed_modes, head) {
3992                        if (common_modes[i].w == curmode->hdisplay &&
3993                            common_modes[i].h == curmode->vdisplay) {
3994                                mode_existed = true;
3995                                break;
3996                        }
3997                }
3998
3999                if (mode_existed)
4000                        continue;
4001
4002                mode = amdgpu_dm_create_common_mode(encoder,
4003                                common_modes[i].name, common_modes[i].w,
4004                                common_modes[i].h);
4005                drm_mode_probed_add(connector, mode);
4006                amdgpu_dm_connector->num_modes++;
4007        }
4008}
4009
4010static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
4011                                              struct edid *edid)
4012{
4013        struct amdgpu_dm_connector *amdgpu_dm_connector =
4014                        to_amdgpu_dm_connector(connector);
4015
4016        if (edid) {
4017                /* empty probed_modes */
4018                INIT_LIST_HEAD(&connector->probed_modes);
4019                amdgpu_dm_connector->num_modes =
4020                                drm_add_edid_modes(connector, edid);
4021
4022                amdgpu_dm_get_native_mode(connector);
4023        } else {
4024                amdgpu_dm_connector->num_modes = 0;
4025        }
4026}
4027
4028static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
4029{
4030        struct amdgpu_dm_connector *amdgpu_dm_connector =
4031                        to_amdgpu_dm_connector(connector);
4032        struct drm_encoder *encoder;
4033        struct edid *edid = amdgpu_dm_connector->edid;
4034
4035        encoder = amdgpu_dm_connector_to_encoder(connector);
4036
4037        if (!edid || !drm_edid_is_valid(edid)) {
4038                amdgpu_dm_connector->num_modes =
4039                                drm_add_modes_noedid(connector, 640, 480);
4040        } else {
4041                amdgpu_dm_connector_ddc_get_modes(connector, edid);
4042                amdgpu_dm_connector_add_common_modes(encoder, connector);
4043        }
4044        amdgpu_dm_fbc_init(connector);
4045
4046        return amdgpu_dm_connector->num_modes;
4047}
4048
4049void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
4050                                     struct amdgpu_dm_connector *aconnector,
4051                                     int connector_type,
4052                                     struct dc_link *link,
4053                                     int link_index)
4054{
4055        struct amdgpu_device *adev = dm->ddev->dev_private;
4056
4057        aconnector->connector_id = link_index;
4058        aconnector->dc_link = link;
4059        aconnector->base.interlace_allowed = false;
4060        aconnector->base.doublescan_allowed = false;
4061        aconnector->base.stereo_allowed = false;
4062        aconnector->base.dpms = DRM_MODE_DPMS_OFF;
4063        aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
4064        mutex_init(&aconnector->hpd_lock);
4065
4066        /*
4067         * configure support HPD hot plug connector_>polled default value is 0
4068         * which means HPD hot plug not supported
4069         */
4070        switch (connector_type) {
4071        case DRM_MODE_CONNECTOR_HDMIA:
4072                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
4073                aconnector->base.ycbcr_420_allowed =
4074                        link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
4075                break;
4076        case DRM_MODE_CONNECTOR_DisplayPort:
4077                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
4078                aconnector->base.ycbcr_420_allowed =
4079                        link->link_enc->features.dp_ycbcr420_supported ? true : false;
4080                break;
4081        case DRM_MODE_CONNECTOR_DVID:
4082                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
4083                break;
4084        default:
4085                break;
4086        }
4087
4088        drm_object_attach_property(&aconnector->base.base,
4089                                dm->ddev->mode_config.scaling_mode_property,
4090                                DRM_MODE_SCALE_NONE);
4091
4092        drm_object_attach_property(&aconnector->base.base,
4093                                adev->mode_info.underscan_property,
4094                                UNDERSCAN_OFF);
4095        drm_object_attach_property(&aconnector->base.base,
4096                                adev->mode_info.underscan_hborder_property,
4097                                0);
4098        drm_object_attach_property(&aconnector->base.base,
4099                                adev->mode_info.underscan_vborder_property,
4100                                0);
4101        drm_object_attach_property(&aconnector->base.base,
4102                                adev->mode_info.max_bpc_property,
4103                                0);
4104
4105        if (connector_type == DRM_MODE_CONNECTOR_eDP &&
4106            dc_is_dmcu_initialized(adev->dm.dc)) {
4107                drm_object_attach_property(&aconnector->base.base,
4108                                adev->mode_info.abm_level_property, 0);
4109        }
4110
4111        if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4112            connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4113            connector_type == DRM_MODE_CONNECTOR_eDP) {
4114                drm_connector_attach_vrr_capable_property(
4115                        &aconnector->base);
4116        }
4117}
4118
4119static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
4120                              struct i2c_msg *msgs, int num)
4121{
4122        struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
4123        struct ddc_service *ddc_service = i2c->ddc_service;
4124        struct i2c_command cmd;
4125        int i;
4126        int result = -EIO;
4127
4128        cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
4129
4130        if (!cmd.payloads)
4131                return result;
4132
4133        cmd.number_of_payloads = num;
4134        cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
4135        cmd.speed = 100;
4136
4137        for (i = 0; i < num; i++) {
4138                cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
4139                cmd.payloads[i].address = msgs[i].addr;
4140                cmd.payloads[i].length = msgs[i].len;
4141                cmd.payloads[i].data = msgs[i].buf;
4142        }
4143
4144        if (dc_submit_i2c(
4145                        ddc_service->ctx->dc,
4146                        ddc_service->ddc_pin->hw_info.ddc_channel,
4147                        &cmd))
4148                result = num;
4149
4150        kfree(cmd.payloads);
4151        return result;
4152}
4153
/* Advertise plain I2C plus emulated SMBus support for the DM DDC adapter. */
static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
4158
/* i2c algorithm hooks backing the adapters built by create_i2c(). */
static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
        .master_xfer = amdgpu_dm_i2c_xfer,
        .functionality = amdgpu_dm_i2c_func,
};
4163
/*
 * Allocate and initialize an i2c adapter wrapping @ddc_service so the DDC
 * line can be driven through the kernel i2c framework (amdgpu_dm_i2c_algo).
 * Returns NULL on allocation failure; the caller registers the adapter and
 * owns the returned memory.
 *
 * NOTE(review): the @res out-parameter is never written here, so callers
 * must not rely on it - confirm whether it can be removed.
 * NOTE(review): ddc_service->ddc_pin is dereferenced unconditionally;
 * presumably every link with a DDC service has a pin - verify.
 */
static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
           int link_index,
           int *res)
{
        struct amdgpu_device *adev = ddc_service->ctx->driver_context;
        struct amdgpu_i2c_adapter *i2c;

        i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
        if (!i2c)
                return NULL;
        i2c->base.owner = THIS_MODULE;
        i2c->base.class = I2C_CLASS_DDC;
        i2c->base.dev.parent = &adev->pdev->dev;
        i2c->base.algo = &amdgpu_dm_i2c_algo;
        snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
        i2c_set_adapdata(&i2c->base, i2c);
        i2c->ddc_service = ddc_service;
        i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

        return i2c;
}
4186
4187
4188/*
4189 * Note: this function assumes that dc_link_detect() was called for the
4190 * dc_link which will be represented by this aconnector.
4191 */
4192static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
4193                                    struct amdgpu_dm_connector *aconnector,
4194                                    uint32_t link_index,
4195                                    struct amdgpu_encoder *aencoder)
4196{
4197        int res = 0;
4198        int connector_type;
4199        struct dc *dc = dm->dc;
4200        struct dc_link *link = dc_get_link_at_index(dc, link_index);
4201        struct amdgpu_i2c_adapter *i2c;
4202
4203        link->priv = aconnector;
4204
4205        DRM_DEBUG_DRIVER("%s()\n", __func__);
4206
4207        i2c = create_i2c(link->ddc, link->link_index, &res);
4208        if (!i2c) {
4209                DRM_ERROR("Failed to create i2c adapter data\n");
4210                return -ENOMEM;
4211        }
4212
4213        aconnector->i2c = i2c;
4214        res = i2c_add_adapter(&i2c->base);
4215
4216        if (res) {
4217                DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
4218                goto out_free;
4219        }
4220
4221        connector_type = to_drm_connector_type(link->connector_signal);
4222
4223        res = drm_connector_init(
4224                        dm->ddev,
4225                        &aconnector->base,
4226                        &amdgpu_dm_connector_funcs,
4227                        connector_type);
4228
4229        if (res) {
4230                DRM_ERROR("connector_init failed\n");
4231                aconnector->connector_id = -1;
4232                goto out_free;
4233        }
4234
4235        drm_connector_helper_add(
4236                        &aconnector->base,
4237                        &amdgpu_dm_connector_helper_funcs);
4238
4239        if (aconnector->base.funcs->reset)
4240                aconnector->base.funcs->reset(&aconnector->base);
4241
4242        amdgpu_dm_connector_init_helper(
4243                dm,
4244                aconnector,
4245                connector_type,
4246                link,
4247                link_index);
4248
4249        drm_connector_attach_encoder(
4250                &aconnector->base, &aencoder->base);
4251
4252        drm_connector_register(&aconnector->base);
4253#if defined(CONFIG_DEBUG_FS)
4254        res = connector_debugfs_init(aconnector);
4255        if (res) {
4256                DRM_ERROR("Failed to create debugfs for connector");
4257                goto out_free;
4258        }
4259#endif
4260
4261        if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
4262                || connector_type == DRM_MODE_CONNECTOR_eDP)
4263                amdgpu_dm_initialize_dp_connector(dm, aconnector);
4264
4265out_free:
4266        if (res) {
4267                kfree(i2c);
4268                aconnector->i2c = NULL;
4269        }
4270        return res;
4271}
4272
4273int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
4274{
4275        switch (adev->mode_info.num_crtc) {
4276        case 1:
4277                return 0x1;
4278        case 2:
4279                return 0x3;
4280        case 3:
4281                return 0x7;
4282        case 4:
4283                return 0xf;
4284        case 5:
4285                return 0x1f;
4286        case 6:
4287        default:
4288                return 0x3f;
4289        }
4290}
4291
4292static int amdgpu_dm_encoder_init(struct drm_device *dev,
4293                                  struct amdgpu_encoder *aencoder,
4294                                  uint32_t link_index)
4295{
4296        struct amdgpu_device *adev = dev->dev_private;
4297
4298        int res = drm_encoder_init(dev,
4299                                   &aencoder->base,
4300                                   &amdgpu_dm_encoder_funcs,
4301                                   DRM_MODE_ENCODER_TMDS,
4302                                   NULL);
4303
4304        aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
4305
4306        if (!res)
4307                aencoder->encoder_id = link_index;
4308        else
4309                aencoder->encoder_id = -1;
4310
4311        drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
4312
4313        return res;
4314}
4315
/*
 * Enable or disable vblank/pageflip interrupt delivery for one CRTC.
 *
 * Ordering matters: on enable, DRM vblank handling is switched on before
 * a reference is taken on the pageflip interrupt; on disable, the pageflip
 * interrupt reference is dropped before vblank handling is switched off
 * (the reverse order).
 */
static void manage_dm_interrupts(struct amdgpu_device *adev,
                                 struct amdgpu_crtc *acrtc,
                                 bool enable)
{
        /*
         * this is not correct translation but will work as soon as VBLANK
         * constant is the same as PFLIP
         */
        int irq_type =
                amdgpu_display_crtc_idx_to_irq_type(
                        adev,
                        acrtc->crtc_id);

        if (enable) {
                drm_crtc_vblank_on(&acrtc->base);
                amdgpu_irq_get(
                        adev,
                        &adev->pageflip_irq,
                        irq_type);
        } else {

                amdgpu_irq_put(
                        adev,
                        &adev->pageflip_irq,
                        irq_type);
                drm_crtc_vblank_off(&acrtc->base);
        }
}
4344
4345static bool
4346is_scaling_state_different(const struct dm_connector_state *dm_state,
4347                           const struct dm_connector_state *old_dm_state)
4348{
4349        if (dm_state->scaling != old_dm_state->scaling)
4350                return true;
4351        if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
4352                if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
4353                        return true;
4354        } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
4355                if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
4356                        return true;
4357        } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
4358                   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
4359                return true;
4360        return false;
4361}
4362
/*
 * Mark @acrtc as no longer driving a stream: clear its OTG instance and
 * flag it disabled. (@adev and @stream are currently unused here.)
 */
static void remove_stream(struct amdgpu_device *adev,
                          struct amdgpu_crtc *acrtc,
                          struct dc_stream_state *stream)
{
        /* this is the update mode case */

        acrtc->otg_inst = -1;
        acrtc->enabled = false;
}
4372
4373static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
4374                               struct dc_cursor_position *position)
4375{
4376        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
4377        int x, y;
4378        int xorigin = 0, yorigin = 0;
4379
4380        if (!crtc || !plane->state->fb) {
4381                position->enable = false;
4382                position->x = 0;
4383                position->y = 0;
4384                return 0;
4385        }
4386
4387        if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
4388            (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
4389                DRM_ERROR("%s: bad cursor width or height %d x %d\n",
4390                          __func__,
4391                          plane->state->crtc_w,
4392                          plane->state->crtc_h);
4393                return -EINVAL;
4394        }
4395
4396        x = plane->state->crtc_x;
4397        y = plane->state->crtc_y;
4398        /* avivo cursor are offset into the total surface */
4399        x += crtc->primary->state->src_x >> 16;
4400        y += crtc->primary->state->src_y >> 16;
4401        if (x < 0) {
4402                xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
4403                x = 0;
4404        }
4405        if (y < 0) {
4406                yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
4407                y = 0;
4408        }
4409        position->enable = true;
4410        position->x = x;
4411        position->y = y;
4412        position->x_hotspot = xorigin;
4413        position->y_hotspot = yorigin;
4414
4415        return 0;
4416}
4417
/*
 * Program the hardware cursor for the CRTC owning @plane, or hide it.
 * Position and attributes are derived from the plane state and handed to
 * DC under dm.dc_lock.
 */
static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state)
{
        struct amdgpu_device *adev = plane->dev->dev_private;
        struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
        /* When the cursor is being hidden (no fb), target its previous CRTC. */
        struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
        struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        uint64_t address = afb ? afb->address : 0;
        struct dc_cursor_position position;
        struct dc_cursor_attributes attributes;
        int ret;

        /* Nothing to do when there is no cursor fb before or after. */
        if (!plane->state->fb && !old_plane_state->fb)
                return;

        DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
                         __func__,
                         amdgpu_crtc->crtc_id,
                         plane->state->crtc_w,
                         plane->state->crtc_h);

        ret = get_cursor_position(plane, crtc, &position);
        if (ret)
                return;

        if (!position.enable) {
                /* turn off cursor */
                if (crtc_state && crtc_state->stream) {
                        mutex_lock(&adev->dm.dc_lock);
                        dc_stream_set_cursor_position(crtc_state->stream,
                                                      &position);
                        mutex_unlock(&adev->dm.dc_lock);
                }
                return;
        }

        amdgpu_crtc->cursor_width = plane->state->crtc_w;
        amdgpu_crtc->cursor_height = plane->state->crtc_h;

        /* Cursor surface uses premultiplied-alpha color, pitch == width. */
        attributes.address.high_part = upper_32_bits(address);
        attributes.address.low_part  = lower_32_bits(address);
        attributes.width             = plane->state->crtc_w;
        attributes.height            = plane->state->crtc_h;
        attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
        attributes.rotation_angle    = 0;
        attributes.attribute_flags.value = 0;

        attributes.pitch = attributes.width;

        /*
         * position.enable implies a CRTC was present (see
         * get_cursor_position()), so crtc_state is non-NULL here.
         */
        if (crtc_state->stream) {
                mutex_lock(&adev->dm.dc_lock);
                if (!dc_stream_set_cursor_attributes(crtc_state->stream,
                                                         &attributes))
                        DRM_ERROR("DC failed to set cursor attributes\n");

                if (!dc_stream_set_cursor_position(crtc_state->stream,
                                                   &position))
                        DRM_ERROR("DC failed to set cursor position\n");
                mutex_unlock(&adev->dm.dc_lock);
        }
}
4480
/*
 * Hand the CRTC's pending pageflip event over to the pageflip interrupt
 * path: record the event on the amdgpu_crtc, mark the flip as submitted,
 * and clear the event from the atomic state so it cannot be completed
 * twice. Caller must hold the device's event_lock (asserted below).
 */
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{

        assert_spin_locked(&acrtc->base.dev->event_lock);
        WARN_ON(acrtc->event);

        acrtc->event = acrtc->base.state->event;

        /* Set the flip status */
        acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

        /* Mark this event as consumed */
        acrtc->base.state->event = NULL;

        DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
                                                 acrtc->crtc_id);
}
4498
4499struct dc_stream_status *dc_state_get_stream_status(
4500        struct dc_state *state,
4501        struct dc_stream_state *stream)
4502{
4503        uint8_t i;
4504
4505        for (i = 0; i < state->stream_count; i++) {
4506                if (stream == state->streams[i])
4507                        return &state->stream_status[i];
4508        }
4509
4510        return NULL;
4511}
4512
/*
 * Rebuild the variable-refresh-rate (FreeSync) parameters and VRR info
 * packet for @new_stream after a flip, and record on @new_crtc_state
 * whether the timing adjustment or the info packet actually changed so
 * that the caller can limit the stream update it sends to DC.
 */
static void update_freesync_state_on_stream(
        struct amdgpu_display_manager *dm,
        struct dm_crtc_state *new_crtc_state,
        struct dc_stream_state *new_stream,
        struct dc_plane_state *surface,
        u32 flip_timestamp_in_us)
{
        /* Work on local copies; the CRTC state is only updated at the end. */
        struct mod_vrr_params vrr_params = new_crtc_state->vrr_params;
        struct dc_info_packet vrr_infopacket = {0};
        struct mod_freesync_config config = new_crtc_state->freesync_config;

        if (!new_stream)
                return;

        /*
         * TODO: Determine why min/max totals and vrefresh can be 0 here.
         * For now it's sufficient to just guard against these conditions.
         */

        if (!new_stream->timing.h_total || !new_stream->timing.v_total)
                return;

        /*
         * VRR is only activated when the connector supports it and a
         * non-zero refresh range has been configured.
         */
        if (new_crtc_state->vrr_supported &&
            config.min_refresh_in_uhz &&
            config.max_refresh_in_uhz) {
                config.state = new_crtc_state->base.vrr_enabled ?
                        VRR_STATE_ACTIVE_VARIABLE :
                        VRR_STATE_INACTIVE;
        } else {
                config.state = VRR_STATE_UNSUPPORTED;
        }

        mod_freesync_build_vrr_params(dm->freesync_module,
                                      new_stream,
                                      &config, &vrr_params);

        if (surface) {
                mod_freesync_handle_preflip(
                        dm->freesync_module,
                        surface,
                        new_stream,
                        flip_timestamp_in_us,
                        &vrr_params);
        }

        mod_freesync_build_vrr_infopacket(
                dm->freesync_module,
                new_stream,
                &vrr_params,
                PACKET_TYPE_VRR,
                TRANSFER_FUNC_UNKNOWN,
                &vrr_infopacket);

        /* Flag whether the newly built values differ from the stored ones. */
        new_crtc_state->freesync_timing_changed =
                (memcmp(&new_crtc_state->vrr_params.adjust,
                        &vrr_params.adjust,
                        sizeof(vrr_params.adjust)) != 0);

        new_crtc_state->freesync_vrr_info_changed =
                (memcmp(&new_crtc_state->vrr_infopacket,
                        &vrr_infopacket,
                        sizeof(vrr_infopacket)) != 0);

        /* Publish the new values on both the CRTC state and the stream. */
        new_crtc_state->vrr_params = vrr_params;
        new_crtc_state->vrr_infopacket = vrr_infopacket;

        new_stream->adjust = new_crtc_state->vrr_params.adjust;
        new_stream->vrr_infopacket = vrr_infopacket;

        if (new_crtc_state->freesync_vrr_info_changed)
                DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
                              new_crtc_state->base.crtc->base.id,
                              (int)new_crtc_state->base.vrr_enabled,
                              (int)vrr_params.state);

        if (new_crtc_state->freesync_timing_changed)
                DRM_DEBUG_KMS("VRR timing update: crtc=%u min=%u max=%u\n",
                              new_crtc_state->base.crtc->base.id,
                                  vrr_params.adjust.v_total_min,
                                  vrr_params.adjust.v_total_max);
}
4594
4595/*
4596 * Executes flip
4597 *
4598 * Waits on all BO's fences and for proper vblank count
4599 */
4600static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
4601                              struct drm_framebuffer *fb,
4602                              uint32_t target,
4603                              struct dc_state *state)
4604{
4605        unsigned long flags;
4606        uint64_t timestamp_ns;
4607        uint32_t target_vblank;
4608        int r, vpos, hpos;
4609        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4610        struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
4611        struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
4612        struct amdgpu_device *adev = crtc->dev->dev_private;
4613        bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
4614        struct dc_flip_addrs addr = { {0} };
4615        /* TODO eliminate or rename surface_update */
4616        struct dc_surface_update surface_updates[1] = { {0} };
4617        struct dc_stream_update stream_update = { {0} };
4618        struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4619        struct dc_stream_status *stream_status;
4620        struct dc_plane_state *surface;
4621
4622
4623        /* Prepare wait for target vblank early - before the fence-waits */
4624        target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
4625                        amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
4626
4627        /*
4628         * TODO This might fail and hence better not used, wait
4629         * explicitly on fences instead
4630         * and in general should be called for
4631         * blocking commit to as per framework helpers
4632         */
4633        r = amdgpu_bo_reserve(abo, true);
4634        if (unlikely(r != 0)) {
4635                DRM_ERROR("failed to reserve buffer before flip\n");
4636                WARN_ON(1);
4637        }
4638
4639        /* Wait for all fences on this FB */
4640        WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
4641                                                                    MAX_SCHEDULE_TIMEOUT) < 0);
4642
4643        amdgpu_bo_unreserve(abo);
4644
4645        /*
4646         * Wait until we're out of the vertical blank period before the one
4647         * targeted by the flip
4648         */
4649        while ((acrtc->enabled &&
4650                (amdgpu_display_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id,
4651                                                    0, &vpos, &hpos, NULL,
4652                                                    NULL, &crtc->hwmode)
4653                 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
4654                (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
4655                (int)(target_vblank -
4656                  amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
4657                usleep_range(1000, 1100);
4658        }
4659
4660        /* Flip */
4661        spin_lock_irqsave(&crtc->dev->event_lock, flags);
4662
4663        WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
4664        WARN_ON(!acrtc_state->stream);
4665
4666        addr.address.grph.addr.low_part = lower_32_bits(afb->address);
4667        addr.address.grph.addr.high_part = upper_32_bits(afb->address);
4668        addr.flip_immediate = async_flip;
4669
4670        timestamp_ns = ktime_get_ns();
4671        addr.flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
4672
4673
4674        if (acrtc->base.state->event)
4675                prepare_flip_isr(acrtc);
4676
4677        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4678
4679        stream_status = dc_stream_get_status(acrtc_state->stream);
4680        if (!stream_status) {
4681                DRM_ERROR("No stream status for CRTC: id=%d\n",
4682                        acrtc->crtc_id);
4683                return;
4684        }
4685
4686        surface = stream_status->plane_states[0];
4687        surface_updates->surface = surface;
4688
4689        if (!surface) {
4690                DRM_ERROR("No surface for CRTC: id=%d\n",
4691                        acrtc->crtc_id);
4692                return;
4693        }
4694        surface_updates->flip_addr = &addr;
4695
4696        if (acrtc_state->stream) {
4697                update_freesync_state_on_stream(
4698                        &adev->dm,
4699                        acrtc_state,
4700                        acrtc_state->stream,
4701                        surface,
4702                        addr.flip_timestamp_in_us);
4703
4704                if (acrtc_state->freesync_timing_changed)
4705                        stream_update.adjust =
4706                                &acrtc_state->stream->adjust;
4707
4708                if (acrtc_state->freesync_vrr_info_changed)
4709                        stream_update.vrr_infopacket =
4710                                &acrtc_state->stream->vrr_infopacket;
4711        }
4712
4713        /* Update surface timing information. */
4714        surface->time.time_elapsed_in_us[surface->time.index] =
4715                addr.flip_timestamp_in_us - surface->time.prev_update_time_in_us;
4716        surface->time.prev_update_time_in_us = addr.flip_timestamp_in_us;
4717        surface->time.index++;
4718        if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
4719                surface->time.index = 0;
4720
4721        mutex_lock(&adev->dm.dc_lock);
4722
4723        dc_commit_updates_for_stream(adev->dm.dc,
4724                                             surface_updates,
4725                                             1,
4726                                             acrtc_state->stream,
4727                                             &stream_update,
4728                                             &surface_updates->surface,
4729                                             state);
4730        mutex_unlock(&adev->dm.dc_lock);
4731
4732        DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
4733                         __func__,
4734                         addr.address.grph.addr.high_part,
4735                         addr.address.grph.addr.low_part);
4736}
4737
4738/*
4739 * TODO this whole function needs to go
4740 *
4741 * dc_surface_update is needlessly complex. See if we can just replace this
4742 * with a dc_plane_state and follow the atomic model a bit more closely here.
4743 */
4744static bool commit_planes_to_stream(
4745                struct amdgpu_display_manager *dm,
4746                struct dc *dc,
4747                struct dc_plane_state **plane_states,
4748                uint8_t new_plane_count,
4749                struct dm_crtc_state *dm_new_crtc_state,
4750                struct dm_crtc_state *dm_old_crtc_state,
4751                struct dc_state *state)
4752{
4753        /* no need to dynamically allocate this. it's pretty small */
4754        struct dc_surface_update updates[MAX_SURFACES];
4755        struct dc_flip_addrs *flip_addr;
4756        struct dc_plane_info *plane_info;
4757        struct dc_scaling_info *scaling_info;
4758        int i;
4759        struct dc_stream_state *dc_stream = dm_new_crtc_state->stream;
4760        struct dc_stream_update *stream_update =
4761                        kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
4762        unsigned int abm_level;
4763
4764        if (!stream_update) {
4765                BREAK_TO_DEBUGGER();
4766                return false;
4767        }
4768
4769        flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
4770                            GFP_KERNEL);
4771        plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
4772                             GFP_KERNEL);
4773        scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
4774                               GFP_KERNEL);
4775
4776        if (!flip_addr || !plane_info || !scaling_info) {
4777                kfree(flip_addr);
4778                kfree(plane_info);
4779                kfree(scaling_info);
4780                kfree(stream_update);
4781                return false;
4782        }
4783
4784        memset(updates, 0, sizeof(updates));
4785
4786        stream_update->src = dc_stream->src;
4787        stream_update->dst = dc_stream->dst;
4788        stream_update->out_transfer_func = dc_stream->out_transfer_func;
4789
4790        if (dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level) {
4791                abm_level = dm_new_crtc_state->abm_level;
4792                stream_update->abm_level = &abm_level;
4793        }
4794
4795        for (i = 0; i < new_plane_count; i++) {
4796                updates[i].surface = plane_states[i];
4797                updates[i].gamma =
4798                        (struct dc_gamma *)plane_states[i]->gamma_correction;
4799                updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
4800                flip_addr[i].address = plane_states[i]->address;
4801                flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
4802                plane_info[i].color_space = plane_states[i]->color_space;
4803                plane_info[i].format = plane_states[i]->format;
4804                plane_info[i].plane_size = plane_states[i]->plane_size;
4805                plane_info[i].rotation = plane_states[i]->rotation;
4806                plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
4807                plane_info[i].stereo_format = plane_states[i]->stereo_format;
4808                plane_info[i].tiling_info = plane_states[i]->tiling_info;
4809                plane_info[i].visible = plane_states[i]->visible;
4810                plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
4811                plane_info[i].dcc = plane_states[i]->dcc;
4812                scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
4813                scaling_info[i].src_rect = plane_states[i]->src_rect;
4814                scaling_info[i].dst_rect = plane_states[i]->dst_rect;
4815                scaling_info[i].clip_rect = plane_states[i]->clip_rect;
4816
4817                updates[i].flip_addr = &flip_addr[i];
4818                updates[i].plane_info = &plane_info[i];
4819                updates[i].scaling_info = &scaling_info[i];
4820        }
4821
4822        mutex_lock(&dm->dc_lock);
4823        dc_commit_updates_for_stream(
4824                        dc,
4825                        updates,
4826                        new_plane_count,
4827                        dc_stream, stream_update, plane_states, state);
4828        mutex_unlock(&dm->dc_lock);
4829
4830        kfree(flip_addr);
4831        kfree(plane_info);
4832        kfree(scaling_info);
4833        kfree(stream_update);
4834        return true;
4835}
4836
/*
 * amdgpu_dm_commit_planes() - program plane updates and page flips for one CRTC
 * @state: overall atomic state being committed
 * @dc_state: DC state that flips/stream commits are programmed against
 * @dev: DRM device (unused directly here, kept for call-site symmetry)
 * @dm: display manager; owns dc_lock taken inside commit_planes_to_stream()
 * @pcrtc: the CRTC whose planes this call handles
 * @wait_for_vblank: in/out; set to false when a DRM_MODE_PAGE_FLIP_ASYNC flip
 *	is requested on this CRTC, true otherwise, so the caller knows whether
 *	to wait for flip completion
 *
 * Walks every plane in @state that targets @pcrtc. Cursor planes are handled
 * immediately; other planes either get collected for a full stream commit
 * (modeset path) or flipped individually via amdgpu_dm_do_flip() (pageflip
 * path).
 */
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool *wait_for_vblank)
{
	uint32_t i;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct dc_stream_state *dc_stream_attach;
	/* dc_plane_states gathered for a single commit_planes_to_stream() call */
	struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0;
	unsigned long flags;
	u64 last_flip_vblank;
	bool vrr_active = acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		bool pflip_needed;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor moves/updates take a dedicated fast path. */
		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
			handle_cursor_update(plane, old_plane_state);
			continue;
		}

		/* Only planes with a framebuffer that target this CRTC. */
		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		/* No modeset allowed => this is a pure pageflip commit. */
		pflip_needed = !state->allow_modeset;

		/* event_lock guards pflip_status and last_flip_vblank (ISR-shared). */
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
			DRM_ERROR("%s: acrtc %d, already busy\n",
				  __func__,
				  acrtc_attach->crtc_id);
			/* In commit tail framework this cannot happen */
			WARN_ON(1);
		}

		/* For variable refresh rate mode only:
		 * Get vblank of last completed flip to avoid > 1 vrr flips per
		 * video frame by use of throttling, but allow flip programming
		 * anywhere in the possibly large variable vrr vblank interval
		 * for fine-grained flip timing control and more opportunity to
		 * avoid stutter on late submission of amdgpu_dm_do_flip() calls.
		 */
		last_flip_vblank = acrtc_attach->last_flip_vblank;

		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

		if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) {
			/* Modeset/overlay path: batch plane states for one
			 * stream commit after the loop.
			 */
			WARN_ON(!dm_new_plane_state->dc_state);

			plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;

			dc_stream_attach = acrtc_state->stream;
			planes_count++;

		} else if (new_crtc_state->planes_changed) {
			/* Assume even ONE crtc with immediate flip means
			 * entire can't wait for VBLANK
			 * TODO Check if it's correct
			 */
			*wait_for_vblank =
					new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
				false : true;

			/* TODO: Needs rework for multiplane flip */
			if (plane->type == DRM_PLANE_TYPE_PRIMARY)
				drm_crtc_vblank_get(crtc);

			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., clients
			 * using GLX_OML_sync_control extension.
			 */
			if (!vrr_active)
				last_flip_vblank = drm_crtc_vblank_count(crtc);

			amdgpu_dm_do_flip(
				crtc,
				fb,
				(uint32_t) last_flip_vblank + *wait_for_vblank,
				dc_state);
		}

	}

	if (planes_count) {
		unsigned long flags;

		/* Arm the flip ISR so the pending event fires on completion. */
		if (new_pcrtc_state->event) {

			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			prepare_flip_isr(acrtc_attach);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		dc_stream_attach->abm_level = acrtc_state->abm_level;

		if (false == commit_planes_to_stream(dm,
							dm->dc,
							plane_states_constructed,
							planes_count,
							acrtc_state,
							dm_old_crtc_state,
							dc_state))
			dm_error("%s: Failed to attach plane!\n", __func__);
	} else {
		/*TODO BUG Here should go disable planes on CRTC. */
	}
}
4966
4967/*
4968 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
4969 * @crtc_state: the DRM CRTC state
4970 * @stream_state: the DC stream state.
4971 *
4972 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
4973 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
4974 */
4975static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
4976                                                struct dc_stream_state *stream_state)
4977{
4978        stream_state->mode_changed =
4979                crtc_state->mode_changed || crtc_state->active_changed;
4980}
4981
4982static int amdgpu_dm_atomic_commit(struct drm_device *dev,
4983                                   struct drm_atomic_state *state,
4984                                   bool nonblock)
4985{
4986        struct drm_crtc *crtc;
4987        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4988        struct amdgpu_device *adev = dev->dev_private;
4989        int i;
4990
4991        /*
4992         * We evade vblanks and pflips on crtc that
4993         * should be changed. We do it here to flush & disable
4994         * interrupts before drm_swap_state is called in drm_atomic_helper_commit
4995         * it will update crtc->dm_crtc_state->stream pointer which is used in
4996         * the ISRs.
4997         */
4998        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4999                struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5000                struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5001                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5002
5003                if (drm_atomic_crtc_needs_modeset(new_crtc_state)
5004                    && dm_old_crtc_state->stream) {
5005                        /*
5006                         * CRC capture was enabled but not disabled.
5007                         * Release the vblank reference.
5008                         */
5009                        if (dm_new_crtc_state->crc_enabled) {
5010                                drm_crtc_vblank_put(crtc);
5011                                dm_new_crtc_state->crc_enabled = false;
5012                        }
5013
5014                        manage_dm_interrupts(adev, acrtc, false);
5015                }
5016        }
5017        /*
5018         * Add check here for SoC's that support hardware cursor plane, to
5019         * unset legacy_cursor_update
5020         */
5021
5022        return drm_atomic_helper_commit(dev, state, nonblock);
5023
5024        /*TODO Handle EINTR, reenable IRQ*/
5025}
5026
5027/**
5028 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
5029 * @state: The atomic state to commit
5030 *
5031 * This will tell DC to commit the constructed DC state from atomic_check,
5032 * programming the hardware. Any failures here implies a hardware failure, since
5033 * atomic check should have filtered anything non-kosher.
5034 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	/* dc_state_temp is only set (and must be released) on the no-change path */
	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	/* Number of CRTCs that went active -> inactive; one runtime-PM put each */
	int crtc_disable_count = 0;

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	/* Pick the DC state built during atomic_check, or snapshot current HW */
	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
	} else {
		/* No state changes, retain current state. */
		dc_state_temp = dc_create_state();
		ASSERT(dc_state_temp);
		dc_state = dc_state_temp;
		dc_resource_state_copy_construct_current(dm->dc, dc_state);
	}

	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Copy all transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/* handles headless hotplug case, updating new_state and
		 * aconnector as needed
		 */

		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * this could happen because of issues with
				 * userspace notifications delivery.
				 * In this case userspace tries to set mode on
				 * display which is disconnected in fact.
				 * dc_sink is NULL in this case on aconnector.
				 * We expect reset mode will come soon.
				 *
				 * This can also happen when unplug is done
				 * during resume sequence ended
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						__func__, acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			/* Balanced by the pm_runtime_put in the disable path below */
			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);

			/* i.e. reset mode */
			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
		}
	} /* for_each_crtc_in_state() */

	/* Program the constructed DC state into hardware, under dc_lock */
	if (dc_state) {
		dm_enable_per_frame_crtc_master_sync(dc_state);
		mutex_lock(&dm->dc_lock);
		WARN_ON(!dc_commit_state(dm->dc, dc_state));
		mutex_unlock(&dm->dc_lock);
	}

	/* Record which OTG instance each active CRTC's stream landed on */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
					dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_state_get_stream_status(dc_state,
								    dm_new_crtc_state->stream);

			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}

	/* Handle scaling, underscan, and abm changes*/
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_stream_status *status = NULL;

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;


		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* Skip anything that is not scaling or underscan changes */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state) &&
				(dm_new_crtc_state->abm_level == dm_old_crtc_state->abm_level))
			continue;

		update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
				dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);

		if (!dm_new_crtc_state->stream)
			continue;

		status = dc_stream_get_status(dm_new_crtc_state->stream);
		WARN_ON(!status);
		WARN_ON(!status->plane_count);

		dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

		/*TODO How it works with MPO ?*/
		if (!commit_planes_to_stream(
				dm,
				dm->dc,
				status->plane_states,
				status->plane_count,
				dm_new_crtc_state,
				to_dm_crtc_state(old_crtc_state),
				dc_state))
			dm_error("%s: Failed to update stream scaling!\n", __func__);
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
			new_crtc_state, i) {
		/*
		 * loop to enable interrupts on newly arrived crtc
		 */
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
		bool modeset_needed;

		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
		modeset_needed = modeset_required(
				new_crtc_state,
				dm_new_crtc_state->stream,
				dm_old_crtc_state->stream);

		if (dm_new_crtc_state->stream == NULL || !modeset_needed)
			continue;

		manage_dm_interrupts(adev, acrtc, true);
	}

	/* update planes when needed per crtc*/
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dc_state, dev,
						dm, crtc, &wait_for_vblank);
	}


	/*
	 * send vblank event on all events not handled in flip and
	 * mark consumed event for drm_atomic_helper_commit_hw_done
	 */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);


	/* wait_for_vblank is cleared by amdgpu_dm_commit_planes on async flips */
	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	/*
	 * FIXME:
	 * Delay hw_done() until flip_done() is signaled. This is to block
	 * another commit from freeing the CRTC state while we're still
	 * waiting on flip_done.
	 */
	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore
	 */
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);

	if (dc_state_temp)
		dc_release_state(dc_state_temp);
}
5286
5287
5288static int dm_force_atomic_commit(struct drm_connector *connector)
5289{
5290        int ret = 0;
5291        struct drm_device *ddev = connector->dev;
5292        struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
5293        struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
5294        struct drm_plane *plane = disconnected_acrtc->base.primary;
5295        struct drm_connector_state *conn_state;
5296        struct drm_crtc_state *crtc_state;
5297        struct drm_plane_state *plane_state;
5298
5299        if (!state)
5300                return -ENOMEM;
5301
5302        state->acquire_ctx = ddev->mode_config.acquire_ctx;
5303
5304        /* Construct an atomic state to restore previous display setting */
5305
5306        /*
5307         * Attach connectors to drm_atomic_state
5308         */
5309        conn_state = drm_atomic_get_connector_state(state, connector);
5310
5311        ret = PTR_ERR_OR_ZERO(conn_state);
5312        if (ret)
5313                goto err;
5314
5315        /* Attach crtc to drm_atomic_state*/
5316        crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
5317
5318        ret = PTR_ERR_OR_ZERO(crtc_state);
5319        if (ret)
5320                goto err;
5321
5322        /* force a restore */
5323        crtc_state->mode_changed = true;
5324
5325        /* Attach plane to drm_atomic_state */
5326        plane_state = drm_atomic_get_plane_state(state, plane);
5327
5328        ret = PTR_ERR_OR_ZERO(plane_state);
5329        if (ret)
5330                goto err;
5331
5332
5333        /* Call commit internally with the state we just constructed */
5334        ret = drm_atomic_commit(state);
5335        if (!ret)
5336                return 0;
5337
5338err:
5339        DRM_ERROR("Restoring old state failed with %i\n", ret);
5340        drm_atomic_state_put(state);
5341
5342        return ret;
5343}
5344
/*
 * This function handles all cases when set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager support
 */
5350void dm_restore_drm_connector_state(struct drm_device *dev,
5351                                    struct drm_connector *connector)
5352{
5353        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5354        struct amdgpu_crtc *disconnected_acrtc;
5355        struct dm_crtc_state *acrtc_state;
5356
5357        if (!aconnector->dc_sink || !connector->state || !connector->encoder)
5358                return;
5359
5360        disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
5361        if (!disconnected_acrtc)
5362                return;
5363
5364        acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
5365        if (!acrtc_state->stream)
5366                return;
5367
5368        /*
5369         * If the previous sink is not released and different from the current,
5370         * we deduce we are in a state where we can not rely on usermode call
5371         * to turn on the display, so we do it here
5372         */
5373        if (acrtc_state->stream->sink != aconnector->dc_sink)
5374                dm_force_atomic_commit(&aconnector->base);
5375}
5376
/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Add all modeset locks to the state's acquire_ctx so the framework
	 * releases these extra locks for us when it releases the context.
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/*
		 * Take a reference on the newest pending commit while holding
		 * commit_lock, then wait on it outside the spinlock.
		 */
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
				struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming completed and
		 * page flips done
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		/* ret == 0 means a 10s timeout expired on hw_done or flip_done */
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
				  "timed out\n", crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	/* ret < 0 means the wait was interrupted; propagate that error */
	return ret < 0 ? ret : 0;
}
5427
5428static void get_freesync_config_for_crtc(
5429        struct dm_crtc_state *new_crtc_state,
5430        struct dm_connector_state *new_con_state)
5431{
5432        struct mod_freesync_config config = {0};
5433        struct amdgpu_dm_connector *aconnector =
5434                        to_amdgpu_dm_connector(new_con_state->base.connector);
5435
5436        new_crtc_state->vrr_supported = new_con_state->freesync_capable;
5437
5438        if (new_con_state->freesync_capable) {
5439                config.state = new_crtc_state->base.vrr_enabled ?
5440                                VRR_STATE_ACTIVE_VARIABLE :
5441                                VRR_STATE_INACTIVE;
5442                config.min_refresh_in_uhz =
5443                                aconnector->min_vfreq * 1000000;
5444                config.max_refresh_in_uhz =
5445                                aconnector->max_vfreq * 1000000;
5446                config.vsif_supported = true;
5447                config.btr = true;
5448        }
5449
5450        new_crtc_state->freesync_config = config;
5451}
5452
5453static void reset_freesync_config_for_crtc(
5454        struct dm_crtc_state *new_crtc_state)
5455{
5456        new_crtc_state->vrr_supported = false;
5457
5458        memset(&new_crtc_state->vrr_params, 0,
5459               sizeof(new_crtc_state->vrr_params));
5460        memset(&new_crtc_state->vrr_infopacket, 0,
5461               sizeof(new_crtc_state->vrr_infopacket));
5462}
5463
5464static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
5465                                 struct drm_atomic_state *state,
5466                                 bool enable,
5467                                 bool *lock_and_validation_needed)
5468{
5469        struct dm_atomic_state *dm_state = NULL;
5470        struct drm_crtc *crtc;
5471        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5472        int i;
5473        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
5474        struct dc_stream_state *new_stream;
5475        int ret = 0;
5476
5477        /*
5478         * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
5479         * update changed items
5480         */
5481        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5482                struct amdgpu_crtc *acrtc = NULL;
5483                struct amdgpu_dm_connector *aconnector = NULL;
5484                struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
5485                struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
5486                struct drm_plane_state *new_plane_state = NULL;
5487
5488                new_stream = NULL;
5489
5490                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5491                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5492                acrtc = to_amdgpu_crtc(crtc);
5493
5494                new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
5495
5496                if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
5497                        ret = -EINVAL;
5498                        goto fail;
5499                }
5500
5501                aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
5502
5503                /* TODO This hack should go away */
5504                if (aconnector && enable) {
5505                        /* Make sure fake sink is created in plug-in scenario */
5506                        drm_new_conn_state = drm_atomic_get_new_connector_state(state,
5507                                                                    &aconnector->base);
5508                        drm_old_conn_state = drm_atomic_get_old_connector_state(state,
5509                                                                    &aconnector->base);
5510
5511                        if (IS_ERR(drm_new_conn_state)) {
5512                                ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
5513                                break;
5514                        }
5515
5516                        dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
5517                        dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
5518
5519                        new_stream = create_stream_for_sink(aconnector,
5520                                                             &new_crtc_state->mode,
5521                                                            dm_new_conn_state,
5522                                                            dm_old_crtc_state->stream);
5523
5524                        /*
5525                         * we can have no stream on ACTION_SET if a display
5526                         * was disconnected during S3, in this case it is not an
5527                         * error, the OS will be updated after detection, and
5528                         * will do the right thing on next atomic commit
5529                         */
5530
5531                        if (!new_stream) {
5532                                DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
5533                                                __func__, acrtc->base.base.id);
5534                                break;
5535                        }
5536
5537                        dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
5538
5539                        if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
5540                            dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
5541                                new_crtc_state->mode_changed = false;
5542                                DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
5543                                                 new_crtc_state->mode_changed);
5544                        }
5545                }
5546
5547                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
5548                        goto next_crtc;
5549
5550                DRM_DEBUG_DRIVER(
5551                        "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
5552                        "planes_changed:%d, mode_changed:%d,active_changed:%d,"
5553                        "connectors_changed:%d\n",
5554                        acrtc->crtc_id,
5555                        new_crtc_state->enable,
5556                        new_crtc_state->active,
5557                        new_crtc_state->planes_changed,
5558                        new_crtc_state->mode_changed,
5559                        new_crtc_state->active_changed,
5560                        new_crtc_state->connectors_changed);
5561
5562                /* Remove stream for any changed/disabled CRTC */
5563                if (!enable) {
5564
5565                        if (!dm_old_crtc_state->stream)
5566                                goto next_crtc;
5567
5568                        ret = dm_atomic_get_state(state, &dm_state);
5569                        if (ret)
5570                                goto fail;
5571
5572                        DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
5573                                        crtc->base.id);
5574
5575                        /* i.e. reset mode */
5576                        if (dc_remove_stream_from_ctx(
5577                                        dm->dc,
5578                                        dm_state->context,
5579                                        dm_old_crtc_state->stream) != DC_OK) {
5580                                ret = -EINVAL;
5581                                goto fail;
5582                        }
5583
5584                        dc_stream_release(dm_old_crtc_state->stream);
5585                        dm_new_crtc_state->stream = NULL;
5586
5587                        reset_freesync_config_for_crtc(dm_new_crtc_state);
5588
5589                        *lock_and_validation_needed = true;
5590
5591                } else {/* Add stream for any updated/enabled CRTC */
5592                        /*
5593                         * Quick fix to prevent NULL pointer on new_stream when
5594                         * added MST connectors not found in existing crtc_state in the chained mode
5595                         * TODO: need to dig out the root cause of that
5596                         */
5597                        if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
5598                                goto next_crtc;
5599
5600                        if (modereset_required(new_crtc_state))
5601                                goto next_crtc;
5602
5603                        if (modeset_required(new_crtc_state, new_stream,
5604                                             dm_old_crtc_state->stream)) {
5605
5606                                WARN_ON(dm_new_crtc_state->stream);
5607
5608                                ret = dm_atomic_get_state(state, &dm_state);
5609                                if (ret)
5610                                        goto fail;
5611
5612                                dm_new_crtc_state->stream = new_stream;
5613
5614                                dc_stream_retain(new_stream);
5615
5616                                DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
5617                                                        crtc->base.id);
5618
5619                                if (dc_add_stream_to_ctx(
5620                                                dm->dc,
5621                                                dm_state->context,
5622                                                dm_new_crtc_state->stream) != DC_OK) {
5623                                        ret = -EINVAL;
5624                                        goto fail;
5625                                }
5626
5627                                *lock_and_validation_needed = true;
5628                        }
5629                }
5630
5631next_crtc:
5632                /* Release extra reference */
5633                if (new_stream)
5634                         dc_stream_release(new_stream);
5635
5636                /*
5637                 * We want to do dc stream updates that do not require a
5638                 * full modeset below.
5639                 */
5640                if (!(enable && aconnector && new_crtc_state->enable &&
5641                      new_crtc_state->active))
5642                        continue;
5643                /*
5644                 * Given above conditions, the dc state cannot be NULL because:
5645                 * 1. We're in the process of enabling CRTCs (just been added
5646                 *    to the dc context, or already is on the context)
5647                 * 2. Has a valid connector attached, and
5648                 * 3. Is currently active and enabled.
5649                 * => The dc stream state currently exists.
5650                 */
5651                BUG_ON(dm_new_crtc_state->stream == NULL);
5652
5653                /* Scaling or underscan settings */
5654                if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
5655                        update_stream_scaling_settings(
5656                                &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
5657
5658                /*
5659                 * Color management settings. We also update color properties
5660                 * when a modeset is needed, to ensure it gets reprogrammed.
5661                 */
5662                if (dm_new_crtc_state->base.color_mgmt_changed ||
5663                    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
5664                        ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
5665                        if (ret)
5666                                goto fail;
5667                        amdgpu_dm_set_ctm(dm_new_crtc_state);
5668                }
5669
5670                /* Update Freesync settings. */
5671                get_freesync_config_for_crtc(dm_new_crtc_state,
5672                                             dm_new_conn_state);
5673        }
5674
5675        return ret;
5676
5677fail:
5678        if (new_stream)
5679                dc_stream_release(new_stream);
5680        return ret;
5681}
5682
5683static int dm_update_planes_state(struct dc *dc,
5684                                  struct drm_atomic_state *state,
5685                                  bool enable,
5686                                  bool *lock_and_validation_needed)
5687{
5688
5689        struct dm_atomic_state *dm_state = NULL;
5690        struct drm_crtc *new_plane_crtc, *old_plane_crtc;
5691        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5692        struct drm_plane *plane;
5693        struct drm_plane_state *old_plane_state, *new_plane_state;
5694        struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
5695        struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
5696        int i ;
5697        /* TODO return page_flip_needed() function */
5698        bool pflip_needed  = !state->allow_modeset;
5699        int ret = 0;
5700
5701
5702        /* Add new planes, in reverse order as DC expectation */
5703        for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
5704                new_plane_crtc = new_plane_state->crtc;
5705                old_plane_crtc = old_plane_state->crtc;
5706                dm_new_plane_state = to_dm_plane_state(new_plane_state);
5707                dm_old_plane_state = to_dm_plane_state(old_plane_state);
5708
5709                /*TODO Implement atomic check for cursor plane */
5710                if (plane->type == DRM_PLANE_TYPE_CURSOR)
5711                        continue;
5712
5713                /* Remove any changed/removed planes */
5714                if (!enable) {
5715                        if (pflip_needed &&
5716                            plane->type != DRM_PLANE_TYPE_OVERLAY)
5717                                continue;
5718
5719                        if (!old_plane_crtc)
5720                                continue;
5721
5722                        old_crtc_state = drm_atomic_get_old_crtc_state(
5723                                        state, old_plane_crtc);
5724                        dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5725
5726                        if (!dm_old_crtc_state->stream)
5727                                continue;
5728
5729                        DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
5730                                        plane->base.id, old_plane_crtc->base.id);
5731
5732                        ret = dm_atomic_get_state(state, &dm_state);
5733                        if (ret)
5734                                return ret;
5735
5736                        if (!dc_remove_plane_from_context(
5737                                        dc,
5738                                        dm_old_crtc_state->stream,
5739                                        dm_old_plane_state->dc_state,
5740                                        dm_state->context)) {
5741
5742                                ret = EINVAL;
5743                                return ret;
5744                        }
5745
5746
5747                        dc_plane_state_release(dm_old_plane_state->dc_state);
5748                        dm_new_plane_state->dc_state = NULL;
5749
5750                        *lock_and_validation_needed = true;
5751
5752                } else { /* Add new planes */
5753                        struct dc_plane_state *dc_new_plane_state;
5754
5755                        if (drm_atomic_plane_disabling(plane->state, new_plane_state))
5756                                continue;
5757
5758                        if (!new_plane_crtc)
5759                                continue;
5760
5761                        new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
5762                        dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5763
5764                        if (!dm_new_crtc_state->stream)
5765                                continue;
5766
5767                        if (pflip_needed &&
5768                            plane->type != DRM_PLANE_TYPE_OVERLAY)
5769                                continue;
5770
5771                        WARN_ON(dm_new_plane_state->dc_state);
5772
5773                        dc_new_plane_state = dc_create_plane_state(dc);
5774                        if (!dc_new_plane_state)
5775                                return -ENOMEM;
5776
5777                        DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
5778                                        plane->base.id, new_plane_crtc->base.id);
5779
5780                        ret = fill_plane_attributes(
5781                                new_plane_crtc->dev->dev_private,
5782                                dc_new_plane_state,
5783                                new_plane_state,
5784                                new_crtc_state);
5785                        if (ret) {
5786                                dc_plane_state_release(dc_new_plane_state);
5787                                return ret;
5788                        }
5789
5790                        ret = dm_atomic_get_state(state, &dm_state);
5791                        if (ret) {
5792                                dc_plane_state_release(dc_new_plane_state);
5793                                return ret;
5794                        }
5795
5796                        /*
5797                         * Any atomic check errors that occur after this will
5798                         * not need a release. The plane state will be attached
5799                         * to the stream, and therefore part of the atomic
5800                         * state. It'll be released when the atomic state is
5801                         * cleaned.
5802                         */
5803                        if (!dc_add_plane_to_context(
5804                                        dc,
5805                                        dm_new_crtc_state->stream,
5806                                        dc_new_plane_state,
5807                                        dm_state->context)) {
5808
5809                                dc_plane_state_release(dc_new_plane_state);
5810                                return -EINVAL;
5811                        }
5812
5813                        dm_new_plane_state->dc_state = dc_new_plane_state;
5814
5815                        /* Tell DC to do a full surface update every time there
5816                         * is a plane change. Inefficient, but works for now.
5817                         */
5818                        dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
5819
5820                        *lock_and_validation_needed = true;
5821                }
5822        }
5823
5824
5825        return ret;
5826}
5827
5828static int
5829dm_determine_update_type_for_commit(struct dc *dc,
5830                                    struct drm_atomic_state *state,
5831                                    enum surface_update_type *out_type)
5832{
5833        struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
5834        int i, j, num_plane, ret = 0;
5835        struct drm_plane_state *old_plane_state, *new_plane_state;
5836        struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
5837        struct drm_crtc *new_plane_crtc, *old_plane_crtc;
5838        struct drm_plane *plane;
5839
5840        struct drm_crtc *crtc;
5841        struct drm_crtc_state *new_crtc_state, *old_crtc_state;
5842        struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
5843        struct dc_stream_status *status = NULL;
5844
5845        struct dc_surface_update *updates = kzalloc(MAX_SURFACES * sizeof(struct dc_surface_update), GFP_KERNEL);
5846        struct dc_plane_state *surface = kzalloc(MAX_SURFACES * sizeof(struct dc_plane_state), GFP_KERNEL);
5847        struct dc_stream_update stream_update;
5848        enum surface_update_type update_type = UPDATE_TYPE_FAST;
5849
5850        if (!updates || !surface) {
5851                DRM_ERROR("Plane or surface update failed to allocate");
5852                /* Set type to FULL to avoid crashing in DC*/
5853                update_type = UPDATE_TYPE_FULL;
5854                goto cleanup;
5855        }
5856
5857        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5858                new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
5859                old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
5860                num_plane = 0;
5861
5862                if (new_dm_crtc_state->stream) {
5863
5864                        for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
5865                                new_plane_crtc = new_plane_state->crtc;
5866                                old_plane_crtc = old_plane_state->crtc;
5867                                new_dm_plane_state = to_dm_plane_state(new_plane_state);
5868                                old_dm_plane_state = to_dm_plane_state(old_plane_state);
5869
5870                                if (plane->type == DRM_PLANE_TYPE_CURSOR)
5871                                        continue;
5872
5873                                if (!state->allow_modeset)
5874                                        continue;
5875
5876                                if (crtc == new_plane_crtc) {
5877                                        updates[num_plane].surface = &surface[num_plane];
5878
5879                                        if (new_crtc_state->mode_changed) {
5880                                                updates[num_plane].surface->src_rect =
5881                                                                        new_dm_plane_state->dc_state->src_rect;
5882                                                updates[num_plane].surface->dst_rect =
5883                                                                        new_dm_plane_state->dc_state->dst_rect;
5884                                                updates[num_plane].surface->rotation =
5885                                                                        new_dm_plane_state->dc_state->rotation;
5886                                                updates[num_plane].surface->in_transfer_func =
5887                                                                        new_dm_plane_state->dc_state->in_transfer_func;
5888                                                stream_update.dst = new_dm_crtc_state->stream->dst;
5889                                                stream_update.src = new_dm_crtc_state->stream->src;
5890                                        }
5891
5892                                        if (new_crtc_state->color_mgmt_changed) {
5893                                                updates[num_plane].gamma =
5894                                                                new_dm_plane_state->dc_state->gamma_correction;
5895                                                updates[num_plane].in_transfer_func =
5896                                                                new_dm_plane_state->dc_state->in_transfer_func;
5897                                                stream_update.gamut_remap =
5898                                                                &new_dm_crtc_state->stream->gamut_remap_matrix;
5899                                                stream_update.out_transfer_func =
5900                                                                new_dm_crtc_state->stream->out_transfer_func;
5901                                        }
5902
5903                                        num_plane++;
5904                                }
5905                        }
5906
5907                        if (num_plane > 0) {
5908                                ret = dm_atomic_get_state(state, &dm_state);
5909                                if (ret)
5910                                        goto cleanup;
5911
5912                                old_dm_state = dm_atomic_get_old_state(state);
5913                                if (!old_dm_state) {
5914                                        ret = -EINVAL;
5915                                        goto cleanup;
5916                                }
5917
5918                                status = dc_state_get_stream_status(old_dm_state->context,
5919                                                                    new_dm_crtc_state->stream);
5920
5921                                update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
5922                                                                                  &stream_update, status);
5923
5924                                if (update_type > UPDATE_TYPE_MED) {
5925                                        update_type = UPDATE_TYPE_FULL;
5926                                        goto cleanup;
5927                                }
5928                        }
5929
5930                } else if (!new_dm_crtc_state->stream && old_dm_crtc_state->stream) {
5931                        update_type = UPDATE_TYPE_FULL;
5932                        goto cleanup;
5933                }
5934        }
5935
5936cleanup:
5937        kfree(updates);
5938        kfree(surface);
5939
5940        *out_type = update_type;
5941        return ret;
5942}
5943
5944/**
5945 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
5946 * @dev: The DRM device
5947 * @state: The atomic state to commit
5948 *
5949 * Validate that the given atomic state is programmable by DC into hardware.
5950 * This involves constructing a &struct dc_state reflecting the new hardware
5951 * state we wish to commit, then querying DC to see if it is programmable. It's
5952 * important not to modify the existing DC state. Otherwise, atomic_check
5953 * may unexpectedly commit hardware changes.
5954 *
5955 * When validating the DC state, it's important that the right locks are
5956 * acquired. For full updates case which removes/adds/updates streams on one
5957 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
5958 * that any such full update commit will wait for completion of any outstanding
5959 * flip using DRMs synchronization events. See
5960 * dm_determine_update_type_for_commit()
5961 *
5962 * Note that DM adds the affected connectors for all CRTCs in state, when that
5963 * might not seem necessary. This is because DC stream creation requires the
5964 * DC sink, which is tied to the DRM connector state. Cleaning this up should
5965 * be possible but non-trivial - a possible TODO item.
5966 *
5967 * Return: -Error code if validation failed.
5968 */
5969static int amdgpu_dm_atomic_check(struct drm_device *dev,
5970                                  struct drm_atomic_state *state)
5971{
5972        struct amdgpu_device *adev = dev->dev_private;
5973        struct dm_atomic_state *dm_state = NULL;
5974        struct dc *dc = adev->dm.dc;
5975        struct drm_connector *connector;
5976        struct drm_connector_state *old_con_state, *new_con_state;
5977        struct drm_crtc *crtc;
5978        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5979        enum surface_update_type update_type = UPDATE_TYPE_FAST;
5980        enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
5981
5982        int ret, i;
5983
5984        /*
5985         * This bool will be set for true for any modeset/reset
5986         * or plane update which implies non fast surface update.
5987         */
5988        bool lock_and_validation_needed = false;
5989
5990        ret = drm_atomic_helper_check_modeset(dev, state);
5991        if (ret)
5992                goto fail;
5993
5994        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5995                if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
5996                    !new_crtc_state->color_mgmt_changed &&
5997                    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
5998                        continue;
5999
6000                if (!new_crtc_state->enable)
6001                        continue;
6002
6003                ret = drm_atomic_add_affected_connectors(state, crtc);
6004                if (ret)
6005                        return ret;
6006
6007                ret = drm_atomic_add_affected_planes(state, crtc);
6008                if (ret)
6009                        goto fail;
6010        }
6011
6012        /* Remove exiting planes if they are modified */
6013        ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
6014        if (ret) {
6015                goto fail;
6016        }
6017
6018        /* Disable all crtcs which require disable */
6019        ret = dm_update_crtcs_state(&adev->dm, state, false, &lock_and_validation_needed);
6020        if (ret) {
6021                goto fail;
6022        }
6023
6024        /* Enable all crtcs which require enable */
6025        ret = dm_update_crtcs_state(&adev->dm, state, true, &lock_and_validation_needed);
6026        if (ret) {
6027                goto fail;
6028        }
6029
6030        /* Add new/modified planes */
6031        ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);
6032        if (ret) {
6033                goto fail;
6034        }
6035
6036        /* Run this here since we want to validate the streams we created */
6037        ret = drm_atomic_helper_check_planes(dev, state);
6038        if (ret)
6039                goto fail;
6040
6041        /* Check scaling and underscan changes*/
6042        /* TODO Removed scaling changes validation due to inability to commit
6043         * new stream into context w\o causing full reset. Need to
6044         * decide how to handle.
6045         */
6046        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6047                struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
6048                struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
6049                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
6050
6051                /* Skip any modesets/resets */
6052                if (!acrtc || drm_atomic_crtc_needs_modeset(
6053                                drm_atomic_get_new_crtc_state(state, &acrtc->base)))
6054                        continue;
6055
6056                /* Skip any thing not scale or underscan changes */
6057                if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
6058                        continue;
6059
6060                overall_update_type = UPDATE_TYPE_FULL;
6061                lock_and_validation_needed = true;
6062        }
6063
6064        ret = dm_determine_update_type_for_commit(dc, state, &update_type);
6065        if (ret)
6066                goto fail;
6067
6068        if (overall_update_type < update_type)
6069                overall_update_type = update_type;
6070
6071        /*
6072         * lock_and_validation_needed was an old way to determine if we need to set
6073         * the global lock. Leaving it in to check if we broke any corner cases
6074         * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
6075         * lock_and_validation_needed false = UPDATE_TYPE_FAST
6076         */
6077        if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
6078                WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
6079        else if (!lock_and_validation_needed && overall_update_type > UPDATE_TYPE_FAST)
6080                WARN(1, "Global lock should NOT be set, overall_update_type should be UPDATE_TYPE_FAST");
6081
6082
6083        if (overall_update_type > UPDATE_TYPE_FAST) {
6084                ret = dm_atomic_get_state(state, &dm_state);
6085                if (ret)
6086                        goto fail;
6087
6088                ret = do_aquire_global_lock(dev, state);
6089                if (ret)
6090                        goto fail;
6091
6092                if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
6093                        ret = -EINVAL;
6094                        goto fail;
6095                }
6096        } else if (state->legacy_cursor_update) {
6097                /*
6098                 * This is a fast cursor update coming from the plane update
6099                 * helper, check if it can be done asynchronously for better
6100                 * performance.
6101                 */
6102                state->async_update = !drm_atomic_helper_async_check(dev, state);
6103        }
6104
6105        /* Must be success */
6106        WARN_ON(ret);
6107        return ret;
6108
6109fail:
6110        if (ret == -EDEADLK)
6111                DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
6112        else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
6113                DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
6114        else
6115                DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
6116
6117        return ret;
6118}
6119
6120static bool is_dp_capable_without_timing_msa(struct dc *dc,
6121                                             struct amdgpu_dm_connector *amdgpu_dm_connector)
6122{
6123        uint8_t dpcd_data;
6124        bool capable = false;
6125
6126        if (amdgpu_dm_connector->dc_link &&
6127                dm_helpers_dp_read_dpcd(
6128                                NULL,
6129                                amdgpu_dm_connector->dc_link,
6130                                DP_DOWN_STREAM_PORT_COUNT,
6131                                &dpcd_data,
6132                                sizeof(dpcd_data))) {
6133                capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
6134        }
6135
6136        return capable;
6137}
6138void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
6139                                        struct edid *edid)
6140{
6141        int i;
6142        bool edid_check_required;
6143        struct detailed_timing *timing;
6144        struct detailed_non_pixel *data;
6145        struct detailed_data_monitor_range *range;
6146        struct amdgpu_dm_connector *amdgpu_dm_connector =
6147                        to_amdgpu_dm_connector(connector);
6148        struct dm_connector_state *dm_con_state = NULL;
6149
6150        struct drm_device *dev = connector->dev;
6151        struct amdgpu_device *adev = dev->dev_private;
6152        bool freesync_capable = false;
6153
6154        if (!connector->state) {
6155                DRM_ERROR("%s - Connector has no state", __func__);
6156                goto update;
6157        }
6158
6159        if (!edid) {
6160                dm_con_state = to_dm_connector_state(connector->state);
6161
6162                amdgpu_dm_connector->min_vfreq = 0;
6163                amdgpu_dm_connector->max_vfreq = 0;
6164                amdgpu_dm_connector->pixel_clock_mhz = 0;
6165
6166                goto update;
6167        }
6168
6169        dm_con_state = to_dm_connector_state(connector->state);
6170
6171        edid_check_required = false;
6172        if (!amdgpu_dm_connector->dc_sink) {
6173                DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
6174                goto update;
6175        }
6176        if (!adev->dm.freesync_module)
6177                goto update;
6178        /*
6179         * if edid non zero restrict freesync only for dp and edp
6180         */
6181        if (edid) {
6182                if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
6183                        || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
6184                        edid_check_required = is_dp_capable_without_timing_msa(
6185                                                adev->dm.dc,
6186                                                amdgpu_dm_connector);
6187                }
6188        }
6189        if (edid_check_required == true && (edid->version > 1 ||
6190           (edid->version == 1 && edid->revision > 1))) {
6191                for (i = 0; i < 4; i++) {
6192
6193                        timing  = &edid->detailed_timings[i];
6194                        data    = &timing->data.other_data;
6195                        range   = &data->data.range;
6196                        /*
6197                         * Check if monitor has continuous frequency mode
6198                         */
6199                        if (data->type != EDID_DETAIL_MONITOR_RANGE)
6200                                continue;
6201                        /*
6202                         * Check for flag range limits only. If flag == 1 then
6203                         * no additional timing information provided.
6204                         * Default GTF, GTF Secondary curve and CVT are not
6205                         * supported
6206                         */
6207                        if (range->flags != 1)
6208                                continue;
6209
6210                        amdgpu_dm_connector->min_vfreq = range->min_vfreq;
6211                        amdgpu_dm_connector->max_vfreq = range->max_vfreq;
6212                        amdgpu_dm_connector->pixel_clock_mhz =
6213                                range->pixel_clock_mhz * 10;
6214                        break;
6215                }
6216
6217                if (amdgpu_dm_connector->max_vfreq -
6218                    amdgpu_dm_connector->min_vfreq > 10) {
6219
6220                        freesync_capable = true;
6221                }
6222        }
6223
6224update:
6225        if (dm_con_state)
6226                dm_con_state->freesync_capable = freesync_capable;
6227
6228        if (connector->vrr_capable_property)
6229                drm_connector_set_vrr_capable_property(connector,
6230                                                       freesync_capable);
6231}
6232
6233