linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU		"amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

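/*
 * Reflect the type of the attached DP dongle (if any) on the connector's
 * DP subconnector property, so userspace can tell what sits downstream
 * of a DisplayPort connector.
 */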
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* Removes and deallocates the DRM structures created by the above function. */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

/**
 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
 * @adev: the desired amdgpu device
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: the number of vertical blanks seen on the CRTC's stream, or 0
 * if the CRTC index is out of range or no stream is attached.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}

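/*
 * Report the current scanout position of @crtc in the register-style
 * layout the base driver expects: vertical position in the low word and
 * horizontal position in the high word of @position, with the vblank
 * start/end lines packed into @vbl the same way.
 */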
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;
	struct amdgpu_crtc *acrtc;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	/*
	 * TODO: rework base driver to use values directly.
	 * For now parse it back into reg-format.
	 */
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
				 &v_blank_start,
				 &v_blank_end,
				 &h_position,
				 &v_position);

	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

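/*
 * Look up the amdgpu_crtc driven by the given OTG (output timing
 * generator) instance. Falls back to the first CRTC, with a warning,
 * if the instance is invalid.
 */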
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

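/* True while FreeSync/VRR is active on the CRTC, in either the variable
 * or the fixed refresh rate state.
 */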
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: the common IRQ parameters, used to look up the
 *                    device and the CRTC the flip completed on
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at the start of the
	 * vblank of flip completion, so last_flip_vblank is the forbidden
	 * count for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

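/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: the common IRQ parameters, used to look up the CRTC
 *
 * In VRR mode, core vblank handling is deferred to this point, after the
 * end of front-porch, so that vblank timestamps are valid. BTR is also
 * processed here for pre-DCE12 (pre-Vega) ASICs.
 */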
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after the end of
		 * front-porch in VRR mode, as vblank timestamping only gives
		 * valid results once scanout is past the front-porch. This
		 * also delivers any page-flip completion events that were
		 * queued to us while inside the front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/* Core vblank handling at the start of front-porch is only possible
	 * in non-VRR mode, as only there vblank timestamping gives valid
	 * results while still inside front-porch. Otherwise defer it to
	 * dm_vupdate_high_irq after the end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/* The following must happen at the start of vblank, for CRC
	 * computation and below-the-range (BTR) support in VRR mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

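/*
 * Return the ELD (EDID-Like Data) of the connector mapped to the given
 * audio @port, copying at most @max_bytes of it into @buf. Called by the
 * HDA driver through the DRM audio component framework.
 */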
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

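/* Register DM as a DRM audio component provider and describe its audio pins. */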
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

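/*
 * Bring up the DMUB (display microcontroller) service: copy the firmware
 * and VBIOS into the reserved framebuffer windows, program the hardware,
 * wait for the firmware to auto-load, and hand the running service over
 * to DC.
 */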
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load the firmware's
	 * fw_inst_const part into cw0; otherwise, the backdoor firmware
	 * load is done here in dm_dmub_hw_init itself.
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
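/*
 * Translate the GMC address space layout (system aperture, AGP window
 * and GART page table) into the dc_phy_addr_space_config consumed by DC.
 */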
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that makes it unable to use vram
		 * beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. Work around it by
		 * raising the system aperture high address (by 1) to get
		 * rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
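/*
 * Deferred work that counts how many CRTCs currently have vblank IRQs
 * enabled and only lets DC apply idle (MALL stutter) optimizations when
 * none of them do.
 */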
static void event_mall_stutter(struct work_struct *work)
{
	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	mutex_unlock(&dm->dc_lock);
}

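/*
 * Allocate one work item per possible link so vblank enable/disable
 * events can each schedule event_mall_stutter() independently.
 */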
static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	struct vblank_workqueue *vblank_work;
	int i = 0;

	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(vblank_work)) {
		kfree(vblank_work);
		return NULL;
	}

	for (i = 0; i < max_caps; i++)
		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

	return vblank_work;
}
#endif
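/*
 * Create and initialize the display manager: DM IRQ handling, the DC
 * core, DMUB, FreeSync and (optionally) HDCP modules, and the
 * DRM-facing KMS objects.
 */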
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		/* Call the DC init_memory func */
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module)
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

		if (!adev->dm.vblank_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	/* adev->dm.dc may still be NULL if dc_create() failed during init. */
	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}

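/*
 * Request and validate DMCU firmware for the ASICs that ship a separate
 * DMCU image, and register it for PSP loading. Most ASICs need no DMCU
 * firmware and simply return 0 here.
 */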
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
		    ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

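/* Register accessors handed to the DMUB service; both route through DC's context. */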
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

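/*
 * Software-side DMUB setup: pick the per-ASIC firmware, create the DMUB
 * service, size its memory regions and back them with a VRAM allocation.
 */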
1341static int dm_dmub_sw_init(struct amdgpu_device *adev)
1342{
1343        struct dmub_srv_create_params create_params;
1344        struct dmub_srv_region_params region_params;
1345        struct dmub_srv_region_info region_info;
1346        struct dmub_srv_fb_params fb_params;
1347        struct dmub_srv_fb_info *fb_info;
1348        struct dmub_srv *dmub_srv;
1349        const struct dmcub_firmware_header_v1_0 *hdr;
1350        const char *fw_name_dmub;
1351        enum dmub_asic dmub_asic;
1352        enum dmub_status status;
1353        int r;
1354
1355        switch (adev->asic_type) {
1356        case CHIP_RENOIR:
1357                dmub_asic = DMUB_ASIC_DCN21;
1358                fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1359                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1360                        fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1361                break;
1362        case CHIP_SIENNA_CICHLID:
1363                dmub_asic = DMUB_ASIC_DCN30;
1364                fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1365                break;
1366        case CHIP_NAVY_FLOUNDER:
1367                dmub_asic = DMUB_ASIC_DCN30;
1368                fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1369                break;
1370        case CHIP_VANGOGH:
1371                dmub_asic = DMUB_ASIC_DCN301;
1372                fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1373                break;
1374        case CHIP_DIMGREY_CAVEFISH:
1375                dmub_asic = DMUB_ASIC_DCN302;
1376                fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1377                break;
1378
1379        default:
1380                /* ASIC doesn't support DMUB. */
1381                return 0;
1382        }
1383
1384        r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1385        if (r) {
1386                DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1387                return 0;
1388        }
1389
1390        r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1391        if (r) {
1392                DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1393                return 0;
1394        }
1395
1396        hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1397
1398        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1399                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1400                        AMDGPU_UCODE_ID_DMCUB;
1401                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1402                        adev->dm.dmub_fw;
1403                adev->firmware.fw_size +=
1404                        ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1405
1406                DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1407                         adev->dm.dmcub_fw_version);
1408        }
1409
1410        adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1411
1412        adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1413        dmub_srv = adev->dm.dmub_srv;
1414
1415        if (!dmub_srv) {
1416                DRM_ERROR("Failed to allocate DMUB service!\n");
1417                return -ENOMEM;
1418        }
1419
1420        memset(&create_params, 0, sizeof(create_params));
1421        create_params.user_ctx = adev;
1422        create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1423        create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1424        create_params.asic = dmub_asic;
1425
1426        /* Create the DMUB service. */
1427        status = dmub_srv_create(dmub_srv, &create_params);
1428        if (status != DMUB_STATUS_OK) {
1429                DRM_ERROR("Error creating DMUB service: %d\n", status);
1430                return -EINVAL;
1431        }
1432
1433        /* Calculate the size of all the regions for the DMUB service. */
1434        memset(&region_params, 0, sizeof(region_params));
1435
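        /*
         * A sketch of the firmware blob layout as consumed below (derived
         * from the offsets used in this function, not from a spec document):
         *
         *   ucode_array_offset -> | PSP header | inst const | PSP footer | bss/data |
         *
         * fw_inst_const points just past the PSP header, while fw_bss_data
         * starts after the whole inst_const_bytes region (header and footer
         * included), matching the inst_const_size math below.
         */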
1436        region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1437                                        PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1438        region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1439        region_params.vbios_size = adev->bios_size;
1440        region_params.fw_bss_data = region_params.bss_data_size ?
1441                adev->dm.dmub_fw->data +
1442                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1443                le32_to_cpu(hdr->inst_const_bytes) : NULL;
1444        region_params.fw_inst_const =
1445                adev->dm.dmub_fw->data +
1446                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1447                PSP_HEADER_BYTES;
1448
1449        status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1450                                           &region_info);
1451
1452        if (status != DMUB_STATUS_OK) {
1453                DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1454                return -EINVAL;
1455        }
1456
1457        /*
1458         * Allocate a framebuffer based on the total size of all the regions.
1459         * TODO: Move this into GART.
1460         */
1461        r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1462                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1463                                    &adev->dm.dmub_bo_gpu_addr,
1464                                    &adev->dm.dmub_bo_cpu_addr);
1465        if (r)
1466                return r;
1467
1468        /* Rebase the regions on the framebuffer address. */
1469        memset(&fb_params, 0, sizeof(fb_params));
1470        fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1471        fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1472        fb_params.region_info = &region_info;
1473
1474        adev->dm.dmub_fb_info =
1475                kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1476        fb_info = adev->dm.dmub_fb_info;
1477
1478        if (!fb_info) {
1479                DRM_ERROR(
1480                        "Failed to allocate framebuffer info for DMUB service!\n");
1481                return -ENOMEM;
1482        }
1483
1484        status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1485        if (status != DMUB_STATUS_OK) {
1486                DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1487                return -EINVAL;
1488        }
1489
1490        return 0;
1491}
1492
1493static int dm_sw_init(void *handle)
1494{
1495        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1496        int r;
1497
1498        r = dm_dmub_sw_init(adev);
1499        if (r)
1500                return r;
1501
1502        return load_dmcu_fw(adev);
1503}
1504
1505static int dm_sw_fini(void *handle)
1506{
1507        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1508
1509        kfree(adev->dm.dmub_fb_info);
1510        adev->dm.dmub_fb_info = NULL;
1511
1512        if (adev->dm.dmub_srv) {
1513                dmub_srv_destroy(adev->dm.dmub_srv);
1514                adev->dm.dmub_srv = NULL;
1515        }
1516
1517        release_firmware(adev->dm.dmub_fw);
1518        adev->dm.dmub_fw = NULL;
1519
1520        release_firmware(adev->dm.fw_dmcu);
1521        adev->dm.fw_dmcu = NULL;
1522
1523        return 0;
1524}
1525
1526static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1527{
1528        struct amdgpu_dm_connector *aconnector;
1529        struct drm_connector *connector;
1530        struct drm_connector_list_iter iter;
1531        int ret = 0;
1532
1533        drm_connector_list_iter_begin(dev, &iter);
1534        drm_for_each_connector_iter(connector, &iter) {
1535                aconnector = to_amdgpu_dm_connector(connector);
1536                if (aconnector->dc_link->type == dc_connection_mst_branch &&
1537                    aconnector->mst_mgr.aux) {
1538                        DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1539                                         aconnector,
1540                                         aconnector->base.base.id);
1541
1542                        ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1543                        if (ret < 0) {
1544                                DRM_ERROR("DM_MST: Failed to start MST\n");
1545                                aconnector->dc_link->type =
1546                                        dc_connection_single;
1547                                break;
1548                        }
1549                }
1550        }
1551        drm_connector_list_iter_end(&iter);
1552
1553        return ret;
1554}
1555
1556static int dm_late_init(void *handle)
1557{
1558        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1560        struct dmcu_iram_parameters params;
1561        unsigned int linear_lut[16];
1562        int i;
1563        struct dmcu *dmcu = NULL;
1564        bool ret = true;
1565
1566        dmcu = adev->dm.dc->res_pool->dmcu;
1567
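        /* Identity ramp: 0x0000, 0x1111, ..., 0xFFFF (0xFFFF / 15 = 0x1111). */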
1568        for (i = 0; i < 16; i++)
1569                linear_lut[i] = 0xFFFF * i / 15;
1570
1571        params.set = 0;
1572        params.backlight_ramping_start = 0xCCCC;
1573        params.backlight_ramping_reduction = 0xCCCCCCCC;
1574        params.backlight_lut_array_size = 16;
1575        params.backlight_lut_array = linear_lut;
1576
1577        /* Min backlight level after ABM reduction; don't allow below 1%:
1578         * 0xFFFF * 0.01 = 0x28F
1579         */
1580        params.min_abm_backlight = 0x28F;
1581
1582        /* When ABM is implemented on the DMCUB, the dmcu
1583         * object will be NULL.
1584         * ABM 2.4 and up are implemented on the DMCUB.
1585         */
1586        if (dmcu)
1587                ret = dmcu_load_iram(dmcu, params);
1588        else if (adev->dm.dc->ctx->dmub_srv)
1589                ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1590
1591        if (!ret)
1592                return -EINVAL;
1593
1594        return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1595}
1596
1597static void s3_handle_mst(struct drm_device *dev, bool suspend)
1598{
1599        struct amdgpu_dm_connector *aconnector;
1600        struct drm_connector *connector;
1601        struct drm_connector_list_iter iter;
1602        struct drm_dp_mst_topology_mgr *mgr;
1603        int ret;
1604        bool need_hotplug = false;
1605
1606        drm_connector_list_iter_begin(dev, &iter);
1607        drm_for_each_connector_iter(connector, &iter) {
1608                aconnector = to_amdgpu_dm_connector(connector);
1609                if (aconnector->dc_link->type != dc_connection_mst_branch ||
1610                    aconnector->mst_port)
1611                        continue;
1612
1613                mgr = &aconnector->mst_mgr;
1614
1615                if (suspend) {
1616                        drm_dp_mst_topology_mgr_suspend(mgr);
1617                } else {
1618                        ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1619                        if (ret < 0) {
1620                                drm_dp_mst_topology_mgr_set_mst(mgr, false);
1621                                need_hotplug = true;
1622                        }
1623                }
1624        }
1625        drm_connector_list_iter_end(&iter);
1626
1627        if (need_hotplug)
1628                drm_kms_helper_hotplug_event(dev);
1629}
1630
1631static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1632{
1633        struct smu_context *smu = &adev->smu;
1634        int ret = 0;
1635
1636        if (!is_support_sw_smu(adev))
1637                return 0;
1638
1639        /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1640         * depends on the Windows driver's dc implementation.
1641         * For Navi1x, the clock settings of the dcn watermarks are fixed and
1642         * should be passed to smu during boot up and on resume from s3.
1643         * boot up: dc calculates the dcn watermark clock settings within
1644         * dc_create, dcn20_resource_construct
1645         * then calls the pplib functions below to pass the settings to smu:
1646         * smu_set_watermarks_for_clock_ranges
1647         * smu_set_watermarks_table
1648         * navi10_set_watermarks_table
1649         * smu_write_watermarks_table
1650         *
1651         * For Renoir, the clock settings of the dcn watermarks are also fixed
1652         * values. dc has implemented a different flow for the Windows driver:
1653         * dc_hardware_init / dc_set_power_state
1654         * dcn10_init_hw
1655         * notify_wm_ranges
1656         * set_wm_ranges
1657         * -- Linux
1658         * smu_set_watermarks_for_clock_ranges
1659         * renoir_set_watermarks_table
1660         * smu_write_watermarks_table
1661         *
1662         * For Linux,
1663         * dc_hardware_init -> amdgpu_dm_init
1664         * dc_set_power_state --> dm_resume
1665         *
1666         * Therefore, this function applies to navi10/12/14 but not to Renoir.
1667         */
1669        switch (adev->asic_type) {
1670        case CHIP_NAVI10:
1671        case CHIP_NAVI14:
1672        case CHIP_NAVI12:
1673                break;
1674        default:
1675                return 0;
1676        }
1677
1678        ret = smu_write_watermarks_table(smu);
1679        if (ret) {
1680                DRM_ERROR("Failed to update WMTABLE!\n");
1681                return ret;
1682        }
1683
1684        return 0;
1685}
1686
1687/**
1688 * dm_hw_init() - Initialize DC device
1689 * @handle: The base driver device containing the amdgpu_dm device.
1690 *
1691 * Initialize the &struct amdgpu_display_manager device. This involves calling
1692 * the initializers of each DM component, then populating the struct with them.
1693 *
1694 * Although the function implies hardware initialization, both hardware and
1695 * software are initialized here. Splitting them out to their relevant init
1696 * hooks is a future TODO item.
1697 *
1698 * Some notable things that are initialized here:
1699 *
1700 * - Display Core, both software and hardware
1701 * - DC modules that we need (freesync and color management)
1702 * - DRM software states
1703 * - Interrupt sources and handlers
1704 * - Vblank support
1705 * - Debug FS entries, if enabled
1706 */
1707static int dm_hw_init(void *handle)
1708{
1709        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1710        /* Create DAL display manager */
1711        amdgpu_dm_init(adev);
1712        amdgpu_dm_hpd_init(adev);
1713
1714        return 0;
1715}
1716
1717/**
1718 * dm_hw_fini() - Teardown DC device
1719 * @handle: The base driver device containing the amdgpu_dm device.
1720 *
1721 * Teardown components within &struct amdgpu_display_manager that require
1722 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1723 * were loaded. Also flush IRQ workqueues and disable them.
1724 */
1725static int dm_hw_fini(void *handle)
1726{
1727        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1728
1729        amdgpu_dm_hpd_fini(adev);
1730
1731        amdgpu_dm_irq_fini(adev);
1732        amdgpu_dm_fini(adev);
1733        return 0;
1734}
1735
1737static int dm_enable_vblank(struct drm_crtc *crtc);
1738static void dm_disable_vblank(struct drm_crtc *crtc);
1739
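/*
 * Enable or disable the pflip and vblank interrupts for every stream in
 * state that still has planes attached. This is used around GPU reset so
 * that no display interrupts fire while the hardware is re-initialized.
 */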
1740static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1741                                 struct dc_state *state, bool enable)
1742{
1743        enum dc_irq_source irq_source;
1744        struct amdgpu_crtc *acrtc;
1745        int rc = -EBUSY;
1746        int i = 0;
1747
1748        for (i = 0; i < state->stream_count; i++) {
1749                acrtc = get_crtc_by_otg_inst(
1750                                adev, state->stream_status[i].primary_otg_inst);
1751
1752                if (acrtc && state->stream_status[i].plane_count != 0) {
1753                        irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1754                        rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1755                        DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1756                                  acrtc->crtc_id, enable ? "en" : "dis", rc);
1757                        if (rc)
1758                                DRM_WARN("Failed to %s pflip interrupts\n",
1759                                         enable ? "enable" : "disable");
1760
1761                        if (enable) {
1762                                rc = dm_enable_vblank(&acrtc->base);
1763                                if (rc)
1764                                        DRM_WARN("Failed to enable vblank interrupts\n");
1765                        } else {
1766                                dm_disable_vblank(&acrtc->base);
1767                        }
1769                }
1770        }
1772}
1773
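/*
 * Build and commit a copy of the current context with every stream (and all
 * of its planes) removed, leaving dc driving zero streams. Used on suspend
 * during GPU reset so the cached state can be re-committed on resume.
 */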
1774static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1775{
1776        struct dc_state *context = NULL;
1777        enum dc_status res = DC_ERROR_UNEXPECTED;
1778        int i;
1779        struct dc_stream_state *del_streams[MAX_PIPES];
1780        int del_streams_count = 0;
1781
1782        memset(del_streams, 0, sizeof(del_streams));
1783
1784        context = dc_create_state(dc);
1785        if (context == NULL)
1786                goto context_alloc_fail;
1787
1788        dc_resource_state_copy_construct_current(dc, context);
1789
1790        /* First remove from context all streams */
1791        for (i = 0; i < context->stream_count; i++) {
1792                struct dc_stream_state *stream = context->streams[i];
1793
1794                del_streams[del_streams_count++] = stream;
1795        }
1796
1797        /* Remove all planes for removed streams and then remove the streams */
1798        for (i = 0; i < del_streams_count; i++) {
1799                if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1800                        res = DC_FAIL_DETACH_SURFACES;
1801                        goto fail;
1802                }
1803
1804                res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1805                if (res != DC_OK)
1806                        goto fail;
1807        }
1808
1810        res = dc_validate_global_state(dc, context, false);
1811
1812        if (res != DC_OK) {
1813                DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1814                goto fail;
1815        }
1816
1817        res = dc_commit_state(dc, context);
1818
1819fail:
1820        dc_release_state(context);
1821
1822context_alloc_fail:
1823        return res;
1824}
1825
1826static int dm_suspend(void *handle)
1827{
1828        struct amdgpu_device *adev = handle;
1829        struct amdgpu_display_manager *dm = &adev->dm;
1830        int ret = 0;
1831
1832        if (amdgpu_in_reset(adev)) {
1833                mutex_lock(&dm->dc_lock);
1834
1835#if defined(CONFIG_DRM_AMD_DC_DCN)
1836                dc_allow_idle_optimizations(adev->dm.dc, false);
1837#endif
1838
1839                dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1840
1841                dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1842
1843                amdgpu_dm_commit_zero_streams(dm->dc);
1844
1845                amdgpu_dm_irq_suspend(adev);
1846
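                /*
                 * Note: dc_lock is intentionally left held here; the GPU
                 * reset path in dm_resume() unlocks it after re-committing
                 * the cached state.
                 */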
1847                return ret;
1848        }
1849
1850        WARN_ON(adev->dm.cached_state);
1851        adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1852
1853        s3_handle_mst(adev_to_drm(adev), true);
1854
1855        amdgpu_dm_irq_suspend(adev);
1856
1858        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1859
1860        return 0;
1861}
1862
1863static struct amdgpu_dm_connector *
1864amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1865                                             struct drm_crtc *crtc)
1866{
1867        uint32_t i;
1868        struct drm_connector_state *new_con_state;
1869        struct drm_connector *connector;
1870        struct drm_crtc *crtc_from_state;
1871
1872        for_each_new_connector_in_state(state, connector, new_con_state, i) {
1873                crtc_from_state = new_con_state->crtc;
1874
1875                if (crtc_from_state == crtc)
1876                        return to_amdgpu_dm_connector(connector);
1877        }
1878
1879        return NULL;
1880}
1881
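/*
 * Fake a link detection for connectors that are forced on while nothing is
 * physically attached: build a sink matching the connector's signal type and
 * read the (typically override) EDID through the usual helper.
 */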
1882static void emulated_link_detect(struct dc_link *link)
1883{
1884        struct dc_sink_init_data sink_init_data = { 0 };
1885        struct display_sink_capability sink_caps = { 0 };
1886        enum dc_edid_status edid_status;
1887        struct dc_context *dc_ctx = link->ctx;
1888        struct dc_sink *sink = NULL;
1889        struct dc_sink *prev_sink = NULL;
1890
1891        link->type = dc_connection_none;
1892        prev_sink = link->local_sink;
1893
1894        if (prev_sink)
1895                dc_sink_release(prev_sink);
1896
1897        switch (link->connector_signal) {
1898        case SIGNAL_TYPE_HDMI_TYPE_A: {
1899                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1900                sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1901                break;
1902        }
1903
1904        case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1905                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1906                sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1907                break;
1908        }
1909
1910        case SIGNAL_TYPE_DVI_DUAL_LINK: {
1911                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1912                sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1913                break;
1914        }
1915
1916        case SIGNAL_TYPE_LVDS: {
1917                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1918                sink_caps.signal = SIGNAL_TYPE_LVDS;
1919                break;
1920        }
1921
1922        case SIGNAL_TYPE_EDP: {
1923                sink_caps.transaction_type =
1924                        DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1925                sink_caps.signal = SIGNAL_TYPE_EDP;
1926                break;
1927        }
1928
1929        case SIGNAL_TYPE_DISPLAY_PORT: {
1930                sink_caps.transaction_type =
1931                        DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
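                /* With no real sink present, DP is emulated as a virtual signal. */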
1932                sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1933                break;
1934        }
1935
1936        default:
1937                DC_ERROR("Invalid connector type! signal:%d\n",
1938                        link->connector_signal);
1939                return;
1940        }
1941
1942        sink_init_data.link = link;
1943        sink_init_data.sink_signal = sink_caps.signal;
1944
1945        sink = dc_sink_create(&sink_init_data);
1946        if (!sink) {
1947                DC_ERROR("Failed to create sink!\n");
1948                return;
1949        }
1950
1951        /* dc_sink_create returns a new reference */
1952        link->local_sink = sink;
1953
1954        edid_status = dm_helpers_read_local_edid(
1955                        link->ctx,
1956                        link,
1957                        sink);
1958
1959        if (edid_status != EDID_OK)
1960                DC_ERROR("Failed to read EDID\n");
1962}
1963
1964static void dm_gpureset_commit_state(struct dc_state *dc_state,
1965                                     struct amdgpu_display_manager *dm)
1966{
1967        struct {
1968                struct dc_surface_update surface_updates[MAX_SURFACES];
1969                struct dc_plane_info plane_infos[MAX_SURFACES];
1970                struct dc_scaling_info scaling_infos[MAX_SURFACES];
1971                struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1972                struct dc_stream_update stream_update;
1973        } *bundle;
1974        int k, m;
1975
1976        bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1977
1978        if (!bundle) {
1979                dm_error("Failed to allocate update bundle\n");
1980                goto cleanup;
1981        }
1982
1983        for (k = 0; k < dc_state->stream_count; k++) {
1984                bundle->stream_update.stream = dc_state->streams[k];
1985
1986                for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
1987                        bundle->surface_updates[m].surface =
1988                                dc_state->stream_status[k].plane_states[m];
1989                        bundle->surface_updates[m].surface->force_full_update =
1990                                true;
1991                }
1992                dc_commit_updates_for_stream(
1993                        dm->dc, bundle->surface_updates,
1994                        dc_state->stream_status[k].plane_count,
1995                        dc_state->streams[k], &bundle->stream_update, dc_state);
1996        }
1997
1998cleanup:
1999        kfree(bundle);
2002}
2003
2004static void dm_set_dpms_off(struct dc_link *link)
2005{
2006        struct dc_stream_state *stream_state;
2007        struct amdgpu_dm_connector *aconnector = link->priv;
2008        struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2009        struct dc_stream_update stream_update;
2010        bool dpms_off = true;
2011
2012        memset(&stream_update, 0, sizeof(stream_update));
2013        stream_update.dpms_off = &dpms_off;
2014
2015        mutex_lock(&adev->dm.dc_lock);
2016        stream_state = dc_stream_find_from_link(link);
2017
2018        if (stream_state == NULL) {
2019                DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2020                mutex_unlock(&adev->dm.dc_lock);
2021                return;
2022        }
2023
2024        stream_update.stream = stream_state;
2025        dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2026                                     stream_state, &stream_update,
2027                                     stream_state->ctx->dc->current_state);
2028        mutex_unlock(&adev->dm.dc_lock);
2029}
2030
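/*
 * Two resume paths: during GPU reset, the dc_state cached by dm_suspend() is
 * re-committed directly; on a normal S3 resume, the DRM atomic state cached
 * by drm_atomic_helper_suspend() is replayed after re-detecting every
 * connector.
 */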
2031static int dm_resume(void *handle)
2032{
2033        struct amdgpu_device *adev = handle;
2034        struct drm_device *ddev = adev_to_drm(adev);
2035        struct amdgpu_display_manager *dm = &adev->dm;
2036        struct amdgpu_dm_connector *aconnector;
2037        struct drm_connector *connector;
2038        struct drm_connector_list_iter iter;
2039        struct drm_crtc *crtc;
2040        struct drm_crtc_state *new_crtc_state;
2041        struct dm_crtc_state *dm_new_crtc_state;
2042        struct drm_plane *plane;
2043        struct drm_plane_state *new_plane_state;
2044        struct dm_plane_state *dm_new_plane_state;
2045        struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2046        enum dc_connection_type new_connection_type = dc_connection_none;
2047        struct dc_state *dc_state;
2048        int i, r, j;
2049
2050        if (amdgpu_in_reset(adev)) {
2051                dc_state = dm->cached_dc_state;
2052
2053                r = dm_dmub_hw_init(adev);
2054                if (r)
2055                        DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2056
2057                dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2058                dc_resume(dm->dc);
2059
2060                amdgpu_dm_irq_resume_early(adev);
2061
2062                for (i = 0; i < dc_state->stream_count; i++) {
2063                        dc_state->streams[i]->mode_changed = true;
2064                        for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2065                                dc_state->stream_status[i].plane_states[j]->update_flags.raw
2066                                        = 0xffffffff;
2067                        }
2068                }
2069
2070                WARN_ON(!dc_commit_state(dm->dc, dc_state));
2071
2072                dm_gpureset_commit_state(dm->cached_dc_state, dm);
2073
2074                dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2075
2076                dc_release_state(dm->cached_dc_state);
2077                dm->cached_dc_state = NULL;
2078
2079                amdgpu_dm_irq_resume_late(adev);
2080
2081                mutex_unlock(&dm->dc_lock);
2082
2083                return 0;
2084        }
2085        /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2086        dc_release_state(dm_state->context);
2087        dm_state->context = dc_create_state(dm->dc);
2088        /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2089        dc_resource_state_construct(dm->dc, dm_state->context);
2090
2091        /* Before powering on DC we need to re-initialize DMUB. */
2092        r = dm_dmub_hw_init(adev);
2093        if (r)
2094                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2095
2096        /* power on hardware */
2097        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2098
2099        /* program HPD filter */
2100        dc_resume(dm->dc);
2101
2102        /*
2103         * early enable HPD Rx IRQ, should be done before set mode as short
2104         * pulse interrupts are used for MST
2105         */
2106        amdgpu_dm_irq_resume_early(adev);
2107
2108        /* On resume we need to rewrite the MSTM control bits to enable MST */
2109        s3_handle_mst(ddev, false);
2110
2111        /* Do detection */
2112        drm_connector_list_iter_begin(ddev, &iter);
2113        drm_for_each_connector_iter(connector, &iter) {
2114                aconnector = to_amdgpu_dm_connector(connector);
2115
2116                /*
2117                 * This is the case when traversing through already created
2118                 * MST connectors; they should be skipped.
2119                 */
2120                if (aconnector->mst_port)
2121                        continue;
2122
2123                mutex_lock(&aconnector->hpd_lock);
2124                if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2125                        DRM_ERROR("KMS: Failed to detect connector\n");
2126
2127                if (aconnector->base.force && new_connection_type == dc_connection_none)
2128                        emulated_link_detect(aconnector->dc_link);
2129                else
2130                        dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2131
2132                if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2133                        aconnector->fake_enable = false;
2134
2135                if (aconnector->dc_sink)
2136                        dc_sink_release(aconnector->dc_sink);
2137                aconnector->dc_sink = NULL;
2138                amdgpu_dm_update_connector_after_detect(aconnector);
2139                mutex_unlock(&aconnector->hpd_lock);
2140        }
2141        drm_connector_list_iter_end(&iter);
2142
2143        /* Force mode set in atomic commit */
2144        for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2145                new_crtc_state->active_changed = true;
2146
2147        /*
2148         * atomic_check is expected to create the dc states. We need to release
2149         * them here, since they were duplicated as part of the suspend
2150         * procedure.
2151         */
2152        for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2153                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2154                if (dm_new_crtc_state->stream) {
2155                        WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2156                        dc_stream_release(dm_new_crtc_state->stream);
2157                        dm_new_crtc_state->stream = NULL;
2158                }
2159        }
2160
2161        for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2162                dm_new_plane_state = to_dm_plane_state(new_plane_state);
2163                if (dm_new_plane_state->dc_state) {
2164                        WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2165                        dc_plane_state_release(dm_new_plane_state->dc_state);
2166                        dm_new_plane_state->dc_state = NULL;
2167                }
2168        }
2169
2170        drm_atomic_helper_resume(ddev, dm->cached_state);
2171
2172        dm->cached_state = NULL;
2173
2174        amdgpu_dm_irq_resume_late(adev);
2175
2176        amdgpu_dm_smu_write_watermarks_table(adev);
2177
2178        return 0;
2179}
2180
2181/**
2182 * DOC: DM Lifecycle
2183 *
2184 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2185 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2186 * the base driver's device list to be initialized and torn down accordingly.
2187 *
2188 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2189 */
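/*
 * For illustration only (a hedged sketch, not this file's code): the
 * per-ASIC setup code, e.g. soc15.c, registers the block roughly like so
 * when DC is supported:
 *
 *	if (amdgpu_device_has_dc_support(adev))
 *		amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *
 * The base driver then walks its IP block list and invokes the
 * amdgpu_dm_funcs hooks below at the matching lifecycle points.
 */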
2190
2191static const struct amd_ip_funcs amdgpu_dm_funcs = {
2192        .name = "dm",
2193        .early_init = dm_early_init,
2194        .late_init = dm_late_init,
2195        .sw_init = dm_sw_init,
2196        .sw_fini = dm_sw_fini,
2197        .hw_init = dm_hw_init,
2198        .hw_fini = dm_hw_fini,
2199        .suspend = dm_suspend,
2200        .resume = dm_resume,
2201        .is_idle = dm_is_idle,
2202        .wait_for_idle = dm_wait_for_idle,
2203        .check_soft_reset = dm_check_soft_reset,
2204        .soft_reset = dm_soft_reset,
2205        .set_clockgating_state = dm_set_clockgating_state,
2206        .set_powergating_state = dm_set_powergating_state,
2207};
2208
2209const struct amdgpu_ip_block_version dm_ip_block =
2210{
2211        .type = AMD_IP_BLOCK_TYPE_DCE,
2212        .major = 1,
2213        .minor = 0,
2214        .rev = 0,
2215        .funcs = &amdgpu_dm_funcs,
2216};
2217
2219/**
2220 * DOC: atomic
2221 *
2222 * *WIP*
2223 */
2224
2225static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2226        .fb_create = amdgpu_display_user_framebuffer_create,
2227        .get_format_info = amd_get_format_info,
2228        .output_poll_changed = drm_fb_helper_output_poll_changed,
2229        .atomic_check = amdgpu_dm_atomic_check,
2230        .atomic_commit = drm_atomic_helper_commit,
2231};
2232
2233static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2234        .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2235};
2236
2237static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2238{
2239        u32 max_cll, min_cll, max, min, q, r;
2240        struct amdgpu_dm_backlight_caps *caps;
2241        struct amdgpu_display_manager *dm;
2242        struct drm_connector *conn_base;
2243        struct amdgpu_device *adev;
2244        struct dc_link *link = NULL;
2245        static const u8 pre_computed_values[] = {
2246                50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2247                71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2248
2249        if (!aconnector || !aconnector->dc_link)
2250                return;
2251
2252        link = aconnector->dc_link;
2253        if (link->connector_signal != SIGNAL_TYPE_EDP)
2254                return;
2255
2256        conn_base = &aconnector->base;
2257        adev = drm_to_adev(conn_base->dev);
2258        dm = &adev->dm;
2259        caps = &dm->backlight_caps;
2260        caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2261        caps->aux_support = false;
2262        max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2263        min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2264
2265        if (caps->ext_caps->bits.oled == 1 ||
2266            caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2267            caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2268                caps->aux_support = true;
2269
2270        if (amdgpu_backlight == 0)
2271                caps->aux_support = false;
2272        else if (amdgpu_backlight == 1)
2273                caps->aux_support = true;
2274
2275        /* From the specification (CTA-861-G), for calculating the maximum
2276         * luminance we need to use:
2277         *      Luminance = 50*2**(CV/32)
2278         * where CV is a one-byte value.
2279         * Calculating this expression directly would require floating-point
2280         * precision; to avoid that complexity, we take advantage of the fact
2281         * that CV is divided by a constant. By Euclid's division algorithm,
2282         * CV can be written as CV = 32*q + r. Substituting CV into the
2283         * luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
2284         * to pre-compute the values of 2**(r/32) for r in 0..31. They were
2285         * generated with the following Ruby line:
2286         *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2287         * The results of the above expression can be verified against the
2288         * pre_computed_values table above.
2289         */
2290        q = max_cll >> 5;
2291        r = max_cll % 32;
2292        max = (1 << q) * pre_computed_values[r];
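        /*
         * Worked example for the formula above (illustrative values): with
         * max_cll = 70, q = 70 >> 5 = 2 and r = 70 % 32 = 6, so
         * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228, which
         * matches 50*2**(70/32) ~= 227.8 nits.
         */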
2293
2294        /* min luminance: maxLum * (CV/255)^2 / 100, kept in integer math so
2295         * that (CV/255) is not rounded down to zero before squaring. */
2296        min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
2297
2298        caps->aux_max_input_signal = max;
2299        caps->aux_min_input_signal = min;
2300}
2301
2302void amdgpu_dm_update_connector_after_detect(
2303                struct amdgpu_dm_connector *aconnector)
2304{
2305        struct drm_connector *connector = &aconnector->base;
2306        struct drm_device *dev = connector->dev;
2307        struct dc_sink *sink;
2308
2309        /* MST handled by drm_mst framework */
2310        if (aconnector->mst_mgr.mst_state)
2311                return;
2312
2313        sink = aconnector->dc_link->local_sink;
2314        if (sink)
2315                dc_sink_retain(sink);
2316
2317        /*
2318         * EDID mgmt connectors get their first update only in the mode_valid
2319         * hook; then the connector sink is set to either a fake or a physical
2320         * sink depending on link status. Skip if already done during boot.
2321         */
2322        if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2323                        && aconnector->dc_em_sink) {
2324
2325                /*
2326                 * For headless S3 resume, use dc_em_sink to fake a stream,
2327                 * because on resume connector->sink is set to NULL.
2328                 */
2329                mutex_lock(&dev->mode_config.mutex);
2330
2331                if (sink) {
2332                        if (aconnector->dc_sink) {
2333                                amdgpu_dm_update_freesync_caps(connector, NULL);
2334                                /*
2335                                 * The retain and release below bump the sink
2336                                 * refcount because the link no longer points
2337                                 * to it after disconnect; otherwise the next
2338                                 * crtc-to-connector reshuffle by the UMD
2339                                 * would trigger an unwanted dc_sink release.
2340                                 */
2340                                dc_sink_release(aconnector->dc_sink);
2341                        }
2342                        aconnector->dc_sink = sink;
2343                        dc_sink_retain(aconnector->dc_sink);
2344                        amdgpu_dm_update_freesync_caps(connector,
2345                                        aconnector->edid);
2346                } else {
2347                        amdgpu_dm_update_freesync_caps(connector, NULL);
2348                        if (!aconnector->dc_sink) {
2349                                aconnector->dc_sink = aconnector->dc_em_sink;
2350                                dc_sink_retain(aconnector->dc_sink);
2351                        }
2352                }
2353
2354                mutex_unlock(&dev->mode_config.mutex);
2355
2356                if (sink)
2357                        dc_sink_release(sink);
2358                return;
2359        }
2360
2361        /*
2362         * TODO: temporary guard while looking for a proper fix.
2363         * If this sink is an MST sink, we should not do anything.
2364         */
2365        if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2366                dc_sink_release(sink);
2367                return;
2368        }
2369
2370        if (aconnector->dc_sink == sink) {
2371                /*
2372                 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2373                 * Do nothing!!
2374                 */
2375                DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2376                                aconnector->connector_id);
2377                if (sink)
2378                        dc_sink_release(sink);
2379                return;
2380        }
2381
2382        DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2383                aconnector->connector_id, aconnector->dc_sink, sink);
2384
2385        mutex_lock(&dev->mode_config.mutex);
2386
2387        /*
2388         * 1. Update status of the drm connector
2389         * 2. Send an event and let userspace tell us what to do
2390         */
2391        if (sink) {
2392                /*
2393                 * TODO: check if we still need the S3 mode update workaround.
2394                 * If yes, put it here.
2395                 */
2396                if (aconnector->dc_sink) {
2397                        amdgpu_dm_update_freesync_caps(connector, NULL);
2398                        dc_sink_release(aconnector->dc_sink);
2399                }
2400
2401                aconnector->dc_sink = sink;
2402                dc_sink_retain(aconnector->dc_sink);
2403                if (sink->dc_edid.length == 0) {
2404                        aconnector->edid = NULL;
2405                        if (aconnector->dc_link->aux_mode) {
2406                                drm_dp_cec_unset_edid(
2407                                        &aconnector->dm_dp_aux.aux);
2408                        }
2409                } else {
2410                        aconnector->edid =
2411                                (struct edid *)sink->dc_edid.raw_edid;
2412
2413                        drm_connector_update_edid_property(connector,
2414                                                           aconnector->edid);
2415                        if (aconnector->dc_link->aux_mode)
2416                                drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2417                                                    aconnector->edid);
2418                }
2419
2420                amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2421                update_connector_ext_caps(aconnector);
2422        } else {
2423                drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2424                amdgpu_dm_update_freesync_caps(connector, NULL);
2425                drm_connector_update_edid_property(connector, NULL);
2426                aconnector->num_modes = 0;
2427                dc_sink_release(aconnector->dc_sink);
2428                aconnector->dc_sink = NULL;
2429                aconnector->edid = NULL;
2430#ifdef CONFIG_DRM_AMD_DC_HDCP
2431                /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2432                if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2433                        connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2434#endif
2435        }
2436
2437        mutex_unlock(&dev->mode_config.mutex);
2438
2439        update_subconnector_property(aconnector);
2440
2441        if (sink)
2442                dc_sink_release(sink);
2443}
2444
2445static void handle_hpd_irq(void *param)
2446{
2447        struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2448        struct drm_connector *connector = &aconnector->base;
2449        struct drm_device *dev = connector->dev;
2450        enum dc_connection_type new_connection_type = dc_connection_none;
2451#ifdef CONFIG_DRM_AMD_DC_HDCP
2452        struct amdgpu_device *adev = drm_to_adev(dev);
2453        struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2454#endif
2455
2456        /*
2457         * In case of failure or MST, no need to update the connector status
2458         * or notify the OS, since (for MST) this is done in its own context.
2459         */
2460        mutex_lock(&aconnector->hpd_lock);
2461
2462#ifdef CONFIG_DRM_AMD_DC_HDCP
2463        if (adev->dm.hdcp_workqueue) {
2464                hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2465                dm_con_state->update_hdcp = true;
2466        }
2467#endif
2468        if (aconnector->fake_enable)
2469                aconnector->fake_enable = false;
2470
2471        if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2472                DRM_ERROR("KMS: Failed to detect connector\n");
2473
2474        if (aconnector->base.force && new_connection_type == dc_connection_none) {
2475                emulated_link_detect(aconnector->dc_link);
2476
2478                drm_modeset_lock_all(dev);
2479                dm_restore_drm_connector_state(dev, connector);
2480                drm_modeset_unlock_all(dev);
2481
2482                if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2483                        drm_kms_helper_hotplug_event(dev);
2484
2485        } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2486                if (new_connection_type == dc_connection_none &&
2487                    aconnector->dc_link->type == dc_connection_none)
2488                        dm_set_dpms_off(aconnector->dc_link);
2489
2490                amdgpu_dm_update_connector_after_detect(aconnector);
2491
2492                drm_modeset_lock_all(dev);
2493                dm_restore_drm_connector_state(dev, connector);
2494                drm_modeset_unlock_all(dev);
2495
2496                if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2497                        drm_kms_helper_hotplug_event(dev);
2498        }
2499        mutex_unlock(&aconnector->hpd_lock);
2501}
2502
2503static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2504{
2505        uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2506        uint8_t dret;
2507        bool new_irq_handled = false;
2508        int dpcd_addr;
2509        int dpcd_bytes_to_read;
2510
2511        const int max_process_count = 30;
2512        int process_count = 0;
2513
2514        const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2515
2516        if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2517                dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2518                /* DPCD 0x200 - 0x201 for downstream IRQ */
2519                dpcd_addr = DP_SINK_COUNT;
2520        } else {
2521                dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2522                /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2523                dpcd_addr = DP_SINK_COUNT_ESI;
2524        }
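        /*
         * For reference: DP_SINK_COUNT is DPCD 0x200 and DP_SINK_COUNT_ESI is
         * 0x2002, so the branches above read 2 and 4 bytes respectively.
         */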
2525
2526        dret = drm_dp_dpcd_read(
2527                &aconnector->dm_dp_aux.aux,
2528                dpcd_addr,
2529                esi,
2530                dpcd_bytes_to_read);
2531
2532        while (dret == dpcd_bytes_to_read &&
2533                process_count < max_process_count) {
2534                uint8_t retry;
2535                dret = 0;
2536
2537                process_count++;
2538
2539                DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2540                /* handle HPD short pulse irq */
2541                if (aconnector->mst_mgr.mst_state)
2542                        drm_dp_mst_hpd_irq(
2543                                &aconnector->mst_mgr,
2544                                esi,
2545                                &new_irq_handled);
2546
2547                if (new_irq_handled) {
2548                        /* ACK at DPCD to notify downstream (skip the sink count byte) */
2549                        const int ack_dpcd_bytes_to_write =
2550                                dpcd_bytes_to_read - 1;
2551
2552                        for (retry = 0; retry < 3; retry++) {
2553                                uint8_t wret;
2554
2555                                wret = drm_dp_dpcd_write(
2556                                        &aconnector->dm_dp_aux.aux,
2557                                        dpcd_addr + 1,
2558                                        &esi[1],
2559                                        ack_dpcd_bytes_to_write);
2560                                if (wret == ack_dpcd_bytes_to_write)
2561                                        break;
2562                        }
2563
2564                        /* check if there is new irq to be handled */
2565                        dret = drm_dp_dpcd_read(
2566                                &aconnector->dm_dp_aux.aux,
2567                                dpcd_addr,
2568                                esi,
2569                                dpcd_bytes_to_read);
2570
2571                        new_irq_handled = false;
2572                } else {
2573                        break;
2574                }
2575        }
2576
2577        if (process_count == max_process_count)
2578                DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2579}
2580
2581static void handle_hpd_rx_irq(void *param)
2582{
2583        struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2584        struct drm_connector *connector = &aconnector->base;
2585        struct drm_device *dev = connector->dev;
2586        struct dc_link *dc_link = aconnector->dc_link;
2587        bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2588        bool result = false;
2589        enum dc_connection_type new_connection_type = dc_connection_none;
2590        struct amdgpu_device *adev = drm_to_adev(dev);
2591        union hpd_irq_data hpd_irq_data;
2592
2593        memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2594
2595        /*
2596         * TODO: Temporarily add a mutex so that the hpd interrupt does not
2597         * run into a gpio conflict; once an i2c helper is implemented, this
2598         * mutex should be retired.
2599         */
2600        if (dc_link->type != dc_connection_mst_branch)
2601                mutex_lock(&aconnector->hpd_lock);
2602
2603        read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2604
2605        if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2606                (dc_link->type == dc_connection_mst_branch)) {
2607                if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2608                        result = true;
2609                        dm_handle_hpd_rx_irq(aconnector);
2610                        goto out;
2611                } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2612                        result = false;
2613                        dm_handle_hpd_rx_irq(aconnector);
2614                        goto out;
2615                }
2616        }
2617
2618        mutex_lock(&adev->dm.dc_lock);
2619#ifdef CONFIG_DRM_AMD_DC_HDCP
2620        result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2621#else
2622        result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2623#endif
2624        mutex_unlock(&adev->dm.dc_lock);
2625
2626out:
2627        if (result && !is_mst_root_connector) {
2628                /* Downstream Port status changed. */
2629                if (!dc_link_detect_sink(dc_link, &new_connection_type))
2630                        DRM_ERROR("KMS: Failed to detect connector\n");
2631
2632                if (aconnector->base.force && new_connection_type == dc_connection_none) {
2633                        emulated_link_detect(dc_link);
2634
2635                        if (aconnector->fake_enable)
2636                                aconnector->fake_enable = false;
2637
2638                        amdgpu_dm_update_connector_after_detect(aconnector);
2639
2641                        drm_modeset_lock_all(dev);
2642                        dm_restore_drm_connector_state(dev, connector);
2643                        drm_modeset_unlock_all(dev);
2644
2645                        drm_kms_helper_hotplug_event(dev);
2646                } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2647
2648                        if (aconnector->fake_enable)
2649                                aconnector->fake_enable = false;
2650
2651                        amdgpu_dm_update_connector_after_detect(aconnector);
2652
2654                        drm_modeset_lock_all(dev);
2655                        dm_restore_drm_connector_state(dev, connector);
2656                        drm_modeset_unlock_all(dev);
2657
2658                        drm_kms_helper_hotplug_event(dev);
2659                }
2660        }
2661#ifdef CONFIG_DRM_AMD_DC_HDCP
2662        if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2663                if (adev->dm.hdcp_workqueue)
2664                        hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2665        }
2666#endif
2667
2668        if (dc_link->type != dc_connection_mst_branch) {
2669                drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2670                mutex_unlock(&aconnector->hpd_lock);
2671        }
2672}
2673
2674static void register_hpd_handlers(struct amdgpu_device *adev)
2675{
2676        struct drm_device *dev = adev_to_drm(adev);
2677        struct drm_connector *connector;
2678        struct amdgpu_dm_connector *aconnector;
2679        const struct dc_link *dc_link;
2680        struct dc_interrupt_params int_params = {0};
2681
2682        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2683        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2684
2685        list_for_each_entry(connector,
2686                        &dev->mode_config.connector_list, head) {
2687
2688                aconnector = to_amdgpu_dm_connector(connector);
2689                dc_link = aconnector->dc_link;
2690
2691                if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
2692                        int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2693                        int_params.irq_source = dc_link->irq_source_hpd;
2694
2695                        amdgpu_dm_irq_register_interrupt(adev, &int_params,
2696                                        handle_hpd_irq,
2697                                        (void *) aconnector);
2698                }
2699
2700                if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
2701
2702                        /* Also register for DP short pulse (hpd_rx). */
2703                        int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2704                        int_params.irq_source = dc_link->irq_source_hpd_rx;
2705
2706                        amdgpu_dm_irq_register_interrupt(adev, &int_params,
2707                                        handle_hpd_rx_irq,
2708                                        (void *) aconnector);
2709                }
2710        }
2711}
2712
2713#if defined(CONFIG_DRM_AMD_DC_SI)
2714/* Register IRQ sources and initialize IRQ callbacks */
2715static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2716{
2717        struct dc *dc = adev->dm.dc;
2718        struct common_irq_params *c_irq_params;
2719        struct dc_interrupt_params int_params = {0};
2720        int r;
2721        int i;
2722        unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2723
2724        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2725        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2726
2727        /*
2728         * Actions of amdgpu_irq_add_id():
2729         * 1. Register a set() function with base driver.
2730         *    Base driver will call set() function to enable/disable an
2731         *    interrupt in DC hardware.
2732         * 2. Register amdgpu_dm_irq_handler().
2733         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2734         *    coming from DC hardware.
2735         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2736         *    for acknowledging and handling. */
2737
2738        /* Use VBLANK interrupt */
2739        for (i = 0; i < adev->mode_info.num_crtc; i++) {
2740                r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2741                if (r) {
2742                        DRM_ERROR("Failed to add crtc irq id!\n");
2743                        return r;
2744                }
2745
2746                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2747                int_params.irq_source =
2748                        dc_interrupt_to_irq_source(dc, i + 1, 0);
2749
2750                c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2751
2752                c_irq_params->adev = adev;
2753                c_irq_params->irq_src = int_params.irq_source;
2754
2755                amdgpu_dm_irq_register_interrupt(adev, &int_params,
2756                                dm_crtc_high_irq, c_irq_params);
2757        }
2758
2759        /* Use GRPH_PFLIP interrupt */
2760        for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2761                        i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2762                r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2763                if (r) {
2764                        DRM_ERROR("Failed to add page flip irq id!\n");
2765                        return r;
2766                }
2767
2768                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2769                int_params.irq_source =
2770                        dc_interrupt_to_irq_source(dc, i, 0);
2771
2772                c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2773
2774                c_irq_params->adev = adev;
2775                c_irq_params->irq_src = int_params.irq_source;
2776
2777                amdgpu_dm_irq_register_interrupt(adev, &int_params,
2778                                dm_pflip_high_irq, c_irq_params);
2780        }
2781
2782        /* HPD */
2783        r = amdgpu_irq_add_id(adev, client_id,
2784                        VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2785        if (r) {
2786                DRM_ERROR("Failed to add hpd irq id!\n");
2787                return r;
2788        }
2789
2790        register_hpd_handlers(adev);
2791
2792        return 0;
2793}
2794#endif
2795
2796/* Register IRQ sources and initialize IRQ callbacks */
2797static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2798{
2799        struct dc *dc = adev->dm.dc;
2800        struct common_irq_params *c_irq_params;
2801        struct dc_interrupt_params int_params = {0};
2802        int r;
2803        int i;
2804        unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2805
2806        if (adev->asic_type >= CHIP_VEGA10)
2807                client_id = SOC15_IH_CLIENTID_DCE;
2808
2809        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2810        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2811
2812        /*
2813         * Actions of amdgpu_irq_add_id():
2814         * 1. Register a set() function with base driver.
2815         *    Base driver will call set() function to enable/disable an
2816         *    interrupt in DC hardware.
2817         * 2. Register amdgpu_dm_irq_handler().
2818         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2819         *    coming from DC hardware.
2820         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2821         *    for acknowledging and handling. */
2822
2823        /* Use VBLANK interrupt */
2824        for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2825                r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2826                if (r) {
2827                        DRM_ERROR("Failed to add crtc irq id!\n");
2828                        return r;
2829                }
2830
2831                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2832                int_params.irq_source =
2833                        dc_interrupt_to_irq_source(dc, i, 0);
2834
2835                c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2836
2837                c_irq_params->adev = adev;
2838                c_irq_params->irq_src = int_params.irq_source;
2839
2840                amdgpu_dm_irq_register_interrupt(adev, &int_params,
2841                                dm_crtc_high_irq, c_irq_params);
2842        }
2843
2844        /* Use VUPDATE interrupt */
2845        for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2846                r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2847                if (r) {
2848                        DRM_ERROR("Failed to add vupdate irq id!\n");
2849                        return r;
2850                }
2851
2852                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2853                int_params.irq_source =
2854                        dc_interrupt_to_irq_source(dc, i, 0);
2855
2856                c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2857
2858                c_irq_params->adev = adev;
2859                c_irq_params->irq_src = int_params.irq_source;
2860
2861                amdgpu_dm_irq_register_interrupt(adev, &int_params,
2862                                dm_vupdate_high_irq, c_irq_params);
2863        }
2864
2865        /* Use GRPH_PFLIP interrupt */
2866        for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2867                        i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2868                r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2869                if (r) {
2870                        DRM_ERROR("Failed to add page flip irq id!\n");
2871                        return r;
2872                }
2873
2874                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2875                int_params.irq_source =
2876                        dc_interrupt_to_irq_source(dc, i, 0);
2877
2878                c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2879
2880                c_irq_params->adev = adev;
2881                c_irq_params->irq_src = int_params.irq_source;
2882
2883                amdgpu_dm_irq_register_interrupt(adev, &int_params,
2884                                dm_pflip_high_irq, c_irq_params);
2885
2886        }
2887
2888        /* HPD */
2889        r = amdgpu_irq_add_id(adev, client_id,
2890                        VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2891        if (r) {
2892                DRM_ERROR("Failed to add hpd irq id!\n");
2893                return r;
2894        }
2895
2896        register_hpd_handlers(adev);
2897
2898        return 0;
2899}
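
/*
 * Worked example of the mapping above (illustrative only, not driver code):
 * for the first VBLANK source, i == VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0,
 * dc_interrupt_to_irq_source(dc, i, 0) yields DC_IRQ_SOURCE_VBLANK1, so the
 * handler state lands in vblank_params[0]; the D2..D6 sources fill slots
 * 1..5 the same way. The common_irq_params stored there is handed back to
 * dm_crtc_high_irq() as its argument when the registered interrupt fires.
 */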
2900
2901#if defined(CONFIG_DRM_AMD_DC_DCN)
2902/* Register IRQ sources and initialize IRQ callbacks */
2903static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2904{
2905        struct dc *dc = adev->dm.dc;
2906        struct common_irq_params *c_irq_params;
2907        struct dc_interrupt_params int_params = {0};
2908        int r;
2909        int i;
2910
2911        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2912        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2913
2914        /*
2915         * Actions of amdgpu_irq_add_id():
2916         * 1. Register a set() function with base driver.
2917         *    Base driver will call set() function to enable/disable an
2918         *    interrupt in DC hardware.
2919         * 2. Register amdgpu_dm_irq_handler().
2920         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2921         *    coming from DC hardware.
2922         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2923         *    for acknowledging and handling.
2924         */
2925
2926        /* Use VSTARTUP interrupt */
2927        for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2928                        i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2929                        i++) {
2930                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2931
2932                if (r) {
2933                        DRM_ERROR("Failed to add crtc irq id!\n");
2934                        return r;
2935                }
2936
2937                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2938                int_params.irq_source =
2939                        dc_interrupt_to_irq_source(dc, i, 0);
2940
2941                c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2942
2943                c_irq_params->adev = adev;
2944                c_irq_params->irq_src = int_params.irq_source;
2945
2946                amdgpu_dm_irq_register_interrupt(
2947                        adev, &int_params, dm_crtc_high_irq, c_irq_params);
2948        }
2949
2950        /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2951         * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2952         * to trigger at end of each vblank, regardless of state of the lock,
2953         * matching DCE behaviour.
2954         */
2955        for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2956             i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2957             i++) {
2958                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2959
2960                if (r) {
2961                        DRM_ERROR("Failed to add vupdate irq id!\n");
2962                        return r;
2963                }
2964
2965                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2966                int_params.irq_source =
2967                        dc_interrupt_to_irq_source(dc, i, 0);
2968
2969                c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2970
2971                c_irq_params->adev = adev;
2972                c_irq_params->irq_src = int_params.irq_source;
2973
2974                amdgpu_dm_irq_register_interrupt(adev, &int_params,
2975                                dm_vupdate_high_irq, c_irq_params);
2976        }
2977
2978        /* Use GRPH_PFLIP interrupt */
2979        for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2980                        i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2981                        i++) {
2982                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2983                if (r) {
2984                        DRM_ERROR("Failed to add page flip irq id!\n");
2985                        return r;
2986                }
2987
2988                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2989                int_params.irq_source =
2990                        dc_interrupt_to_irq_source(dc, i, 0);
2991
2992                c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2993
2994                c_irq_params->adev = adev;
2995                c_irq_params->irq_src = int_params.irq_source;
2996
2997                amdgpu_dm_irq_register_interrupt(adev, &int_params,
2998                                dm_pflip_high_irq, c_irq_params);
2999
3000        }
3001
3002        /* HPD */
3003        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3004                        &adev->hpd_irq);
3005        if (r) {
3006                DRM_ERROR("Failed to add hpd irq id!\n");
3007                return r;
3008        }
3009
3010        register_hpd_handlers(adev);
3011
3012        return 0;
3013}
3014#endif
3015
3016/*
3017 * Acquires the lock for the DM atomic private object and returns
3018 * the new DM atomic state through *dm_state.
3019 *
3020 * This should only be called during atomic check.
3021 */
3022static int dm_atomic_get_state(struct drm_atomic_state *state,
3023                               struct dm_atomic_state **dm_state)
3024{
3025        struct drm_device *dev = state->dev;
3026        struct amdgpu_device *adev = drm_to_adev(dev);
3027        struct amdgpu_display_manager *dm = &adev->dm;
3028        struct drm_private_state *priv_state;
3029
3030        if (*dm_state)
3031                return 0;
3032
3033        priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3034        if (IS_ERR(priv_state))
3035                return PTR_ERR(priv_state);
3036
3037        *dm_state = to_dm_atomic_state(priv_state);
3038
3039        return 0;
3040}
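
/*
 * Minimal usage sketch (an assumption for illustration, not a quote of
 * driver code): a caller in atomic check lazily acquires the DM private
 * state once and may call this repeatedly at no extra cost:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret;
 *
 *	ret = dm_atomic_get_state(state, &dm_state);
 *	if (ret)
 *		return ret;
 *	// dm_state->context is now the dc_state under validation
 *
 * Since *dm_state short-circuits once it is set, repeated calls within one
 * atomic check do not re-acquire the private object state.
 */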
3041
3042static struct dm_atomic_state *
3043dm_atomic_get_new_state(struct drm_atomic_state *state)
3044{
3045        struct drm_device *dev = state->dev;
3046        struct amdgpu_device *adev = drm_to_adev(dev);
3047        struct amdgpu_display_manager *dm = &adev->dm;
3048        struct drm_private_obj *obj;
3049        struct drm_private_state *new_obj_state;
3050        int i;
3051
3052        for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3053                if (obj->funcs == dm->atomic_obj.funcs)
3054                        return to_dm_atomic_state(new_obj_state);
3055        }
3056
3057        return NULL;
3058}
3059
3060static struct drm_private_state *
3061dm_atomic_duplicate_state(struct drm_private_obj *obj)
3062{
3063        struct dm_atomic_state *old_state, *new_state;
3064
3065        new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3066        if (!new_state)
3067                return NULL;
3068
3069        __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3070
3071        old_state = to_dm_atomic_state(obj->state);
3072
3073        if (old_state && old_state->context)
3074                new_state->context = dc_copy_state(old_state->context);
3075
3076        if (!new_state->context) {
3077                kfree(new_state);
3078                return NULL;
3079        }
3080
3081        return &new_state->base;
3082}
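
/*
 * Note: dc_copy_state() above gives each duplicated private state its own
 * dc_state, so atomic check can mutate the copy freely; the matching
 * release happens in dm_atomic_destroy_state() below. Returning NULL here
 * makes drm_atomic_get_private_obj_state() fail the atomic operation with
 * -ENOMEM.
 */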
3083
3084static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3085                                    struct drm_private_state *state)
3086{
3087        struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3088
3089        if (dm_state && dm_state->context)
3090                dc_release_state(dm_state->context);
3091
3092        kfree(dm_state);
3093}
3094
3095static struct drm_private_state_funcs dm_atomic_state_funcs = {
3096        .atomic_duplicate_state = dm_atomic_duplicate_state,
3097        .atomic_destroy_state = dm_atomic_destroy_state,
3098};
3099
3100static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3101{
3102        struct dm_atomic_state *state;
3103        int r;
3104
3105        adev->mode_info.mode_config_initialized = true;
3106
3107        adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3108        adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3109
3110        adev_to_drm(adev)->mode_config.max_width = 16384;
3111        adev_to_drm(adev)->mode_config.max_height = 16384;
3112
3113        adev_to_drm(adev)->mode_config.preferred_depth = 24;
3114        adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3115        /* indicates support for immediate flip */
3116        adev_to_drm(adev)->mode_config.async_page_flip = true;
3117
3118        adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3119
3120        state = kzalloc(sizeof(*state), GFP_KERNEL);
3121        if (!state)
3122                return -ENOMEM;
3123
3124        state->context = dc_create_state(adev->dm.dc);
3125        if (!state->context) {
3126                kfree(state);
3127                return -ENOMEM;
3128        }
3129
3130        dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3131
3132        drm_atomic_private_obj_init(adev_to_drm(adev),
3133                                    &adev->dm.atomic_obj,
3134                                    &state->base,
3135                                    &dm_atomic_state_funcs);
3136
3137        r = amdgpu_display_modeset_create_props(adev);
3138        if (r) {
3139                dc_release_state(state->context);
3140                kfree(state);
3141                return r;
3142        }
3143
3144        r = amdgpu_dm_audio_init(adev);
3145        if (r) {
3146                dc_release_state(state->context);
3147                kfree(state);
3148                return r;
3149        }
3150
3151        return 0;
3152}
3153
3154#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3155#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3156#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3157
3158#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3159        defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3160
3161static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3162{
3163#if defined(CONFIG_ACPI)
3164        struct amdgpu_dm_backlight_caps caps;
3165
3166        memset(&caps, 0, sizeof(caps));
3167
3168        if (dm->backlight_caps.caps_valid)
3169                return;
3170
3171        amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3172        if (caps.caps_valid) {
3173                dm->backlight_caps.caps_valid = true;
3174                if (caps.aux_support)
3175                        return;
3176                dm->backlight_caps.min_input_signal = caps.min_input_signal;
3177                dm->backlight_caps.max_input_signal = caps.max_input_signal;
3178        } else {
3179                dm->backlight_caps.min_input_signal =
3180                                AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3181                dm->backlight_caps.max_input_signal =
3182                                AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3183        }
3184#else
3185        if (dm->backlight_caps.aux_support)
3186                return;
3187
3188        dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3189        dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3190#endif
3191}
3192
3193static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3194                                unsigned *min, unsigned *max)
3195{
3196        if (!caps)
3197                return 0;
3198
3199        if (caps->aux_support) {
3200                // Firmware limits are in nits, DC API wants millinits.
3201                *max = 1000 * caps->aux_max_input_signal;
3202                *min = 1000 * caps->aux_min_input_signal;
3203        } else {
3204                // Firmware limits are 8-bit, PWM control is 16-bit.
3205                *max = 0x101 * caps->max_input_signal;
3206                *min = 0x101 * caps->min_input_signal;
3207        }
3208        return 1;
3209}
3210
3211static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3212                                        uint32_t brightness)
3213{
3214        unsigned min, max;
3215
3216        if (!get_brightness_range(caps, &min, &max))
3217                return brightness;
3218
3219        // Rescale 0..255 to min..max
3220        return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3221                                       AMDGPU_MAX_BL_LEVEL);
3222}
3223
3224static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3225                                      uint32_t brightness)
3226{
3227        unsigned min, max;
3228
3229        if (!get_brightness_range(caps, &min, &max))
3230                return brightness;
3231
3232        if (brightness < min)
3233                return 0;
3234        // Rescale min..max to 0..255
3235        return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3236                                 max - min);
3237}
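
/*
 * Worked example of the rescaling above with the default PWM caps
 * (illustrative only): min_input_signal = 12 and max_input_signal = 255
 * expand to min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535 in
 * 16-bit PWM units. A user brightness of 128 out of AMDGPU_MAX_BL_LEVEL
 * (255) maps to 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 34432, and
 * convert_brightness_to_user(34432) rounds back to 128, so the two
 * conversions are inverses up to integer rounding.
 */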
3238
3239static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3240{
3241        struct amdgpu_display_manager *dm = bl_get_data(bd);
3242        struct amdgpu_dm_backlight_caps caps;
3243        struct dc_link *link = NULL;
3244        u32 brightness;
3245        bool rc;
3246
3247        amdgpu_dm_update_backlight_caps(dm);
3248        caps = dm->backlight_caps;
3249
3250        link = (struct dc_link *)dm->backlight_link;
3251
3252        brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3253        // Change brightness based on AUX property
3254        if (caps.aux_support)
3255                rc = dc_link_set_backlight_level_nits(link, true, brightness,
3256                                                      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3257        else
3258                rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3259
3260        return rc ? 0 : 1;
3261}
3262
3263static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3264{
3265        struct amdgpu_display_manager *dm = bl_get_data(bd);
3266        struct amdgpu_dm_backlight_caps caps;
3267
3268        amdgpu_dm_update_backlight_caps(dm);
3269        caps = dm->backlight_caps;
3270
3271        if (caps.aux_support) {
3272                struct dc_link *link = (struct dc_link *)dm->backlight_link;
3273                u32 avg, peak;
3274                bool rc;
3275
3276                rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3277                if (!rc)
3278                        return bd->props.brightness;
3279                return convert_brightness_to_user(&caps, avg);
3280        } else {
3281                int ret = dc_link_get_backlight_level(dm->backlight_link);
3282
3283                if (ret == DC_ERROR_UNEXPECTED)
3284                        return bd->props.brightness;
3285                return convert_brightness_to_user(&caps, ret);
3286        }
3287}
3288
3289static const struct backlight_ops amdgpu_dm_backlight_ops = {
3290        .options = BL_CORE_SUSPENDRESUME,
3291        .get_brightness = amdgpu_dm_backlight_get_brightness,
3292        .update_status  = amdgpu_dm_backlight_update_status,
3293};
3294
3295static void
3296amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3297{
3298        char bl_name[16];
3299        struct backlight_properties props = { 0 };
3300
3301        amdgpu_dm_update_backlight_caps(dm);
3302
3303        props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3304        props.brightness = AMDGPU_MAX_BL_LEVEL;
3305        props.type = BACKLIGHT_RAW;
3306
3307        snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3308                 adev_to_drm(dm->adev)->primary->index);
3309
3310        dm->backlight_dev = backlight_device_register(bl_name,
3311                                                      adev_to_drm(dm->adev)->dev,
3312                                                      dm,
3313                                                      &amdgpu_dm_backlight_ops,
3314                                                      &props);
3315
3316        if (IS_ERR(dm->backlight_dev))
3317                DRM_ERROR("DM: Backlight registration failed!\n");
3318        else
3319                DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3320}
3321
3322#endif
3323
3324static int initialize_plane(struct amdgpu_display_manager *dm,
3325                            struct amdgpu_mode_info *mode_info, int plane_id,
3326                            enum drm_plane_type plane_type,
3327                            const struct dc_plane_cap *plane_cap)
3328{
3329        struct drm_plane *plane;
3330        unsigned long possible_crtcs;
3331        int ret = 0;
3332
3333        plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3334        if (!plane) {
3335                DRM_ERROR("KMS: Failed to allocate plane\n");
3336                return -ENOMEM;
3337        }
3338        plane->type = plane_type;
3339
3340        /*
3341         * HACK: IGT tests expect that the primary plane for a CRTC
3342         * can only have one possible CRTC. Only expose support for
3343         * all CRTCs on planes that will never be used as a primary
3344         * plane for a CRTC - i.e. overlay or underlay planes.
3345         */
3346        possible_crtcs = 1 << plane_id;
3347        if (plane_id >= dm->dc->caps.max_streams)
3348                possible_crtcs = 0xff;
3349
3350        ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3351
3352        if (ret) {
3353                DRM_ERROR("KMS: Failed to initialize plane\n");
3354                kfree(plane);
3355                return ret;
3356        }
3357
3358        if (mode_info)
3359                mode_info->planes[plane_id] = plane;
3360
3361        return ret;
3362}
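
/*
 * Illustrative example of the possible_crtcs hack above (not driver code):
 * with dc->caps.max_streams == 4, primary plane 0 gets possible_crtcs ==
 * 0x1 (CRTC 0 only) and primary plane 3 gets 0x8, while an overlay plane
 * with plane_id >= 4 gets 0xff and may be placed on any CRTC.
 */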
3363
3365static void register_backlight_device(struct amdgpu_display_manager *dm,
3366                                      struct dc_link *link)
3367{
3368#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3369        defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3370
3371        if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3372            link->type != dc_connection_none) {
3373                /*
3374                 * Even if registration failed, we should continue with
3375                 * DM initialization because not having a backlight control
3376                 * is better than a black screen.
3377                 */
3378                amdgpu_dm_register_backlight_device(dm);
3379
3380                if (dm->backlight_dev)
3381                        dm->backlight_link = link;
3382        }
3383#endif
3384}
3385
3387/*
3388 * In this architecture, the association
3389 * connector -> encoder -> crtc
3390 * is not really required. The crtc and connector will hold the
3391 * display_index as an abstraction to use with the DAL component.
3392 *
3393 * Returns 0 on success
3394 */
3395static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3396{
3397        struct amdgpu_display_manager *dm = &adev->dm;
3398        int32_t i;
3399        struct amdgpu_dm_connector *aconnector = NULL;
3400        struct amdgpu_encoder *aencoder = NULL;
3401        struct amdgpu_mode_info *mode_info = &adev->mode_info;
3402        uint32_t link_cnt;
3403        int32_t primary_planes;
3404        enum dc_connection_type new_connection_type = dc_connection_none;
3405        const struct dc_plane_cap *plane;
3406
3407        dm->display_indexes_num = dm->dc->caps.max_streams;
3408        /* Update the actual number of crtcs used */
3409        adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3410
3411        link_cnt = dm->dc->caps.max_links;
3412        if (amdgpu_dm_mode_config_init(dm->adev)) {
3413                DRM_ERROR("DM: Failed to initialize mode config\n");
3414                return -EINVAL;
3415        }
3416
3417        /* There is one primary plane per CRTC */
3418        primary_planes = dm->dc->caps.max_streams;
3419        ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3420
3421        /*
3422         * Initialize primary planes, the implicit planes used by legacy IOCTLs.
3423         * Order is reversed to match iteration order in atomic check.
3424         */
3425        for (i = (primary_planes - 1); i >= 0; i--) {
3426                plane = &dm->dc->caps.planes[i];
3427
3428                if (initialize_plane(dm, mode_info, i,
3429                                     DRM_PLANE_TYPE_PRIMARY, plane)) {
3430                        DRM_ERROR("KMS: Failed to initialize primary plane\n");
3431                        goto fail;
3432                }
3433        }
3434
3435        /*
3436         * Initialize overlay planes, index starting after primary planes.
3437         * These planes have a higher DRM index than the primary planes since
3438         * they should be considered as having a higher z-order.
3439         * Order is reversed to match iteration order in atomic check.
3440         *
3441         * Only support DCN for now, and only expose one so we don't encourage
3442         * userspace to use up all the pipes.
3443         */
3444        for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3445                struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3446
3447                if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3448                        continue;
3449
3450                if (!plane->blends_with_above || !plane->blends_with_below)
3451                        continue;
3452
3453                if (!plane->pixel_format_support.argb8888)
3454                        continue;
3455
3456                if (initialize_plane(dm, NULL, primary_planes + i,
3457                                     DRM_PLANE_TYPE_OVERLAY, plane)) {
3458                        DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3459                        goto fail;
3460                }
3461
3462                /* Only create one overlay plane. */
3463                break;
3464        }
3465
3466        for (i = 0; i < dm->dc->caps.max_streams; i++)
3467                if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3468                        DRM_ERROR("KMS: Failed to initialize crtc\n");
3469                        goto fail;
3470                }
3471
3472        /* loops over all connectors on the board */
3473        for (i = 0; i < link_cnt; i++) {
3474                struct dc_link *link = NULL;
3475
3476                if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3477                        DRM_ERROR(
3478                                "KMS: Cannot support more than %d display indexes\n",
3479                                        AMDGPU_DM_MAX_DISPLAY_INDEX);
3480                        continue;
3481                }
3482
3483                aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3484                if (!aconnector)
3485                        goto fail;
3486
3487                aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3488                if (!aencoder)
3489                        goto fail;
3490
3491                if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3492                        DRM_ERROR("KMS: Failed to initialize encoder\n");
3493                        goto fail;
3494                }
3495
3496                if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3497                        DRM_ERROR("KMS: Failed to initialize connector\n");
3498                        goto fail;
3499                }
3500
3501                link = dc_get_link_at_index(dm->dc, i);
3502
3503                if (!dc_link_detect_sink(link, &new_connection_type))
3504                        DRM_ERROR("KMS: Failed to detect connector\n");
3505
3506                if (aconnector->base.force && new_connection_type == dc_connection_none) {
3507                        emulated_link_detect(link);
3508                        amdgpu_dm_update_connector_after_detect(aconnector);
3509
3510                } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3511                        amdgpu_dm_update_connector_after_detect(aconnector);
3512                        register_backlight_device(dm, link);
3513                        if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3514                                amdgpu_dm_set_psr_caps(link);
3515                }
3516
3518        }
3519
3520        /* Software is initialized. Now we can register interrupt handlers. */
3521        switch (adev->asic_type) {
3522#if defined(CONFIG_DRM_AMD_DC_SI)
3523        case CHIP_TAHITI:
3524        case CHIP_PITCAIRN:
3525        case CHIP_VERDE:
3526        case CHIP_OLAND:
3527                if (dce60_register_irq_handlers(dm->adev)) {
3528                        DRM_ERROR("DM: Failed to initialize IRQ\n");
3529                        goto fail;
3530                }
3531                break;
3532#endif
3533        case CHIP_BONAIRE:
3534        case CHIP_HAWAII:
3535        case CHIP_KAVERI:
3536        case CHIP_KABINI:
3537        case CHIP_MULLINS:
3538        case CHIP_TONGA:
3539        case CHIP_FIJI:
3540        case CHIP_CARRIZO:
3541        case CHIP_STONEY:
3542        case CHIP_POLARIS11:
3543        case CHIP_POLARIS10:
3544        case CHIP_POLARIS12:
3545        case CHIP_VEGAM:
3546        case CHIP_VEGA10:
3547        case CHIP_VEGA12:
3548        case CHIP_VEGA20:
3549                if (dce110_register_irq_handlers(dm->adev)) {
3550                        DRM_ERROR("DM: Failed to initialize IRQ\n");
3551                        goto fail;
3552                }
3553                break;
3554#if defined(CONFIG_DRM_AMD_DC_DCN)
3555        case CHIP_RAVEN:
3556        case CHIP_NAVI12:
3557        case CHIP_NAVI10:
3558        case CHIP_NAVI14:
3559        case CHIP_RENOIR:
3560        case CHIP_SIENNA_CICHLID:
3561        case CHIP_NAVY_FLOUNDER:
3562        case CHIP_DIMGREY_CAVEFISH:
3563        case CHIP_VANGOGH:
3564                if (dcn10_register_irq_handlers(dm->adev)) {
3565                        DRM_ERROR("DM: Failed to initialize IRQ\n");
3566                        goto fail;
3567                }
3568                break;
3569#endif
3570        default:
3571                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3572                goto fail;
3573        }
3574
3575        return 0;
3576fail:
3577        kfree(aencoder);
3578        kfree(aconnector);
3579
3580        return -EINVAL;
3581}
3582
3583static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3584{
3585        drm_mode_config_cleanup(dm->ddev);
3586        drm_atomic_private_obj_fini(&dm->atomic_obj);
3588}
3589
3590/******************************************************************************
3591 * amdgpu_display_funcs functions
3592 *****************************************************************************/
3593
3594/*
3595 * dm_bandwidth_update - program display watermarks
3596 *
3597 * @adev: amdgpu_device pointer
3598 *
3599 * Calculate and program the display watermarks and line buffer allocation.
3600 */
3601static void dm_bandwidth_update(struct amdgpu_device *adev)
3602{
3603        /* TODO: implement later */
3604}
3605
3606static const struct amdgpu_display_funcs dm_display_funcs = {
3607        .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3608        .vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
3609        .backlight_set_level = NULL, /* never called for DC */
3610        .backlight_get_level = NULL, /* never called for DC */
3611        .hpd_sense = NULL, /* called unconditionally */
3612        .hpd_set_polarity = NULL, /* called unconditionally */
3613        .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3614        .page_flip_get_scanoutpos =
3615                dm_crtc_get_scanoutpos, /* called unconditionally */
3616        .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3617        .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3618};
3619
3620#if defined(CONFIG_DEBUG_KERNEL_DC)
3621
3622static ssize_t s3_debug_store(struct device *device,
3623                              struct device_attribute *attr,
3624                              const char *buf,
3625                              size_t count)
3626{
3627        int ret;
3628        int s3_state;
3629        struct drm_device *drm_dev = dev_get_drvdata(device);
3630        struct amdgpu_device *adev = drm_to_adev(drm_dev);
3631
3632        ret = kstrtoint(buf, 0, &s3_state);
3633
3634        if (ret == 0) {
3635                if (s3_state) {
3636                        dm_resume(adev);
3637                        drm_kms_helper_hotplug_event(adev_to_drm(adev));
3638                } else
3639                        dm_suspend(adev);
3640        }
3641
3642        return ret == 0 ? count : ret;
3643}
3644
3645DEVICE_ATTR_WO(s3_debug);
3646
3647#endif
3648
3649static int dm_early_init(void *handle)
3650{
3651        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3652
3653        switch (adev->asic_type) {
3654#if defined(CONFIG_DRM_AMD_DC_SI)
3655        case CHIP_TAHITI:
3656        case CHIP_PITCAIRN:
3657        case CHIP_VERDE:
3658                adev->mode_info.num_crtc = 6;
3659                adev->mode_info.num_hpd = 6;
3660                adev->mode_info.num_dig = 6;
3661                break;
3662        case CHIP_OLAND:
3663                adev->mode_info.num_crtc = 2;
3664                adev->mode_info.num_hpd = 2;
3665                adev->mode_info.num_dig = 2;
3666                break;
3667#endif
3668        case CHIP_BONAIRE:
3669        case CHIP_HAWAII:
3670                adev->mode_info.num_crtc = 6;
3671                adev->mode_info.num_hpd = 6;
3672                adev->mode_info.num_dig = 6;
3673                break;
3674        case CHIP_KAVERI:
3675                adev->mode_info.num_crtc = 4;
3676                adev->mode_info.num_hpd = 6;
3677                adev->mode_info.num_dig = 7;
3678                break;
3679        case CHIP_KABINI:
3680        case CHIP_MULLINS:
3681                adev->mode_info.num_crtc = 2;
3682                adev->mode_info.num_hpd = 6;
3683                adev->mode_info.num_dig = 6;
3684                break;
3685        case CHIP_FIJI:
3686        case CHIP_TONGA:
3687                adev->mode_info.num_crtc = 6;
3688                adev->mode_info.num_hpd = 6;
3689                adev->mode_info.num_dig = 7;
3690                break;
3691        case CHIP_CARRIZO:
3692                adev->mode_info.num_crtc = 3;
3693                adev->mode_info.num_hpd = 6;
3694                adev->mode_info.num_dig = 9;
3695                break;
3696        case CHIP_STONEY:
3697                adev->mode_info.num_crtc = 2;
3698                adev->mode_info.num_hpd = 6;
3699                adev->mode_info.num_dig = 9;
3700                break;
3701        case CHIP_POLARIS11:
3702        case CHIP_POLARIS12:
3703                adev->mode_info.num_crtc = 5;
3704                adev->mode_info.num_hpd = 5;
3705                adev->mode_info.num_dig = 5;
3706                break;
3707        case CHIP_POLARIS10:
3708        case CHIP_VEGAM:
3709                adev->mode_info.num_crtc = 6;
3710                adev->mode_info.num_hpd = 6;
3711                adev->mode_info.num_dig = 6;
3712                break;
3713        case CHIP_VEGA10:
3714        case CHIP_VEGA12:
3715        case CHIP_VEGA20:
3716                adev->mode_info.num_crtc = 6;
3717                adev->mode_info.num_hpd = 6;
3718                adev->mode_info.num_dig = 6;
3719                break;
3720#if defined(CONFIG_DRM_AMD_DC_DCN)
3721        case CHIP_RAVEN:
3722        case CHIP_RENOIR:
3723        case CHIP_VANGOGH:
3724                adev->mode_info.num_crtc = 4;
3725                adev->mode_info.num_hpd = 4;
3726                adev->mode_info.num_dig = 4;
3727                break;
3728        case CHIP_NAVI10:
3729        case CHIP_NAVI12:
3730        case CHIP_SIENNA_CICHLID:
3731        case CHIP_NAVY_FLOUNDER:
3732                adev->mode_info.num_crtc = 6;
3733                adev->mode_info.num_hpd = 6;
3734                adev->mode_info.num_dig = 6;
3735                break;
3736        case CHIP_NAVI14:
3737        case CHIP_DIMGREY_CAVEFISH:
3738                adev->mode_info.num_crtc = 5;
3739                adev->mode_info.num_hpd = 5;
3740                adev->mode_info.num_dig = 5;
3741                break;
3742#endif
3743        default:
3744                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3745                return -EINVAL;
3746        }
3747
3748        amdgpu_dm_set_irq_funcs(adev);
3749
3750        if (adev->mode_info.funcs == NULL)
3751                adev->mode_info.funcs = &dm_display_funcs;
3752
3753        /*
3754         * Note: Do NOT change adev->audio_endpt_rreg and
3755         * adev->audio_endpt_wreg because they are initialised in
3756         * amdgpu_device_init()
3757         */
3758#if defined(CONFIG_DEBUG_KERNEL_DC)
3759        device_create_file(
3760                adev_to_drm(adev)->dev,
3761                &dev_attr_s3_debug);
3762#endif
3763
3764        return 0;
3765}
3766
3767static bool modeset_required(struct drm_crtc_state *crtc_state,
3768                             struct dc_stream_state *new_stream,
3769                             struct dc_stream_state *old_stream)
3770{
3771        return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3772}
3773
3774static bool modereset_required(struct drm_crtc_state *crtc_state)
3775{
3776        return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3777}
3778
3779static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3780{
3781        drm_encoder_cleanup(encoder);
3782        kfree(encoder);
3783}
3784
3785static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3786        .destroy = amdgpu_dm_encoder_destroy,
3787};
3788
3790static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3791                                         struct drm_framebuffer *fb,
3792                                         int *min_downscale, int *max_upscale)
3793{
3794        struct amdgpu_device *adev = drm_to_adev(dev);
3795        struct dc *dc = adev->dm.dc;
3796        /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3797        struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3798
3799        switch (fb->format->format) {
3800        case DRM_FORMAT_P010:
3801        case DRM_FORMAT_NV12:
3802        case DRM_FORMAT_NV21:
3803                *max_upscale = plane_cap->max_upscale_factor.nv12;
3804                *min_downscale = plane_cap->max_downscale_factor.nv12;
3805                break;
3806
3807        case DRM_FORMAT_XRGB16161616F:
3808        case DRM_FORMAT_ARGB16161616F:
3809        case DRM_FORMAT_XBGR16161616F:
3810        case DRM_FORMAT_ABGR16161616F:
3811                *max_upscale = plane_cap->max_upscale_factor.fp16;
3812                *min_downscale = plane_cap->max_downscale_factor.fp16;
3813                break;
3814
3815        default:
3816                *max_upscale = plane_cap->max_upscale_factor.argb8888;
3817                *min_downscale = plane_cap->max_downscale_factor.argb8888;
3818                break;
3819        }
3820
3821        /*
3822         * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use
3823         * a scaling factor of 1.0 == 1000 units.
3824         */
3825        if (*max_upscale == 1)
3826                *max_upscale = 1000;
3827
3828        if (*min_downscale == 1)
3829                *min_downscale = 1000;
3830}
3831
3832
3833static int fill_dc_scaling_info(const struct drm_plane_state *state,
3834                                struct dc_scaling_info *scaling_info)
3835{
3836        int scale_w, scale_h, min_downscale, max_upscale;
3837
3838        memset(scaling_info, 0, sizeof(*scaling_info));
3839
3840        /* Source is 16.16 fixed point; we ignore the fractional bits for now... */
3841        scaling_info->src_rect.x = state->src_x >> 16;
3842        scaling_info->src_rect.y = state->src_y >> 16;
3843
3844        scaling_info->src_rect.width = state->src_w >> 16;
3845        if (scaling_info->src_rect.width == 0)
3846                return -EINVAL;
3847
3848        scaling_info->src_rect.height = state->src_h >> 16;
3849        if (scaling_info->src_rect.height == 0)
3850                return -EINVAL;
3851
3852        scaling_info->dst_rect.x = state->crtc_x;
3853        scaling_info->dst_rect.y = state->crtc_y;
3854
3855        if (state->crtc_w == 0)
3856                return -EINVAL;
3857
3858        scaling_info->dst_rect.width = state->crtc_w;
3859
3860        if (state->crtc_h == 0)
3861                return -EINVAL;
3862
3863        scaling_info->dst_rect.height = state->crtc_h;
3864
3865        /* DRM doesn't specify clipping on destination output. */
3866        scaling_info->clip_rect = scaling_info->dst_rect;
3867
3868        /* Validate scaling per-format with DC plane caps */
3869        if (state->plane && state->plane->dev && state->fb) {
3870                get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
3871                                             &min_downscale, &max_upscale);
3872        } else {
3873                min_downscale = 250;
3874                max_upscale = 16000;
3875        }
3876
3877        scale_w = scaling_info->dst_rect.width * 1000 /
3878                  scaling_info->src_rect.width;
3879
3880        if (scale_w < min_downscale || scale_w > max_upscale)
3881                return -EINVAL;
3882
3883        scale_h = scaling_info->dst_rect.height * 1000 /
3884                  scaling_info->src_rect.height;
3885
3886        if (scale_h < min_downscale || scale_h > max_upscale)
3887                return -EINVAL;
3888
3889        /*
3890         * The "scaling_quality" can be ignored for now, quality = 0 has DC
3891         * assume reasonable defaults based on the format.
3892         */
3893
3894        return 0;
3895}
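
/*
 * Worked example of the validation above (illustrative only): scaling a
 * 1920-wide source rect to a 960-wide destination gives
 * scale_w = 960 * 1000 / 1920 = 500, i.e. a 0.5x factor, which passes the
 * fallback limits of min_downscale = 250 (0.25x) and max_upscale = 16000
 * (16x). A 1920 -> 400 downscale (scale_w = 208) would be rejected with
 * -EINVAL.
 */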
3896
3897static void
3898fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3899                                 uint64_t tiling_flags)
3900{
3901        /* Fill GFX8 params */
3902        if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3903                unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3904
3905                bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3906                bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3907                mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3908                tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3909                num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3910
3911                /* XXX fix me for VI */
3912                tiling_info->gfx8.num_banks = num_banks;
3913                tiling_info->gfx8.array_mode =
3914                                DC_ARRAY_2D_TILED_THIN1;
3915                tiling_info->gfx8.tile_split = tile_split;
3916                tiling_info->gfx8.bank_width = bankw;
3917                tiling_info->gfx8.bank_height = bankh;
3918                tiling_info->gfx8.tile_aspect = mtaspect;
3919                tiling_info->gfx8.tile_mode =
3920                                DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3921        } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3922                        == DC_ARRAY_1D_TILED_THIN1) {
3923                tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3924        }
3925
3926        tiling_info->gfx8.pipe_config =
3927                        AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3928}
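
/*
 * AMDGPU_TILING_GET() above is a plain bitfield extractor: it shifts
 * tiling_flags right by AMDGPU_TILING_<field>_SHIFT and masks with
 * AMDGPU_TILING_<field>_MASK, recovering the values that were packed into
 * the 64-bit word with AMDGPU_TILING_SET() when the BO was created.
 */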
3929
3930static void
3931fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3932                                  union dc_tiling_info *tiling_info)
3933{
3934        tiling_info->gfx9.num_pipes =
3935                adev->gfx.config.gb_addr_config_fields.num_pipes;
3936        tiling_info->gfx9.num_banks =
3937                adev->gfx.config.gb_addr_config_fields.num_banks;
3938        tiling_info->gfx9.pipe_interleave =
3939                adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3940        tiling_info->gfx9.num_shader_engines =
3941                adev->gfx.config.gb_addr_config_fields.num_se;
3942        tiling_info->gfx9.max_compressed_frags =
3943                adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3944        tiling_info->gfx9.num_rb_per_se =
3945                adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3946        tiling_info->gfx9.shaderEnable = 1;
3947        if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3948            adev->asic_type == CHIP_NAVY_FLOUNDER ||
3949            adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3950            adev->asic_type == CHIP_VANGOGH)
3951                tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3952}
3953
3954static int
3955validate_dcc(struct amdgpu_device *adev,
3956             const enum surface_pixel_format format,
3957             const enum dc_rotation_angle rotation,
3958             const union dc_tiling_info *tiling_info,
3959             const struct dc_plane_dcc_param *dcc,
3960             const struct dc_plane_address *address,
3961             const struct plane_size *plane_size)
3962{
3963        struct dc *dc = adev->dm.dc;
3964        struct dc_dcc_surface_param input;
3965        struct dc_surface_dcc_cap output;
3966
3967        memset(&input, 0, sizeof(input));
3968        memset(&output, 0, sizeof(output));
3969
3970        if (!dcc->enable)
3971                return 0;
3972
3973        if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3974            !dc->cap_funcs.get_dcc_compression_cap)
3975                return -EINVAL;
3976
3977        input.format = format;
3978        input.surface_size.width = plane_size->surface_size.width;
3979        input.surface_size.height = plane_size->surface_size.height;
3980        input.swizzle_mode = tiling_info->gfx9.swizzle;
3981
3982        if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3983                input.scan = SCAN_DIRECTION_HORIZONTAL;
3984        else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3985                input.scan = SCAN_DIRECTION_VERTICAL;
3986
3987        if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3988                return -EINVAL;
3989
3990        if (!output.capable)
3991                return -EINVAL;
3992
3993        if (dcc->independent_64b_blks == 0 &&
3994            output.grph.rgb.independent_64b_blks != 0)
3995                return -EINVAL;
3996
3997        return 0;
3998}
3999
4000static bool
4001modifier_has_dcc(uint64_t modifier)
4002{
4003        return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4004}
4005
4006static unsigned
4007modifier_gfx9_swizzle_mode(uint64_t modifier)
4008{
4009        if (modifier == DRM_FORMAT_MOD_LINEAR)
4010                return 0;
4011
4012        return AMD_FMT_MOD_GET(TILE, modifier);
4013}
4014
4015static const struct drm_format_info *
4016amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4017{
4018        return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4019}
4020
4021static void
4022fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4023                                    union dc_tiling_info *tiling_info,
4024                                    uint64_t modifier)
4025{
4026        unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4027        unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4028        unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4029        unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4030
4031        fill_gfx9_tiling_info_from_device(adev, tiling_info);
4032
4033        if (!IS_AMD_FMT_MOD(modifier))
4034                return;
4035
4036        tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4037        tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4038
4039        if (adev->family >= AMDGPU_FAMILY_NV) {
4040                tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4041        } else {
4042                tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4043
4044                /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4045        }
4046}
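
/*
 * Example decode (illustrative only): a modifier with PIPE_XOR_BITS == 5
 * yields num_pipes = 1 << min(4, 5) = 16 and
 * num_shader_engines = 1 << (5 - 4) = 2, i.e. GFX9 folds the shader-engine
 * bits into the upper part of the pipe-xor field.
 */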
4047
4048enum dm_micro_swizzle {
4049        MICRO_SWIZZLE_Z = 0,
4050        MICRO_SWIZZLE_S = 1,
4051        MICRO_SWIZZLE_D = 2,
4052        MICRO_SWIZZLE_R = 3
4053};
4054
4055static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4056                                          uint32_t format,
4057                                          uint64_t modifier)
4058{
4059        struct amdgpu_device *adev = drm_to_adev(plane->dev);
4060        const struct drm_format_info *info = drm_format_info(format);
4061
4062        enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4063
4064        if (!info)
4065                return false;
4066
4067        /*
4068         * We always have to allow this modifier, because core DRM still
4069         * checks LINEAR support if userspace does not provide modifiers.
4070         */
4071        if (modifier == DRM_FORMAT_MOD_LINEAR)
4072                return true;
4073
4074        /*
4075         * For D swizzle the canonical modifier depends on the bpp, so check
4076         * it here.
4077         */
4078        if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4079            adev->family >= AMDGPU_FAMILY_NV) {
4080                if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4081                        return false;
4082        }
4083
4084        if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4085            info->cpp[0] < 8)
4086                return false;
4087
4088        if (modifier_has_dcc(modifier)) {
4089                /* Per radeonsi comments 16/64 bpp are more complicated. */
4090                if (info->cpp[0] != 4)
4091                        return false;
4092                /* We support multi-planar formats, but not when combined with
4093                 * additional DCC metadata planes. */
4094                if (info->num_planes > 1)
4095                        return false;
4096        }
4097
4098        return true;
4099}
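
/*
 * Example outcomes of the checks above (illustrative only): LINEAR is
 * accepted for every format; a GFX9 D-swizzle modifier is rejected for
 * 32bpp formats such as DRM_FORMAT_XRGB8888 (cpp[0] == 4 on Navi parts,
 * cpp[0] < 8 on Raven); and any DCC modifier is rejected for NV12, both
 * because cpp[0] != 4 and because the format carries more than one plane.
 */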
4100
4101static void
4102add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4103{
4104        if (!*mods)
4105                return;
4106
4107        if (*cap - *size < 1) {
4108                uint64_t new_cap = *cap * 2;
4109                uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4110
4111                if (!new_mods) {
4112                        kfree(*mods);
4113                        *mods = NULL;
4114                        return;
4115                }
4116
4117                memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4118                kfree(*mods);
4119                *mods = new_mods;
4120                *cap = new_cap;
4121        }
4122
4123        (*mods)[*size] = mod;
4124        *size += 1;
4125}
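
/*
 * Note on the helper above: it appends into a heap-grown array with
 * capacity doubling, and degrades gracefully on allocation failure by
 * freeing the array and leaving *mods NULL; the early return at the top
 * then turns every later add_modifier() call into a no-op, so callers
 * only need a single NULL check after all modifiers have been added.
 */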
4126
4127static void
4128add_gfx9_modifiers(const struct amdgpu_device *adev,
4129                   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4130{
4131        int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4132        int pipe_xor_bits = min(8, pipes +
4133                                ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4134        int bank_xor_bits = min(8 - pipe_xor_bits,
4135                                ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4136        int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4137                 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4138
4140        if (adev->family == AMDGPU_FAMILY_RV) {
4141                /* Raven2 and later */
4142                bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4143
4144                /*
4145                 * No _D DCC swizzles yet because we only allow 32bpp, which
4146                 * doesn't support _D on DCN
4147                 */
4148
4149                if (has_constant_encode) {
4150                        add_modifier(mods, size, capacity, AMD_FMT_MOD |
4151                                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4152                                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4153                                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4154                                    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4155                                    AMD_FMT_MOD_SET(DCC, 1) |
4156                                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4157                                    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4158                                    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4159                }
4160
4161                add_modifier(mods, size, capacity, AMD_FMT_MOD |
4162                            AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4163                            AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4164                            AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4165                            AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4166                            AMD_FMT_MOD_SET(DCC, 1) |
4167                            AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4168                            AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4169                            AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4170
4171                if (has_constant_encode) {
4172                        add_modifier(mods, size, capacity, AMD_FMT_MOD |
4173                                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4174                                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4175                                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4176                                    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4177                                    AMD_FMT_MOD_SET(DCC, 1) |
4178                                    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4179                                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4180                                    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4181
4182                                    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4183                                    AMD_FMT_MOD_SET(RB, rb) |
4184                                    AMD_FMT_MOD_SET(PIPE, pipes));
4185                }
4186
4187                add_modifier(mods, size, capacity, AMD_FMT_MOD |
4188                            AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4189                            AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4190                            AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4191                            AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4192                            AMD_FMT_MOD_SET(DCC, 1) |
4193                            AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4194                            AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4195                            AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4196                            AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4197                            AMD_FMT_MOD_SET(RB, rb) |
4198                            AMD_FMT_MOD_SET(PIPE, pipes));
4199        }
4200
4201        /*
4202         * Only supported for 64bpp on Raven, will be filtered on format in
4203         * dm_plane_format_mod_supported.
4204         */
4205        add_modifier(mods, size, capacity, AMD_FMT_MOD |
4206                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4207                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4208                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4209                    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4210
4211        if (adev->family == AMDGPU_FAMILY_RV) {
4212                add_modifier(mods, size, capacity, AMD_FMT_MOD |
4213                            AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4214                            AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4215                            AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4216                            AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4217        }
4218
4219        /*
4220         * Only supported for 64bpp on Raven, will be filtered on format in
4221         * dm_plane_format_mod_supported.
4222         */
4223        add_modifier(mods, size, capacity, AMD_FMT_MOD |
4224                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4225                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4226
4227        if (adev->family == AMDGPU_FAMILY_RV) {
4228                add_modifier(mods, size, capacity, AMD_FMT_MOD |
4229                            AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4230                            AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4231        }
4232}
4233
4234static void
4235add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4236                      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4237{
4238        int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4239
4240        add_modifier(mods, size, capacity, AMD_FMT_MOD |
4241                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4242                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4243                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4244                    AMD_FMT_MOD_SET(DCC, 1) |
4245                    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4246                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4247                    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4248
4249        add_modifier(mods, size, capacity, AMD_FMT_MOD |
4250                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4251                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4252                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4253                    AMD_FMT_MOD_SET(DCC, 1) |
4254                    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4255                    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4256                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4257                    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4258
4259        add_modifier(mods, size, capacity, AMD_FMT_MOD |
4260                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4261                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4262                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4263
4264        add_modifier(mods, size, capacity, AMD_FMT_MOD |
4265                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4266                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4267                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4268
4270        /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4271        add_modifier(mods, size, capacity, AMD_FMT_MOD |
4272                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4273                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4274
4275        add_modifier(mods, size, capacity, AMD_FMT_MOD |
4276                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4277                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4278}
4279
4280static void
4281add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4282                      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4283{
4284        int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4285        int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4286
4287        add_modifier(mods, size, capacity, AMD_FMT_MOD |
4288                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4289                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4290                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4291                    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4292                    AMD_FMT_MOD_SET(DCC, 1) |
4293                    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4294                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4295                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4296                    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4297
4298        add_modifier(mods, size, capacity, AMD_FMT_MOD |
4299                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4300                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4301                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4302                    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4303                    AMD_FMT_MOD_SET(DCC, 1) |
4304                    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4305                    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4306                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4307                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4308                    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4309
4310        add_modifier(mods, size, capacity, AMD_FMT_MOD |
4311                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4312                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4313                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4314                    AMD_FMT_MOD_SET(PACKERS, pkrs));
4315
4316        add_modifier(mods, size, capacity, AMD_FMT_MOD |
4317                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4318                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4319                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4320                    AMD_FMT_MOD_SET(PACKERS, pkrs));
4321
4322        /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4323        add_modifier(mods, size, capacity, AMD_FMT_MOD |
4324                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4325                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4326
4327        add_modifier(mods, size, capacity, AMD_FMT_MOD |
4328                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4329                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4330}
4331
4332static int
4333get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4334{
4335        uint64_t size = 0, capacity = 128;
4336        *mods = NULL;
4337
4338        /* We have not hooked up any pre-GFX9 modifiers. */
4339        if (adev->family < AMDGPU_FAMILY_AI)
4340                return 0;
4341
4342        *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
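            /*
             * NOTE: a failed allocation is tolerated here; the NULL checks
             * below (and in the cursor path) turn it into -ENOMEM. This
             * assumes add_modifier() (defined elsewhere in this file) is a
             * no-op while *mods is NULL.
             */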
4343
4344        if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4345                add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4346                add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4347                return *mods ? 0 : -ENOMEM;
4348        }
4349
4350        switch (adev->family) {
4351        case AMDGPU_FAMILY_AI:
4352        case AMDGPU_FAMILY_RV:
4353                add_gfx9_modifiers(adev, mods, &size, &capacity);
4354                break;
4355        case AMDGPU_FAMILY_NV:
4356        case AMDGPU_FAMILY_VGH:
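                    /* Sienna Cichlid and newer parts use the gfx10.3 (RB+) layout. */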
4357                if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4358                        add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4359                else
4360                        add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4361                break;
4362        }
4363
4364        add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4365
4366        /* INVALID marks the end of the list. */
4367        add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4368
4369        if (!*mods)
4370                return -ENOMEM;
4371
4372        return 0;
4373}
4374
4375static int
4376fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4377                                          const struct amdgpu_framebuffer *afb,
4378                                          const enum surface_pixel_format format,
4379                                          const enum dc_rotation_angle rotation,
4380                                          const struct plane_size *plane_size,
4381                                          union dc_tiling_info *tiling_info,
4382                                          struct dc_plane_dcc_param *dcc,
4383                                          struct dc_plane_address *address,
4384                                          const bool force_disable_dcc)
4385{
4386        const uint64_t modifier = afb->base.modifier;
4387        int ret;
4388
4389        fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4390        tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4391
4392        if (modifier_has_dcc(modifier) && !force_disable_dcc) {
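                    /*
                     * For DCC-capable modifiers the metadata surface is carried
                     * in framebuffer plane 1, hence offsets[1]/pitches[1] below.
                     */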
4393                uint64_t dcc_address = afb->address + afb->base.offsets[1];
4394
4395                dcc->enable = 1;
4396                dcc->meta_pitch = afb->base.pitches[1];
4397                dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4398
4399                address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4400                address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4401        }
4402
4403        ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4404        if (ret)
4405                return ret;
4406
4407        return 0;
4408}
4409
4410static int
4411fill_plane_buffer_attributes(struct amdgpu_device *adev,
4412                             const struct amdgpu_framebuffer *afb,
4413                             const enum surface_pixel_format format,
4414                             const enum dc_rotation_angle rotation,
4415                             const uint64_t tiling_flags,
4416                             union dc_tiling_info *tiling_info,
4417                             struct plane_size *plane_size,
4418                             struct dc_plane_dcc_param *dcc,
4419                             struct dc_plane_address *address,
4420                             bool tmz_surface,
4421                             bool force_disable_dcc)
4422{
4423        const struct drm_framebuffer *fb = &afb->base;
4424        int ret;
4425
4426        memset(tiling_info, 0, sizeof(*tiling_info));
4427        memset(plane_size, 0, sizeof(*plane_size));
4428        memset(dcc, 0, sizeof(*dcc));
4429        memset(address, 0, sizeof(*address));
4430
4431        address->tmz_surface = tmz_surface;
4432
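            /*
             * Graphics (RGB) formats use a single surface; the video formats
             * handled below carry an additional chroma plane.
             */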
4433        if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4434                uint64_t addr = afb->address + fb->offsets[0];
4435
4436                plane_size->surface_size.x = 0;
4437                plane_size->surface_size.y = 0;
4438                plane_size->surface_size.width = fb->width;
4439                plane_size->surface_size.height = fb->height;
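                    /* DC expects the pitch in pixels; DRM stores it in bytes. */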
4440                plane_size->surface_pitch =
4441                        fb->pitches[0] / fb->format->cpp[0];
4442
4443                address->type = PLN_ADDR_TYPE_GRAPHICS;
4444                address->grph.addr.low_part = lower_32_bits(addr);
4445                address->grph.addr.high_part = upper_32_bits(addr);
4446        } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4447                uint64_t luma_addr = afb->address + fb->offsets[0];
4448                uint64_t chroma_addr = afb->address + fb->offsets[1];
4449
4450                plane_size->surface_size.x = 0;
4451                plane_size->surface_size.y = 0;
4452                plane_size->surface_size.width = fb->width;
4453                plane_size->surface_size.height = fb->height;
4454                plane_size->surface_pitch =
4455                        fb->pitches[0] / fb->format->cpp[0];
4456
4457                plane_size->chroma_size.x = 0;
4458                plane_size->chroma_size.y = 0;
4459                /* TODO: set these based on surface format */
4460                plane_size->chroma_size.width = fb->width / 2;
4461                plane_size->chroma_size.height = fb->height / 2;
4462
4463                plane_size->chroma_pitch =
4464                        fb->pitches[1] / fb->format->cpp[1];
4465
4466                address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4467                address->video_progressive.luma_addr.low_part =
4468                        lower_32_bits(luma_addr);
4469                address->video_progressive.luma_addr.high_part =
4470                        upper_32_bits(luma_addr);
4471                address->video_progressive.chroma_addr.low_part =
4472                        lower_32_bits(chroma_addr);
4473                address->video_progressive.chroma_addr.high_part =
4474                        upper_32_bits(chroma_addr);
4475        }
4476
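            /*
             * From GFX9 onward, tiling and DCC attributes are derived from the
             * framebuffer modifier; older families still use legacy tiling flags.
             */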
4477        if (adev->family >= AMDGPU_FAMILY_AI) {
4478                ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4479                                                                rotation, plane_size,
4480                                                                tiling_info, dcc,
4481                                                                address,
4482                                                                force_disable_dcc);
4483                if (ret)
4484                        return ret;
4485        } else {
4486                fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4487        }
4488
4489        return 0;
4490}
4491
4492static void
4493fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4494                               bool *per_pixel_alpha, bool *global_alpha,
4495                               int *global_alpha_value)
4496{
4497        *per_pixel_alpha = false;
4498        *global_alpha = false;
4499        *global_alpha_value = 0xff;
4500
4501        if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4502                return;
4503
4504        if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4505                static const uint32_t alpha_formats[] = {
4506                        DRM_FORMAT_ARGB8888,
4507                        DRM_FORMAT_RGBA8888,
4508                        DRM_FORMAT_ABGR8888,
4509                };
4510                uint32_t format = plane_state->fb->format->format;
4511                unsigned int i;
4512
4513                for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4514                        if (format == alpha_formats[i]) {
4515                                *per_pixel_alpha = true;
4516                                break;
4517                        }
4518                }
4519        }
4520
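            /*
             * DRM plane alpha is 16 bits wide (0xffff == fully opaque), while
             * DC takes an 8-bit global alpha, so keep only the high byte.
             */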
4521        if (plane_state->alpha < 0xffff) {
4522                *global_alpha = true;
4523                *global_alpha_value = plane_state->alpha >> 8;
4524        }
4525}
4526
4527static int
4528fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4529                            const enum surface_pixel_format format,
4530                            enum dc_color_space *color_space)
4531{
4532        bool full_range;
4533
4534        *color_space = COLOR_SPACE_SRGB;
4535
4536        /* DRM color properties only affect non-RGB formats. */
4537        if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4538                return 0;
4539
4540        full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4541
4542        switch (plane_state->color_encoding) {
4543        case DRM_COLOR_YCBCR_BT601:
4544                if (full_range)
4545                        *color_space = COLOR_SPACE_YCBCR601;
4546                else
4547                        *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4548                break;
4549
4550        case DRM_COLOR_YCBCR_BT709:
4551                if (full_range)
4552                        *color_space = COLOR_SPACE_YCBCR709;
4553                else
4554                        *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4555                break;
4556
4557        case DRM_COLOR_YCBCR_BT2020:
4558                if (full_range)
4559                        *color_space = COLOR_SPACE_2020_YCBCR;
4560                else
4561                        return -EINVAL;
4562                break;
4563
4564        default:
4565                return -EINVAL;
4566        }
4567
4568        return 0;
4569}
4570
4571static int
4572fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4573                            const struct drm_plane_state *plane_state,
4574                            const uint64_t tiling_flags,
4575                            struct dc_plane_info *plane_info,
4576                            struct dc_plane_address *address,
4577                            bool tmz_surface,
4578                            bool force_disable_dcc)
4579{
4580        const struct drm_framebuffer *fb = plane_state->fb;
4581        const struct amdgpu_framebuffer *afb =
4582                to_amdgpu_framebuffer(plane_state->fb);
4583        struct drm_format_name_buf format_name;
4584        int ret;
4585
4586        memset(plane_info, 0, sizeof(*plane_info));
4587
4588        switch (fb->format->format) {
4589        case DRM_FORMAT_C8:
4590                plane_info->format =
4591                        SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4592                break;
4593        case DRM_FORMAT_RGB565:
4594                plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4595                break;
4596        case DRM_FORMAT_XRGB8888:
4597        case DRM_FORMAT_ARGB8888:
4598                plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4599                break;
4600        case DRM_FORMAT_XRGB2101010:
4601        case DRM_FORMAT_ARGB2101010:
4602                plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4603                break;
4604        case DRM_FORMAT_XBGR2101010:
4605        case DRM_FORMAT_ABGR2101010:
4606                plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4607                break;
4608        case DRM_FORMAT_XBGR8888:
4609        case DRM_FORMAT_ABGR8888:
4610                plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4611                break;
4612        case DRM_FORMAT_NV21:
4613                plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4614                break;
4615        case DRM_FORMAT_NV12:
4616                plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4617                break;
4618        case DRM_FORMAT_P010:
4619                plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4620                break;
4621        case DRM_FORMAT_XRGB16161616F:
4622        case DRM_FORMAT_ARGB16161616F:
4623                plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4624                break;
4625        case DRM_FORMAT_XBGR16161616F:
4626        case DRM_FORMAT_ABGR16161616F:
4627                plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4628                break;
4629        default:
4630                DRM_ERROR(
4631                        "Unsupported screen format %s\n",
4632                        drm_get_format_name(fb->format->format, &format_name));
4633                return -EINVAL;
4634        }
4635
4636        switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4637        case DRM_MODE_ROTATE_0:
4638                plane_info->rotation = ROTATION_ANGLE_0;
4639                break;
4640        case DRM_MODE_ROTATE_90:
4641                plane_info->rotation = ROTATION_ANGLE_90;
4642                break;
4643        case DRM_MODE_ROTATE_180:
4644                plane_info->rotation = ROTATION_ANGLE_180;
4645                break;
4646        case DRM_MODE_ROTATE_270:
4647                plane_info->rotation = ROTATION_ANGLE_270;
4648                break;
4649        default:
4650                plane_info->rotation = ROTATION_ANGLE_0;
4651                break;
4652        }
4653
4654        plane_info->visible = true;
4655        plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4656
4657        plane_info->layer_index = 0;
4658
4659        ret = fill_plane_color_attributes(plane_state, plane_info->format,
4660                                          &plane_info->color_space);
4661        if (ret)
4662                return ret;
4663
4664        ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4665                                           plane_info->rotation, tiling_flags,
4666                                           &plane_info->tiling_info,
4667                                           &plane_info->plane_size,
4668                                           &plane_info->dcc, address, tmz_surface,
4669                                           force_disable_dcc);
4670        if (ret)
4671                return ret;
4672
4673        fill_blending_from_plane_state(
4674                plane_state, &plane_info->per_pixel_alpha,
4675                &plane_info->global_alpha, &plane_info->global_alpha_value);
4676
4677        return 0;
4678}
4679
4680static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4681                                    struct dc_plane_state *dc_plane_state,
4682                                    struct drm_plane_state *plane_state,
4683                                    struct drm_crtc_state *crtc_state)
4684{
4685        struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4686        struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4687        struct dc_scaling_info scaling_info;
4688        struct dc_plane_info plane_info;
4689        int ret;
4690        bool force_disable_dcc = false;
4691
4692        ret = fill_dc_scaling_info(plane_state, &scaling_info);
4693        if (ret)
4694                return ret;
4695
4696        dc_plane_state->src_rect = scaling_info.src_rect;
4697        dc_plane_state->dst_rect = scaling_info.dst_rect;
4698        dc_plane_state->clip_rect = scaling_info.clip_rect;
4699        dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4700
4701        force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4702        ret = fill_dc_plane_info_and_addr(adev, plane_state,
4703                                          afb->tiling_flags,
4704                                          &plane_info,
4705                                          &dc_plane_state->address,
4706                                          afb->tmz_surface,
4707                                          force_disable_dcc);
4708        if (ret)
4709                return ret;
4710
4711        dc_plane_state->format = plane_info.format;
4712        dc_plane_state->color_space = plane_info.color_space;
4714        dc_plane_state->plane_size = plane_info.plane_size;
4715        dc_plane_state->rotation = plane_info.rotation;
4716        dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4717        dc_plane_state->stereo_format = plane_info.stereo_format;
4718        dc_plane_state->tiling_info = plane_info.tiling_info;
4719        dc_plane_state->visible = plane_info.visible;
4720        dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4721        dc_plane_state->global_alpha = plane_info.global_alpha;
4722        dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4723        dc_plane_state->dcc = plane_info.dcc;
4724        dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
4725        dc_plane_state->flip_int_enabled = true;
4726
4727        /*
4728         * Always set input transfer function, since plane state is refreshed
4729         * every time.
4730         */
4731        ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4732        if (ret)
4733                return ret;
4734
4735        return 0;
4736}
4737
4738static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4739                                           const struct dm_connector_state *dm_state,
4740                                           struct dc_stream_state *stream)
4741{
4742        enum amdgpu_rmx_type rmx_type;
4743
4744        struct rect src = { 0 }; /* viewport in composition space */
4745        struct rect dst = { 0 }; /* stream addressable area */
4746
4747        /* no mode. nothing to be done */
4748        if (!mode)
4749                return;
4750
4751        /* Full screen scaling by default */
4752        src.width = mode->hdisplay;
4753        src.height = mode->vdisplay;
4754        dst.width = stream->timing.h_addressable;
4755        dst.height = stream->timing.v_addressable;
4756
4757        if (dm_state) {
4758                rmx_type = dm_state->scaling;
4759                if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
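                            /*
                             * Compare source and destination aspect ratios by
                             * cross-multiplication to avoid integer division.
                             */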
4760                        if (src.width * dst.height <
4761                                        src.height * dst.width) {
4762                                /* height needs less upscaling/more downscaling */
4763                                dst.width = src.width *
4764                                                dst.height / src.height;
4765                        } else {
4766                                /* width needs less upscaling/more downscaling */
4767                                dst.height = src.height *
4768                                                dst.width / src.width;
4769                        }
4770                } else if (rmx_type == RMX_CENTER) {
4771                        dst = src;
4772                }
4773
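                    /* Centre the scaled rectangle within the addressable area. */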
4774                dst.x = (stream->timing.h_addressable - dst.width) / 2;
4775                dst.y = (stream->timing.v_addressable - dst.height) / 2;
4776
4777                if (dm_state->underscan_enable) {
4778                        dst.x += dm_state->underscan_hborder / 2;
4779                        dst.y += dm_state->underscan_vborder / 2;
4780                        dst.width -= dm_state->underscan_hborder;
4781                        dst.height -= dm_state->underscan_vborder;
4782                }
4783        }
4784
4785        stream->src = src;
4786        stream->dst = dst;
4787
4788        DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4789                        dst.x, dst.y, dst.width, dst.height);
4790
4791}
4792
4793static enum dc_color_depth
4794convert_color_depth_from_display_info(const struct drm_connector *connector,
4795                                      bool is_y420, int requested_bpc)
4796{
4797        uint8_t bpc;
4798
4799        if (is_y420) {
4800                bpc = 8;
4801
4802                /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4803                if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4804                        bpc = 16;
4805                else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4806                        bpc = 12;
4807                else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4808                        bpc = 10;
4809        } else {
4810                bpc = (uint8_t)connector->display_info.bpc;
4811                /* Assume 8 bpc by default if no bpc is specified. */
4812                bpc = bpc ? bpc : 8;
4813        }
4814
4815        if (requested_bpc > 0) {
4816                /*
4817                 * Cap display bpc based on the user requested value.
4818                 *
4819                 * The value for state->max_bpc may not be correctly updated
4820                 * depending on when the connector gets added to the state
4821                 * or if this was called outside of atomic check, so it
4822                 * can't be used directly.
4823                 */
4824                bpc = min_t(u8, bpc, requested_bpc);
4825
4826                /* Round down to the nearest even number. */
4827                bpc = bpc - (bpc & 1);
4828        }
4829
4830        switch (bpc) {
4831        case 0:
4832                /*
4833                 * Temporary workaround: DRM doesn't parse color depth for
4834                 * EDID revisions before 1.4.
4835                 * TODO: Fix EDID parsing.
4836                 */
4837                return COLOR_DEPTH_888;
4838        case 6:
4839                return COLOR_DEPTH_666;
4840        case 8:
4841                return COLOR_DEPTH_888;
4842        case 10:
4843                return COLOR_DEPTH_101010;
4844        case 12:
4845                return COLOR_DEPTH_121212;
4846        case 14:
4847                return COLOR_DEPTH_141414;
4848        case 16:
4849                return COLOR_DEPTH_161616;
4850        default:
4851                return COLOR_DEPTH_UNDEFINED;
4852        }
4853}
4854
4855static enum dc_aspect_ratio
4856get_aspect_ratio(const struct drm_display_mode *mode_in)
4857{
4858        /* 1-1 mapping, since both enums follow the HDMI spec. */
4859        return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4860}
4861
4862static enum dc_color_space
4863get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4864{
4865        enum dc_color_space color_space = COLOR_SPACE_SRGB;
4866
4867        switch (dc_crtc_timing->pixel_encoding) {
4868        case PIXEL_ENCODING_YCBCR422:
4869        case PIXEL_ENCODING_YCBCR444:
4870        case PIXEL_ENCODING_YCBCR420:
4871        {
4872                /*
4873                 * 27.03 MHz (27030 kHz) is the separation point between HDTV
4874                 * and SDTV according to the HDMI spec; we use YCbCr709 and
4875                 * YCbCr601 respectively.
4876                 */
4877                if (dc_crtc_timing->pix_clk_100hz > 270300) {
4878                        if (dc_crtc_timing->flags.Y_ONLY)
4879                                color_space =
4880                                        COLOR_SPACE_YCBCR709_LIMITED;
4881                        else
4882                                color_space = COLOR_SPACE_YCBCR709;
4883                } else {
4884                        if (dc_crtc_timing->flags.Y_ONLY)
4885                                color_space =
4886                                        COLOR_SPACE_YCBCR601_LIMITED;
4887                        else
4888                                color_space = COLOR_SPACE_YCBCR601;
4889                }
4890
4891        }
4892        break;
4893        case PIXEL_ENCODING_RGB:
4894                color_space = COLOR_SPACE_SRGB;
4895                break;
4896
4897        default:
4898                WARN_ON(1);
4899                break;
4900        }
4901
4902        return color_space;
4903}
4904
4905static bool adjust_colour_depth_from_display_info(
4906        struct dc_crtc_timing *timing_out,
4907        const struct drm_display_info *info)
4908{
4909        enum dc_color_depth depth = timing_out->display_color_depth;
4910        int normalized_clk;

4911        do {
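                    /*
                     * pix_clk_100hz is in 100 Hz units, so dividing by 10
                     * yields kHz, the unit used by info->max_tmds_clock.
                     */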
4912                normalized_clk = timing_out->pix_clk_100hz / 10;
4913                /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4914                if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4915                        normalized_clk /= 2;
4916                /* Adjust the pixel clock per the HDMI spec, based on the colour depth. */
4917                switch (depth) {
4918                case COLOR_DEPTH_888:
4919                        break;
4920                case COLOR_DEPTH_101010:
4921                        normalized_clk = (normalized_clk * 30) / 24;
4922                        break;
4923                case COLOR_DEPTH_121212:
4924                        normalized_clk = (normalized_clk * 36) / 24;
4925                        break;
4926                case COLOR_DEPTH_161616:
4927                        normalized_clk = (normalized_clk * 48) / 24;
4928                        break;
4929                default:
4930                        /* The above depths are the only ones valid for HDMI. */
4931                        return false;
4932                }
4933                if (normalized_clk <= info->max_tmds_clock) {
4934                        timing_out->display_color_depth = depth;
4935                        return true;
4936                }
4937        } while (--depth > COLOR_DEPTH_666);
4938        return false;
4939}
4940
4941static void fill_stream_properties_from_drm_display_mode(
4942        struct dc_stream_state *stream,
4943        const struct drm_display_mode *mode_in,
4944        const struct drm_connector *connector,
4945        const struct drm_connector_state *connector_state,
4946        const struct dc_stream_state *old_stream,
4947        int requested_bpc)
4948{
4949        struct dc_crtc_timing *timing_out = &stream->timing;
4950        const struct drm_display_info *info = &connector->display_info;
4951        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4952        struct hdmi_vendor_infoframe hv_frame;
4953        struct hdmi_avi_infoframe avi_frame;
4954
4955        memset(&hv_frame, 0, sizeof(hv_frame));
4956        memset(&avi_frame, 0, sizeof(avi_frame));
4957
4958        timing_out->h_border_left = 0;
4959        timing_out->h_border_right = 0;
4960        timing_out->v_border_top = 0;
4961        timing_out->v_border_bottom = 0;
4962        /* TODO: un-hardcode */
4963        if (drm_mode_is_420_only(info, mode_in)
4964                        && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4965                timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4966        else if (drm_mode_is_420_also(info, mode_in)
4967                        && aconnector->force_yuv420_output)
4968                timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4969        else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4970                        && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4971                timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4972        else
4973                timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4974
4975        timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4976        timing_out->display_color_depth = convert_color_depth_from_display_info(
4977                connector,
4978                (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4979                requested_bpc);
4980        timing_out->scan_type = SCANNING_TYPE_NODATA;
4981        timing_out->hdmi_vic = 0;
4982
4983        if (old_stream) {
4984                timing_out->vic = old_stream->timing.vic;
4985                timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4986                timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4987        } else {
4988                timing_out->vic = drm_match_cea_mode(mode_in);
4989                if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4990                        timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4991                if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4992                        timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4993        }
4994
4995        if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4996                drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4997                timing_out->vic = avi_frame.video_code;
4998                drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4999                timing_out->hdmi_vic = hv_frame.vic;
5000        }
5001
5002        timing_out->h_addressable = mode_in->crtc_hdisplay;
5003        timing_out->h_total = mode_in->crtc_htotal;
5004        timing_out->h_sync_width =
5005                mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5006        timing_out->h_front_porch =
5007                mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5008        timing_out->v_total = mode_in->crtc_vtotal;
5009        timing_out->v_addressable = mode_in->crtc_vdisplay;
5010        timing_out->v_front_porch =
5011                mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5012        timing_out->v_sync_width =
5013                mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5014        timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5015        timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5016
5017        stream->output_color_space = get_output_color_space(timing_out);
5018
5019        stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5020        stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5021        if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5022                if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5023                    drm_mode_is_420_also(info, mode_in) &&
5024                    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5025                        timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5026                        adjust_colour_depth_from_display_info(timing_out, info);
5027                }
5028        }
5029}
5030
5031static void fill_audio_info(struct audio_info *audio_info,
5032                            const struct drm_connector *drm_connector,
5033                            const struct dc_sink *dc_sink)
5034{
5035        int i = 0;
5036        int cea_revision = 0;
5037        const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5038
5039        audio_info->manufacture_id = edid_caps->manufacturer_id;
5040        audio_info->product_id = edid_caps->product_id;
5041
5042        cea_revision = drm_connector->display_info.cea_rev;
5043
5044        strscpy(audio_info->display_name,
5045                edid_caps->display_name,
5046                AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5047
5048        if (cea_revision >= 3) {
5049                audio_info->mode_count = edid_caps->audio_mode_count;
5050
5051                for (i = 0; i < audio_info->mode_count; ++i) {
5052                        audio_info->modes[i].format_code =
5053                                        (enum audio_format_code)
5054                                        (edid_caps->audio_modes[i].format_code);
5055                        audio_info->modes[i].channel_count =
5056                                        edid_caps->audio_modes[i].channel_count;
5057                        audio_info->modes[i].sample_rates.all =
5058                                        edid_caps->audio_modes[i].sample_rate;
5059                        audio_info->modes[i].sample_size =
5060                                        edid_caps->audio_modes[i].sample_size;
5061                }
5062        }
5063
5064        audio_info->flags.all = edid_caps->speaker_flags;
5065
5066        /* TODO: We only check the progressive mode; check the interlaced mode too. */
5067        if (drm_connector->latency_present[0]) {
5068                audio_info->video_latency = drm_connector->video_latency[0];
5069                audio_info->audio_latency = drm_connector->audio_latency[0];
5070        }
5071
5072        /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5073
5074}
5075
5076static void
5077copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5078                                      struct drm_display_mode *dst_mode)
5079{
5080        dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5081        dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5082        dst_mode->crtc_clock = src_mode->crtc_clock;
5083        dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5084        dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5085        dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5086        dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5087        dst_mode->crtc_htotal = src_mode->crtc_htotal;
5088        dst_mode->crtc_hskew = src_mode->crtc_hskew;
5089        dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5090        dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5091        dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5092        dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5093        dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5094}
5095
5096static void
5097decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5098                                        const struct drm_display_mode *native_mode,
5099                                        bool scale_enabled)
5100{
5101        if (scale_enabled) {
5102                copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5103        } else if (native_mode->clock == drm_mode->clock &&
5104                        native_mode->htotal == drm_mode->htotal &&
5105                        native_mode->vtotal == drm_mode->vtotal) {
5106                copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5107        } else {
5108                /* no scaling and no amdgpu-inserted mode; nothing to patch */
5109        }
5110}
5111
5112static struct dc_sink *
5113create_fake_sink(struct amdgpu_dm_connector *aconnector)
5114{
5115        struct dc_sink_init_data sink_init_data = { 0 };
5116        struct dc_sink *sink = NULL;

5117        sink_init_data.link = aconnector->dc_link;
5118        sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5119
5120        sink = dc_sink_create(&sink_init_data);
5121        if (!sink) {
5122                DRM_ERROR("Failed to create sink!\n");
5123                return NULL;
5124        }
5125        sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5126
5127        return sink;
5128}
5129
5130static void set_multisync_trigger_params(
5131                struct dc_stream_state *stream)
5132{
5133        if (stream->triggered_crtc_reset.enabled) {
5134                stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5135                stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5136        }
5137}
5138
5139static void set_master_stream(struct dc_stream_state *stream_set[],
5140                              int stream_count)
5141{
5142        int j, highest_rfr = 0, master_stream = 0;
5143
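            /*
             * Pick the stream with the highest refresh rate as the master;
             * every synchronized stream then uses it as its reset event source.
             */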
5144        for (j = 0;  j < stream_count; j++) {
5145                if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5146                        int refresh_rate = 0;
5147
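                                    /*
                                     * pix_clk_100hz * 100 is the pixel clock in Hz;
                                     * dividing by the pixels per frame (h_total *
                                     * v_total) gives the refresh rate in Hz.
                                     */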
5148                        refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5149                                (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5150                        if (refresh_rate > highest_rfr) {
5151                                highest_rfr = refresh_rate;
5152                                master_stream = j;
5153                        }
5154                }
5155        }
5156        for (j = 0;  j < stream_count; j++) {
5157                if (stream_set[j])
5158                        stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5159        }
5160}
5161
5162static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5163{
5164        int i = 0;
5165
5166        if (context->stream_count < 2)
5167                return;
5168        for (i = 0; i < context->stream_count ; i++) {
5169                if (!context->streams[i])
5170                        continue;
5171                /*
5172                 * TODO: add a function to read AMD VSDB bits and set
5173                 * crtc_sync_master.multi_sync_enabled flag
5174                 * For now it's set to false
5175                 */
5176                set_multisync_trigger_params(context->streams[i]);
5177        }
5178        set_master_stream(context->streams, context->stream_count);
5179}
5180
5181static struct dc_stream_state *
5182create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5183                       const struct drm_display_mode *drm_mode,
5184                       const struct dm_connector_state *dm_state,
5185                       const struct dc_stream_state *old_stream,
5186                       int requested_bpc)
5187{
5188        struct drm_display_mode *preferred_mode = NULL;
5189        struct drm_connector *drm_connector;
5190        const struct drm_connector_state *con_state =
5191                dm_state ? &dm_state->base : NULL;
5192        struct dc_stream_state *stream = NULL;
5193        struct drm_display_mode mode = *drm_mode;
5194        bool native_mode_found = false;
5195        bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5196        int mode_refresh;
5197        int preferred_refresh = 0;
5198#if defined(CONFIG_DRM_AMD_DC_DCN)
5199        struct dsc_dec_dpcd_caps dsc_caps;
5200        uint32_t link_bandwidth_kbps;
5201#endif
5202        struct dc_sink *sink = NULL;

5203        if (aconnector == NULL) {
5204                DRM_ERROR("aconnector is NULL!\n");
5205                return stream;
5206        }
5207
5208        drm_connector = &aconnector->base;
5209
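            /*
             * Without a real sink (e.g. a forced-enabled connector) fall back
             * to a virtual sink so a stream can still be constructed.
             */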
5210        if (!aconnector->dc_sink) {
5211                sink = create_fake_sink(aconnector);
5212                if (!sink)
5213                        return stream;
5214        } else {
5215                sink = aconnector->dc_sink;
5216                dc_sink_retain(sink);
5217        }
5218
5219        stream = dc_create_stream_for_sink(sink);
5220
5221        if (stream == NULL) {
5222                DRM_ERROR("Failed to create stream for sink!\n");
5223                goto finish;
5224        }
5225
5226        stream->dm_stream_context = aconnector;
5227
5228        stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5229                drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5230
5231        list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5232                /* Search for preferred mode */
5233                if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5234                        native_mode_found = true;
5235                        break;
5236                }
5237        }
5238        if (!native_mode_found)
5239                preferred_mode = list_first_entry_or_null(
5240                                &aconnector->base.modes,
5241                                struct drm_display_mode,
5242                                head);
5243
5244        mode_refresh = drm_mode_vrefresh(&mode);
5245
5246        if (preferred_mode == NULL) {
5247                /*
5248                 * This may not be an error: the use case is when we have no
5249                 * usermode calls to reset and set the mode upon hotplug. In this
5250                 * case, we call set mode ourselves to restore the previous mode,
5251                 * and the mode list may not be filled in yet.
5252                 */
5253                DRM_DEBUG_DRIVER("No preferred mode found\n");
5254        } else {
5255                decide_crtc_timing_for_drm_display_mode(
5256                                &mode, preferred_mode,
5257                                dm_state ? (dm_state->scaling != RMX_OFF) : false);
5258                preferred_refresh = drm_mode_vrefresh(preferred_mode);
5259        }
5260
5261        if (!dm_state)
5262                drm_mode_set_crtcinfo(&mode, 0);
5263
5264        /*
5265         * If scaling is enabled and the refresh rate didn't change,
5266         * we copy the VIC and polarities from the old timings.
5267         */
5268        if (!scale || mode_refresh != preferred_refresh)
5269                fill_stream_properties_from_drm_display_mode(stream,
5270                        &mode, &aconnector->base, con_state, NULL, requested_bpc);
5271        else
5272                fill_stream_properties_from_drm_display_mode(stream,
5273                        &mode, &aconnector->base, con_state, old_stream, requested_bpc);
5274
5275        stream->timing.flags.DSC = 0;
5276
5277        if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5278#if defined(CONFIG_DRM_AMD_DC_DCN)
5279                dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5280                                      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5281                                      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5282                                      &dsc_caps);
5283                link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5284                                                             dc_link_get_link_cap(aconnector->dc_link));
5285
5286                if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5287                        /* Set DSC policy according to dsc_clock_en */
5288                        dc_dsc_policy_set_enable_dsc_when_not_needed(
5289                                aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5290
5291                        if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5292                                                  &dsc_caps,
5293                                                  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5294                                                  0,
5295                                                  link_bandwidth_kbps,
5296                                                  &stream->timing,
5297                                                  &stream->timing.dsc_cfg))
5298                                stream->timing.flags.DSC = 1;
5299                        /* Overwrite the stream flag if DSC is enabled through debugfs */
5300                        if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5301                                stream->timing.flags.DSC = 1;
5302
5303                        if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5304                                stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5305
5306                        if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5307                                stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5308
5309                        if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5310                                stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5311                }
5312#endif
5313        }
5314
5315        update_stream_scaling_settings(&mode, dm_state, stream);
5316
5317        fill_audio_info(
5318                &stream->audio_info,
5319                drm_connector,
5320                sink);
5321
5322        update_stream_signal(stream, sink);
5323
5324        if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5325                mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5326
5327        if (stream->link->psr_settings.psr_feature_enabled) {
5328                /*
5329                 * Decide whether the stream supports the VSC SDP colorimetry
5330                 * capability before building the VSC info packet.
5331                 */
5332                stream->use_vsc_sdp_for_colorimetry = false;
5333                if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5334                        stream->use_vsc_sdp_for_colorimetry =
5335                                aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5336                } else {
5337                        if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5338                                stream->use_vsc_sdp_for_colorimetry = true;
5339                }
5340                mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5341        }
5342finish:
5343        dc_sink_release(sink);
5344
5345        return stream;
5346}
5347
5348static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5349{
5350        drm_crtc_cleanup(crtc);
5351        kfree(crtc);
5352}
5353
5354static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5355                                  struct drm_crtc_state *state)
5356{
5357        struct dm_crtc_state *cur = to_dm_crtc_state(state);
5358
5359        /* TODO: Destroy dc_stream objects once the stream object is flattened */
5360        if (cur->stream)
5361                dc_stream_release(cur->stream);
5362
5364        __drm_atomic_helper_crtc_destroy_state(state);
5365
5367        kfree(state);
5368}
5369
5370static void dm_crtc_reset_state(struct drm_crtc *crtc)
5371{
5372        struct dm_crtc_state *state;
5373
5374        if (crtc->state)
5375                dm_crtc_destroy_state(crtc, crtc->state);
5376
5377        state = kzalloc(sizeof(*state), GFP_KERNEL);
5378        if (WARN_ON(!state))
5379                return;
5380
5381        __drm_atomic_helper_crtc_reset(crtc, &state->base);
5382}
5383
5384static struct drm_crtc_state *
5385dm_crtc_duplicate_state(struct drm_crtc *crtc)
5386{
5387        struct dm_crtc_state *state, *cur;
5388
5389        if (WARN_ON(!crtc->state))
5390                return NULL;
5391
5392        cur = to_dm_crtc_state(crtc->state);
5393
5394        state = kzalloc(sizeof(*state), GFP_KERNEL);
5395        if (!state)
5396                return NULL;
5397
5398        __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5399
5400        if (cur->stream) {
5401                state->stream = cur->stream;
5402                dc_stream_retain(state->stream);
5403        }
5404
5405        state->active_planes = cur->active_planes;
5406        state->vrr_infopacket = cur->vrr_infopacket;
5407        state->abm_level = cur->abm_level;
5408        state->vrr_supported = cur->vrr_supported;
5409        state->freesync_config = cur->freesync_config;
5410        state->crc_src = cur->crc_src;
5411        state->cm_has_degamma = cur->cm_has_degamma;
5412        state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5413
5414        /* TODO: Duplicate dc_stream once the stream object is flattened */
5415
5416        return &state->base;
5417}
5418
5419static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5420{
5421        enum dc_irq_source irq_source;
5422        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5423        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5424        int rc;
5425
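            /* Per-OTG IRQ sources are consecutive; offset by the OTG instance. */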
5426        irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5427
5428        rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5429
5430        DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5431                         acrtc->crtc_id, enable ? "en" : "dis", rc);
5432        return rc;
5433}
5434
5435static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5436{
5437        enum dc_irq_source irq_source;
5438        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5439        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5440        struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5441#if defined(CONFIG_DRM_AMD_DC_DCN)
5442        struct amdgpu_display_manager *dm = &adev->dm;
5443        unsigned long flags;
5444#endif
5445        int rc = 0;
5446
5447        if (enable) {
5448                /* vblank irq on -> Only need vupdate irq in vrr mode */
5449                if (amdgpu_dm_vrr_active(acrtc_state))
5450                        rc = dm_set_vupdate_irq(crtc, true);
5451        } else {
5452                /* vblank irq off -> vupdate irq off */
5453                rc = dm_set_vupdate_irq(crtc, false);
5454        }
5455
5456        if (rc)
5457                return rc;
5458
5459        irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5460
5461        if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5462                return -EBUSY;
5463
5464        if (amdgpu_in_reset(adev))
5465                return 0;
5466
5467#if defined(CONFIG_DRM_AMD_DC_DCN)
5468        spin_lock_irqsave(&dm->vblank_lock, flags);
5469        dm->vblank_workqueue->dm = dm;
5470        dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5471        dm->vblank_workqueue->enable = enable;
5472        spin_unlock_irqrestore(&dm->vblank_lock, flags);
5473        schedule_work(&dm->vblank_workqueue->mall_work);
5474#endif
5475
5476        return 0;
5477}
5478
5479static int dm_enable_vblank(struct drm_crtc *crtc)
5480{
5481        return dm_set_vblank(crtc, true);
5482}
5483
5484static void dm_disable_vblank(struct drm_crtc *crtc)
5485{
5486        dm_set_vblank(crtc, false);
5487}
5488
5489/* Only the options currently available for the driver are implemented */
5490static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5491        .reset = dm_crtc_reset_state,
5492        .destroy = amdgpu_dm_crtc_destroy,
5493        .set_config = drm_atomic_helper_set_config,
5494        .page_flip = drm_atomic_helper_page_flip,
5495        .atomic_duplicate_state = dm_crtc_duplicate_state,
5496        .atomic_destroy_state = dm_crtc_destroy_state,
5497        .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5498        .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5499        .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5500        .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5501        .enable_vblank = dm_enable_vblank,
5502        .disable_vblank = dm_disable_vblank,
5503        .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5504};
5505
5506static enum drm_connector_status
5507amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5508{
5509        bool connected;
5510        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5511
5512        /*
5513         * Notes:
5514         * 1. This interface is NOT called in the context of the HPD irq.
5515         * 2. This interface *is* called in the context of a user-mode ioctl,
5516         * which makes it a bad place for *any* MST-related activity.
5517         */
5518
5519        if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5520            !aconnector->fake_enable)
5521                connected = (aconnector->dc_sink != NULL);
5522        else
5523                connected = (aconnector->base.force == DRM_FORCE_ON);
5524
5525        update_subconnector_property(aconnector);
5526
5527        return (connected ? connector_status_connected :
5528                        connector_status_disconnected);
5529}
5530
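    /*
     * Translate a write to one of the DM connector properties into the DM
     * connector state: scaling mode (mapped onto the RMX_* enums), the
     * underscan enable and h/v borders, and the ABM (Adaptive Backlight
     * Management) level.
     */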
5531int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5532                                            struct drm_connector_state *connector_state,
5533                                            struct drm_property *property,
5534                                            uint64_t val)
5535{
5536        struct drm_device *dev = connector->dev;
5537        struct amdgpu_device *adev = drm_to_adev(dev);
5538        struct dm_connector_state *dm_old_state =
5539                to_dm_connector_state(connector->state);
5540        struct dm_connector_state *dm_new_state =
5541                to_dm_connector_state(connector_state);
5542
5543        int ret = -EINVAL;
5544
5545        if (property == dev->mode_config.scaling_mode_property) {
5546                enum amdgpu_rmx_type rmx_type;
5547
5548                switch (val) {
5549                case DRM_MODE_SCALE_CENTER:
5550                        rmx_type = RMX_CENTER;
5551                        break;
5552                case DRM_MODE_SCALE_ASPECT:
5553                        rmx_type = RMX_ASPECT;
5554                        break;
5555                case DRM_MODE_SCALE_FULLSCREEN:
5556                        rmx_type = RMX_FULL;
5557                        break;
5558                case DRM_MODE_SCALE_NONE:
5559                default:
5560                        rmx_type = RMX_OFF;
5561                        break;
5562                }
5563
5564                if (dm_old_state->scaling == rmx_type)
5565                        return 0;
5566
5567                dm_new_state->scaling = rmx_type;
5568                ret = 0;
5569        } else if (property == adev->mode_info.underscan_hborder_property) {
5570                dm_new_state->underscan_hborder = val;
5571                ret = 0;
5572        } else if (property == adev->mode_info.underscan_vborder_property) {
5573                dm_new_state->underscan_vborder = val;
5574                ret = 0;
5575        } else if (property == adev->mode_info.underscan_property) {
5576                dm_new_state->underscan_enable = val;
5577                ret = 0;
5578        } else if (property == adev->mode_info.abm_level_property) {
5579                dm_new_state->abm_level = val;
5580                ret = 0;
5581        }
5582
5583        return ret;
5584}
5585
5586int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5587                                            const struct drm_connector_state *state,
5588                                            struct drm_property *property,
5589                                            uint64_t *val)
5590{
5591        struct drm_device *dev = connector->dev;
5592        struct amdgpu_device *adev = drm_to_adev(dev);
5593        struct dm_connector_state *dm_state =
5594                to_dm_connector_state(state);
5595        int ret = -EINVAL;
5596
5597        if (property == dev->mode_config.scaling_mode_property) {
5598                switch (dm_state->scaling) {
5599                case RMX_CENTER:
5600                        *val = DRM_MODE_SCALE_CENTER;
5601                        break;
5602                case RMX_ASPECT:
5603                        *val = DRM_MODE_SCALE_ASPECT;
5604                        break;
5605                case RMX_FULL:
5606                        *val = DRM_MODE_SCALE_FULLSCREEN;
5607                        break;
5608                case RMX_OFF:
5609                default:
5610                        *val = DRM_MODE_SCALE_NONE;
5611                        break;
5612                }
5613                ret = 0;
5614        } else if (property == adev->mode_info.underscan_hborder_property) {
5615                *val = dm_state->underscan_hborder;
5616                ret = 0;
5617        } else if (property == adev->mode_info.underscan_vborder_property) {
5618                *val = dm_state->underscan_vborder;
5619                ret = 0;
5620        } else if (property == adev->mode_info.underscan_property) {
5621                *val = dm_state->underscan_enable;
5622                ret = 0;
5623        } else if (property == adev->mode_info.abm_level_property) {
5624                *val = dm_state->abm_level;
5625                ret = 0;
5626        }
5627
5628        return ret;
5629}
5630
5631static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5632{
5633        struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5634
5635        drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5636}
5637
5638static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5639{
5640        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5641        const struct dc_link *link = aconnector->dc_link;
5642        struct amdgpu_device *adev = drm_to_adev(connector->dev);
5643        struct amdgpu_display_manager *dm = &adev->dm;
5644
5645        /*
5646         * Call only if mst_mgr was initialized before, since it's not done
5647         * for all connector types.
5648         */
5649        if (aconnector->mst_mgr.dev)
5650                drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5651
5652#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5653        defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5654
5655        if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5656            link->type != dc_connection_none &&
5657            dm->backlight_dev) {
5658                backlight_device_unregister(dm->backlight_dev);
5659                dm->backlight_dev = NULL;
5660        }
5661#endif
5662
5663        if (aconnector->dc_em_sink)
5664                dc_sink_release(aconnector->dc_em_sink);
5665        aconnector->dc_em_sink = NULL;
5666        if (aconnector->dc_sink)
5667                dc_sink_release(aconnector->dc_sink);
5668        aconnector->dc_sink = NULL;
5669
5670        drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5671        drm_connector_unregister(connector);
5672        drm_connector_cleanup(connector);
5673        if (aconnector->i2c) {
5674                i2c_del_adapter(&aconnector->i2c->base);
5675                kfree(aconnector->i2c);
5676        }
5677        kfree(aconnector->dm_dp_aux.aux.name);
5678
5679        kfree(connector);
5680}
5681
5682void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5683{
5684        struct dm_connector_state *state =
5685                to_dm_connector_state(connector->state);
5686
5687        if (connector->state)
5688                __drm_atomic_helper_connector_destroy_state(connector->state);
5689
5690        kfree(state);
5691
5692        state = kzalloc(sizeof(*state), GFP_KERNEL);
5693
5694        if (state) {
5695                state->scaling = RMX_OFF;
5696                state->underscan_enable = false;
5697                state->underscan_hborder = 0;
5698                state->underscan_vborder = 0;
5699                state->base.max_requested_bpc = 8;
5700                state->vcpi_slots = 0;
5701                state->pbn = 0;
5702                if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5703                        state->abm_level = amdgpu_dm_abm_level;
5704
5705                __drm_atomic_helper_connector_reset(connector, &state->base);
5706        }
5707}
5708
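    /*
     * Duplicate the connector state for an atomic commit: kmemdup() the whole
     * struct, let the DRM helper fix up the base state, then copy the
     * DM-specific fields explicitly.
     */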
5709struct drm_connector_state *
5710amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5711{
5712        struct dm_connector_state *state =
5713                to_dm_connector_state(connector->state);
5714
5715        struct dm_connector_state *new_state =
5716                        kmemdup(state, sizeof(*state), GFP_KERNEL);
5717
5718        if (!new_state)
5719                return NULL;
5720
5721        __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5722
5723        new_state->freesync_capable = state->freesync_capable;
5724        new_state->abm_level = state->abm_level;
5725        new_state->scaling = state->scaling;
5726        new_state->underscan_enable = state->underscan_enable;
5727        new_state->underscan_hborder = state->underscan_hborder;
5728        new_state->underscan_vborder = state->underscan_vborder;
5729        new_state->vcpi_slots = state->vcpi_slots;
5730        new_state->pbn = state->pbn;
5731        return &new_state->base;
5732}
5733
5734static int
5735amdgpu_dm_connector_late_register(struct drm_connector *connector)
5736{
5737        struct amdgpu_dm_connector *amdgpu_dm_connector =
5738                to_amdgpu_dm_connector(connector);
5739        int r;
5740
5741        if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5742            (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5743                amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5744                r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5745                if (r)
5746                        return r;
5747        }
5748
5749#if defined(CONFIG_DEBUG_FS)
5750        connector_debugfs_init(amdgpu_dm_connector);
5751#endif
5752
5753        return 0;
5754}
5755
5756static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5757        .reset = amdgpu_dm_connector_funcs_reset,
5758        .detect = amdgpu_dm_connector_detect,
5759        .fill_modes = drm_helper_probe_single_connector_modes,
5760        .destroy = amdgpu_dm_connector_destroy,
5761        .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5762        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5763        .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5764        .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5765        .late_register = amdgpu_dm_connector_late_register,
5766        .early_unregister = amdgpu_dm_connector_unregister
5767};
5768
5769static int get_modes(struct drm_connector *connector)
5770{
5771        return amdgpu_dm_connector_get_modes(connector);
5772}
5773
5774static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5775{
5776        struct dc_sink_init_data init_params = {
5777                        .link = aconnector->dc_link,
5778                        .sink_signal = SIGNAL_TYPE_VIRTUAL
5779        };
5780        struct edid *edid;
5781
5782        if (!aconnector->base.edid_blob_ptr) {
5783                DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5784                                aconnector->base.name);
5785
5786                aconnector->base.force = DRM_FORCE_OFF;
5787                aconnector->base.override_edid = false;
5788                return;
5789        }
5790
5791        edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5792
5793        aconnector->edid = edid;
5794
5795        aconnector->dc_em_sink = dc_link_add_remote_sink(
5796                aconnector->dc_link,
5797                (uint8_t *)edid,
5798                (edid->extensions + 1) * EDID_LENGTH,
5799                &init_params);
5800
5801        if (aconnector->base.force == DRM_FORCE_ON) {
5802                aconnector->dc_sink = aconnector->dc_link->local_sink ?
5803                aconnector->dc_link->local_sink :
5804                aconnector->dc_em_sink;
5805                dc_sink_retain(aconnector->dc_sink);
5806        }
5807}
5808
5809static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5810{
5811        struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5812
5813        /*
5814         * In the case of a headless boot with force-on for a DP-managed connector,
5815         * these settings have to be != 0 to get an initial modeset.
5816         */
5817        if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5818                link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5819                link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5820        }
5821
5823        aconnector->base.override_edid = true;
5824        create_eml_sink(aconnector);
5825}
5826
5827static struct dc_stream_state *
5828create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5829                                const struct drm_display_mode *drm_mode,
5830                                const struct dm_connector_state *dm_state,
5831                                const struct dc_stream_state *old_stream)
5832{
5833        struct drm_connector *connector = &aconnector->base;
5834        struct amdgpu_device *adev = drm_to_adev(connector->dev);
5835        struct dc_stream_state *stream;
5836        const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5837        int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5838        enum dc_status dc_result = DC_OK;
5839
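            /*
             * Retry stream validation at progressively lower color depths:
             * e.g. a requested 10 bpc is retried at 8 and then 6 bpc before
             * giving up, since a mode may only pass DC validation at a
             * reduced depth.
             */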
5840        do {
5841                stream = create_stream_for_sink(aconnector, drm_mode,
5842                                                dm_state, old_stream,
5843                                                requested_bpc);
5844                if (stream == NULL) {
5845                        DRM_ERROR("Failed to create stream for sink!\n");
5846                        break;
5847                }
5848
5849                dc_result = dc_validate_stream(adev->dm.dc, stream);
5850
5851                if (dc_result != DC_OK) {
5852                        DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5853                                      drm_mode->hdisplay,
5854                                      drm_mode->vdisplay,
5855                                      drm_mode->clock,
5856                                      dc_result,
5857                                      dc_status_to_str(dc_result));
5858
5859                        dc_stream_release(stream);
5860                        stream = NULL;
5861                        requested_bpc -= 2; /* lower bpc to retry validation */
5862                }
5863
5864        } while (stream == NULL && requested_bpc >= 6);
5865
5866        return stream;
5867}
5868
5869enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5870                                   struct drm_display_mode *mode)
5871{
5872        int result = MODE_ERROR;
5873        struct dc_sink *dc_sink;
5874        /* TODO: Unhardcode stream count */
5875        struct dc_stream_state *stream;
5876        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5877
5878        if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5879                        (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5880                return result;
5881
5882        /*
5883         * Only run this the first time mode_valid is called to initialize
5884         * EDID mgmt
5885         */
5886        if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5887                !aconnector->dc_em_sink)
5888                handle_edid_mgmt(aconnector);
5889
5890        dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5891
5892        if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5893                                aconnector->base.force != DRM_FORCE_ON) {
5894                DRM_ERROR("dc_sink is NULL!\n");
5895                goto fail;
5896        }
5897
5898        stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5899        if (stream) {
5900                dc_stream_release(stream);
5901                result = MODE_OK;
5902        }
5903
5904fail:
5905        /* TODO: error handling */
5906        return result;
5907}
5908
5909static int fill_hdr_info_packet(const struct drm_connector_state *state,
5910                                struct dc_info_packet *out)
5911{
5912        struct hdmi_drm_infoframe frame;
5913        unsigned char buf[30]; /* 26 + 4 */
5914        ssize_t len;
5915        int ret, i;
5916
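            /*
             * Packed infoframe layout produced by hdmi_drm_infoframe_pack_only():
             * buf[0] = type (0x87), buf[1] = version, buf[2] = length (26),
             * buf[3] = checksum, buf[4..29] = static metadata payload.
             */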
5917        memset(out, 0, sizeof(*out));
5918
5919        if (!state->hdr_output_metadata)
5920                return 0;
5921
5922        ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5923        if (ret)
5924                return ret;
5925
5926        len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5927        if (len < 0)
5928                return (int)len;
5929
5930        /* Static metadata is a fixed 26 bytes + 4 byte header. */
5931        if (len != 30)
5932                return -EINVAL;
5933
5934        /* Prepare the infopacket for DC. */
5935        switch (state->connector->connector_type) {
5936        case DRM_MODE_CONNECTOR_HDMIA:
5937                out->hb0 = 0x87; /* type */
5938                out->hb1 = 0x01; /* version */
5939                out->hb2 = 0x1A; /* length */
5940                out->sb[0] = buf[3]; /* checksum */
5941                i = 1;
5942                break;
5943
5944        case DRM_MODE_CONNECTOR_DisplayPort:
5945        case DRM_MODE_CONNECTOR_eDP:
5946                out->hb0 = 0x00; /* sdp id, zero */
5947                out->hb1 = 0x87; /* type */
5948                out->hb2 = 0x1D; /* payload len - 1 */
5949                out->hb3 = (0x13 << 2); /* sdp version */
5950                out->sb[0] = 0x01; /* version */
5951                out->sb[1] = 0x1A; /* length */
5952                i = 2;
5953                break;
5954
5955        default:
5956                return -EINVAL;
5957        }
5958
5959        memcpy(&out->sb[i], &buf[4], 26);
5960        out->valid = true;
5961
5962        print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5963                       sizeof(out->sb), false);
5964
5965        return 0;
5966}
5967
5968static bool
5969is_hdr_metadata_different(const struct drm_connector_state *old_state,
5970                          const struct drm_connector_state *new_state)
5971{
5972        struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5973        struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5974
5975        if (old_blob != new_blob) {
5976                if (old_blob && new_blob &&
5977                    old_blob->length == new_blob->length)
5978                        return memcmp(old_blob->data, new_blob->data,
5979                                      old_blob->length);
5980
5981                return true;
5982        }
5983
5984        return false;
5985}
5986
5987static int
5988amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5989                                 struct drm_atomic_state *state)
5990{
5991        struct drm_connector_state *new_con_state =
5992                drm_atomic_get_new_connector_state(state, conn);
5993        struct drm_connector_state *old_con_state =
5994                drm_atomic_get_old_connector_state(state, conn);
5995        struct drm_crtc *crtc = new_con_state->crtc;
5996        struct drm_crtc_state *new_crtc_state;
5997        int ret;
5998
5999        trace_amdgpu_dm_connector_atomic_check(new_con_state);
6000
6001        if (!crtc)
6002                return 0;
6003
6004        if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6005                struct dc_info_packet hdr_infopacket;
6006
6007                ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6008                if (ret)
6009                        return ret;
6010
6011                new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6012                if (IS_ERR(new_crtc_state))
6013                        return PTR_ERR(new_crtc_state);
6014
6015                /*
6016                 * DC considers the stream backends changed if the
6017                 * static metadata changes. Forcing the modeset also
6018                 * gives a simple way for userspace to switch from
6019                 * 8bpc to 10bpc when setting the metadata to enter
6020                 * or exit HDR.
6021                 *
6022                 * Changing the static metadata after it's been
6023                 * set is permissible, however. So only force a
6024                 * modeset if we're entering or exiting HDR.
6025                 */
6026                new_crtc_state->mode_changed =
6027                        !old_con_state->hdr_output_metadata ||
6028                        !new_con_state->hdr_output_metadata;
6029        }
6030
6031        return 0;
6032}
6033
6034static const struct drm_connector_helper_funcs
6035amdgpu_dm_connector_helper_funcs = {
6036        /*
6037         * If hotplugging a second, bigger display in FB console mode, bigger
6038         * resolution modes will be filtered by drm_mode_validate_size() and
6039         * will be missing after the user starts lightdm. So renew the modes
6040         * list in the get_modes callback instead of just returning the count.
6041         */
6042        .get_modes = get_modes,
6043        .mode_valid = amdgpu_dm_connector_mode_valid,
6044        .atomic_check = amdgpu_dm_connector_atomic_check,
6045};
6046
6047static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6048{
6049}
6050
6051static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6052{
6053        struct drm_atomic_state *state = new_crtc_state->state;
6054        struct drm_plane *plane;
6055        int num_active = 0;
6056
6057        drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6058                struct drm_plane_state *new_plane_state;
6059
6060                /* Cursor planes are "fake". */
6061                if (plane->type == DRM_PLANE_TYPE_CURSOR)
6062                        continue;
6063
6064                new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6065
6066                if (!new_plane_state) {
6067                        /*
6068                         * The plane is enabled on the CRTC and hasn't changed
6069                         * state. This means that it previously passed
6070                         * validation and is therefore still enabled.
6071                         */
6072                        num_active += 1;
6073                        continue;
6074                }
6075
6076                /* We need a framebuffer to be considered enabled. */
6077                num_active += (new_plane_state->fb != NULL);
6078        }
6079
6080        return num_active;
6081}
6082
6083static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6084                                         struct drm_crtc_state *new_crtc_state)
6085{
6086        struct dm_crtc_state *dm_new_crtc_state =
6087                to_dm_crtc_state(new_crtc_state);
6088
6089        dm_new_crtc_state->active_planes = 0;
6090
6091        if (!dm_new_crtc_state->stream)
6092                return;
6093
6094        dm_new_crtc_state->active_planes =
6095                count_crtc_active_planes(new_crtc_state);
6096}
6097
6098static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6099                                       struct drm_atomic_state *state)
6100{
6101        struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6102                                                                          crtc);
6103        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6104        struct dc *dc = adev->dm.dc;
6105        struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6106        int ret = -EINVAL;
6107
6108        trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6109
6110        dm_update_crtc_active_planes(crtc, crtc_state);
6111
6112        if (unlikely(!dm_crtc_state->stream &&
6113                     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6114                WARN_ON(1);
6115                return ret;
6116        }
6117
6118        /*
6119         * We require the primary plane to be enabled whenever the CRTC is, otherwise
6120         * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6121         * planes are disabled, which is not supported by the hardware. And there is legacy
6122         * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6123         */
6124        if (crtc_state->enable &&
6125            !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6126                DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6127                return -EINVAL;
6128        }
6129
6130        /* In some use cases, like reset, no stream is attached */
6131        if (!dm_crtc_state->stream)
6132                return 0;
6133
6134        if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6135                return 0;
6136
6137        DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6138        return ret;
6139}
6140
6141static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6142                                      const struct drm_display_mode *mode,
6143                                      struct drm_display_mode *adjusted_mode)
6144{
6145        return true;
6146}
6147
6148static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6149        .disable = dm_crtc_helper_disable,
6150        .atomic_check = dm_crtc_helper_atomic_check,
6151        .mode_fixup = dm_crtc_helper_mode_fixup,
6152        .get_scanout_position = amdgpu_crtc_get_scanout_position,
6153};
6154
6155static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6156{
6158}
6159
6160static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6161{
6162        switch (display_color_depth) {
6163        case COLOR_DEPTH_666:
6164                return 6;
6165        case COLOR_DEPTH_888:
6166                return 8;
6167        case COLOR_DEPTH_101010:
6168                return 10;
6169        case COLOR_DEPTH_121212:
6170                return 12;
6171        case COLOR_DEPTH_141414:
6172                return 14;
6173        case COLOR_DEPTH_161616:
6174                return 16;
6175        default:
6176                break;
6177        }
6178        return 0;
6179}
6180
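    /*
     * For MST connectors, compute the stream's bandwidth in PBN units from the
     * adjusted mode and the negotiated color depth (bpp = 3 * bpc), then
     * atomically reserve the matching number of VCPI time slots on the
     * topology manager. Connectors without an MST port are a no-op here.
     */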
6181static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6182                                          struct drm_crtc_state *crtc_state,
6183                                          struct drm_connector_state *conn_state)
6184{
6185        struct drm_atomic_state *state = crtc_state->state;
6186        struct drm_connector *connector = conn_state->connector;
6187        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6188        struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6189        const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6190        struct drm_dp_mst_topology_mgr *mst_mgr;
6191        struct drm_dp_mst_port *mst_port;
6192        enum dc_color_depth color_depth;
6193        int clock, bpp = 0;
6194        bool is_y420 = false;
6195
6196        if (!aconnector->port || !aconnector->dc_sink)
6197                return 0;
6198
6199        mst_port = aconnector->port;
6200        mst_mgr = &aconnector->mst_port->mst_mgr;
6201
6202        if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6203                return 0;
6204
6205        if (!state->duplicated) {
6206                int max_bpc = conn_state->max_requested_bpc;
6207                is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6208                                aconnector->force_yuv420_output;
6209                color_depth = convert_color_depth_from_display_info(connector,
6210                                                                    is_y420,
6211                                                                    max_bpc);
6212                bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6213                clock = adjusted_mode->clock;
6214                dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6215        }
6216        dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6217                                                                           mst_mgr,
6218                                                                           mst_port,
6219                                                                           dm_new_connector_state->pbn,
6220                                                                           dm_mst_get_pbn_divider(aconnector->dc_link));
6221        if (dm_new_connector_state->vcpi_slots < 0) {
6222                DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6223                return dm_new_connector_state->vcpi_slots;
6224        }
6225        return 0;
6226}
6227
6228const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6229        .disable = dm_encoder_helper_disable,
6230        .atomic_check = dm_encoder_helper_atomic_check
6231};
6232
6233#if defined(CONFIG_DRM_AMD_DC_DCN)
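    /*
     * Walk every connector in the atomic state, match it to its dc_stream via
     * dm_stream_context, and recompute the PBN/VCPI allocation with DSC taken
     * into account: streams with DSC enabled recalculate PBN from the
     * compressed bits-per-pixel, all others keep their allocation with DSC
     * disabled.
     */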
6234static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6235                                            struct dc_state *dc_state)
6236{
6237        struct dc_stream_state *stream = NULL;
6238        struct drm_connector *connector;
6239        struct drm_connector_state *new_con_state, *old_con_state;
6240        struct amdgpu_dm_connector *aconnector;
6241        struct dm_connector_state *dm_conn_state;
6242        int i, j, clock, bpp;
6243        int vcpi, pbn_div, pbn = 0;
6244
6245        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6246
6247                aconnector = to_amdgpu_dm_connector(connector);
6248
6249                if (!aconnector->port)
6250                        continue;
6251
6252                if (!new_con_state || !new_con_state->crtc)
6253                        continue;
6254
6255                dm_conn_state = to_dm_connector_state(new_con_state);
6256
6257                for (j = 0; j < dc_state->stream_count; j++) {
6258                        stream = dc_state->streams[j];
6259                        if (!stream)
6260                                continue;
6261
6262                        if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6263                                break;
6264
6265                        stream = NULL;
6266                }
6267
6268                if (!stream)
6269                        continue;
6270
6271                if (stream->timing.flags.DSC != 1) {
6272                        drm_dp_mst_atomic_enable_dsc(state,
6273                                                     aconnector->port,
6274                                                     dm_conn_state->pbn,
6275                                                     0,
6276                                                     false);
6277                        continue;
6278                }
6279
6280                pbn_div = dm_mst_get_pbn_divider(stream->link);
6281                bpp = stream->timing.dsc_cfg.bits_per_pixel;
6282                clock = stream->timing.pix_clk_100hz / 10;
6283                pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6284                vcpi = drm_dp_mst_atomic_enable_dsc(state,
6285                                                    aconnector->port,
6286                                                    pbn, pbn_div,
6287                                                    true);
6288                if (vcpi < 0)
6289                        return vcpi;
6290
6291                dm_conn_state->pbn = pbn;
6292                dm_conn_state->vcpi_slots = vcpi;
6293        }
6294        return 0;
6295}
6296#endif
6297
6298static void dm_drm_plane_reset(struct drm_plane *plane)
6299{
6300        struct dm_plane_state *amdgpu_state = NULL;
6301
6302        if (plane->state)
6303                plane->funcs->atomic_destroy_state(plane, plane->state);
6304
6305        amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6306        WARN_ON(amdgpu_state == NULL);
6307
6308        if (amdgpu_state)
6309                __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6310}
6311
6312static struct drm_plane_state *
6313dm_drm_plane_duplicate_state(struct drm_plane *plane)
6314{
6315        struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6316
6317        old_dm_plane_state = to_dm_plane_state(plane->state);
6318        dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6319        if (!dm_plane_state)
6320                return NULL;
6321
6322        __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6323
6324        if (old_dm_plane_state->dc_state) {
6325                dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6326                dc_plane_state_retain(dm_plane_state->dc_state);
6327        }
6328
6329        return &dm_plane_state->base;
6330}
6331
6332static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6333                                struct drm_plane_state *state)
6334{
6335        struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6336
6337        if (dm_plane_state->dc_state)
6338                dc_plane_state_release(dm_plane_state->dc_state);
6339
6340        drm_atomic_helper_plane_destroy_state(plane, state);
6341}
6342
6343static const struct drm_plane_funcs dm_plane_funcs = {
6344        .update_plane   = drm_atomic_helper_update_plane,
6345        .disable_plane  = drm_atomic_helper_disable_plane,
6346        .destroy        = drm_primary_helper_destroy,
6347        .reset = dm_drm_plane_reset,
6348        .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6349        .atomic_destroy_state = dm_drm_plane_destroy_state,
6350        .format_mod_supported = dm_plane_format_mod_supported,
6351};
6352
6353static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6354                                      struct drm_plane_state *new_state)
6355{
6356        struct amdgpu_framebuffer *afb;
6357        struct drm_gem_object *obj;
6358        struct amdgpu_device *adev;
6359        struct amdgpu_bo *rbo;
6360        struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6361        struct list_head list;
6362        struct ttm_validate_buffer tv;
6363        struct ww_acquire_ctx ticket;
6364        uint32_t domain;
6365        int r;
6366
6367        if (!new_state->fb) {
6368                DRM_DEBUG_DRIVER("No FB bound\n");
6369                return 0;
6370        }
6371
6372        afb = to_amdgpu_framebuffer(new_state->fb);
6373        obj = new_state->fb->obj[0];
6374        rbo = gem_to_amdgpu_bo(obj);
6375        adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6376        INIT_LIST_HEAD(&list);
6377
6378        tv.bo = &rbo->tbo;
6379        tv.num_shared = 1;
6380        list_add(&tv.head, &list);
6381
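            /*
             * Reserve the BO through the TTM execbuf utils (a one-entry
             * validation list) so that it can be pinned and bound to GART
             * safely below.
             */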
6382        r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6383        if (r) {
6384                dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
6385                return r;
6386        }
6387
6388        if (plane->type != DRM_PLANE_TYPE_CURSOR)
6389                domain = amdgpu_display_supported_domains(adev, rbo->flags);
6390        else
6391                domain = AMDGPU_GEM_DOMAIN_VRAM;
6392
6393        r = amdgpu_bo_pin(rbo, domain);
6394        if (unlikely(r != 0)) {
6395                if (r != -ERESTARTSYS)
6396                        DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6397                ttm_eu_backoff_reservation(&ticket, &list);
6398                return r;
6399        }
6400
6401        r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6402        if (unlikely(r != 0)) {
6403                amdgpu_bo_unpin(rbo);
6404                ttm_eu_backoff_reservation(&ticket, &list);
6405                DRM_ERROR("%p bind failed\n", rbo);
6406                return r;
6407        }
6408
6409        ttm_eu_backoff_reservation(&ticket, &list);
6410
6411        afb->address = amdgpu_bo_gpu_offset(rbo);
6412
6413        amdgpu_bo_ref(rbo);
6414
6415        /*
6416         * We don't do surface updates on planes that have been newly created,
6417         * but we also don't have the afb->address during atomic check.
6418         *
6419         * Fill in buffer attributes depending on the address here, but only on
6420         * newly created planes since they're not being used by DC yet and this
6421         * won't modify global state.
6422         */
6423        dm_plane_state_old = to_dm_plane_state(plane->state);
6424        dm_plane_state_new = to_dm_plane_state(new_state);
6425
6426        if (dm_plane_state_new->dc_state &&
6427            dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6428                struct dc_plane_state *plane_state =
6429                        dm_plane_state_new->dc_state;
6430                bool force_disable_dcc = !plane_state->dcc.enable;
6431
6432                fill_plane_buffer_attributes(
6433                        adev, afb, plane_state->format, plane_state->rotation,
6434                        afb->tiling_flags,
6435                        &plane_state->tiling_info, &plane_state->plane_size,
6436                        &plane_state->dcc, &plane_state->address,
6437                        afb->tmz_surface, force_disable_dcc);
6438        }
6439
6440        return 0;
6441}
6442
6443static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6444                                       struct drm_plane_state *old_state)
6445{
6446        struct amdgpu_bo *rbo;
6447        int r;
6448
6449        if (!old_state->fb)
6450                return;
6451
6452        rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6453        r = amdgpu_bo_reserve(rbo, false);
6454        if (unlikely(r)) {
6455                DRM_ERROR("failed to reserve rbo before unpin\n");
6456                return;
6457        }
6458
6459        amdgpu_bo_unpin(rbo);
6460        amdgpu_bo_unreserve(rbo);
6461        amdgpu_bo_unref(&rbo);
6462}
6463
6464static int dm_plane_helper_check_state(struct drm_plane_state *state,
6465                                       struct drm_crtc_state *new_crtc_state)
6466{
6467        struct drm_framebuffer *fb = state->fb;
6468        int min_downscale, max_upscale;
6469        int min_scale = 0;
6470        int max_scale = INT_MAX;
6471
6472        /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6473        if (fb && state->crtc) {
6474                /* Validate viewport to cover the case when only the position changes */
6475                if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6476                        int viewport_width = state->crtc_w;
6477                        int viewport_height = state->crtc_h;
6478
6479                        if (state->crtc_x < 0)
6480                                viewport_width += state->crtc_x;
6481                        else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6482                                viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6483
6484                        if (state->crtc_y < 0)
6485                                viewport_height += state->crtc_y;
6486                        else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6487                                viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6488
6489                        /* If the plane is completely outside of the screen,
6490                         * viewport_width and/or viewport_height will be negative,
6491                         * which still satisfies the condition below and thereby
6492                         * covers those cases. x2 for width is because of pipe-split.
6493                         */
6494                        if (viewport_width < MIN_VIEWPORT_SIZE*2 || viewport_height < MIN_VIEWPORT_SIZE)
6495                                return -EINVAL;
6496                }
6497
6498                /* Get min/max allowed scaling factors from plane caps. */
6499                get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6500                                             &min_downscale, &max_upscale);
6501                /*
6502                 * Convert to drm convention: 16.16 fixed point, instead of dc's
6503                 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6504                 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6505                 */
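                    /*
                     * Worked example with hypothetical caps: max_upscale = 16000
                     * (16x) and min_downscale = 250 (1/4x) would yield
                     * min_scale = (1000 << 16) / 16000 = 4096 (1/16 in 16.16)
                     * and max_scale = (1000 << 16) / 250 = 262144 (4.0 in 16.16).
                     */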
6506                min_scale = (1000 << 16) / max_upscale;
6507                max_scale = (1000 << 16) / min_downscale;
6508        }
6509
6510        return drm_atomic_helper_check_plane_state(
6511                state, new_crtc_state, min_scale, max_scale, true, true);
6512}
6513
6514static int dm_plane_atomic_check(struct drm_plane *plane,
6515                                 struct drm_plane_state *state)
6516{
6517        struct amdgpu_device *adev = drm_to_adev(plane->dev);
6518        struct dc *dc = adev->dm.dc;
6519        struct dm_plane_state *dm_plane_state;
6520        struct dc_scaling_info scaling_info;
6521        struct drm_crtc_state *new_crtc_state;
6522        int ret;
6523
6524        trace_amdgpu_dm_plane_atomic_check(state);
6525
6526        dm_plane_state = to_dm_plane_state(state);
6527
6528        if (!dm_plane_state->dc_state)
6529                return 0;
6530
6531        new_crtc_state =
6532                drm_atomic_get_new_crtc_state(state->state, state->crtc);
6533        if (!new_crtc_state)
6534                return -EINVAL;
6535
6536        ret = dm_plane_helper_check_state(state, new_crtc_state);
6537        if (ret)
6538                return ret;
6539
6540        ret = fill_dc_scaling_info(state, &scaling_info);
6541        if (ret)
6542                return ret;
6543
6544        if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6545                return 0;
6546
6547        return -EINVAL;
6548}
6549
6550static int dm_plane_atomic_async_check(struct drm_plane *plane,
6551                                       struct drm_plane_state *new_plane_state)
6552{
6553        /* Only support async updates on cursor planes. */
6554        if (plane->type != DRM_PLANE_TYPE_CURSOR)
6555                return -EINVAL;
6556
6557        return 0;
6558}
6559
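    /*
     * Async (cursor) update: copy the new fb and the src/crtc coordinates
     * straight into the current plane state and program the cursor
     * immediately, without going through a full atomic commit.
     */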
6560static void dm_plane_atomic_async_update(struct drm_plane *plane,
6561                                         struct drm_plane_state *new_state)
6562{
6563        struct drm_plane_state *old_state =
6564                drm_atomic_get_old_plane_state(new_state->state, plane);
6565
6566        trace_amdgpu_dm_atomic_update_cursor(new_state);
6567
6568        swap(plane->state->fb, new_state->fb);
6569
6570        plane->state->src_x = new_state->src_x;
6571        plane->state->src_y = new_state->src_y;
6572        plane->state->src_w = new_state->src_w;
6573        plane->state->src_h = new_state->src_h;
6574        plane->state->crtc_x = new_state->crtc_x;
6575        plane->state->crtc_y = new_state->crtc_y;
6576        plane->state->crtc_w = new_state->crtc_w;
6577        plane->state->crtc_h = new_state->crtc_h;
6578
6579        handle_cursor_update(plane, old_state);
6580}
6581
6582static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6583        .prepare_fb = dm_plane_helper_prepare_fb,
6584        .cleanup_fb = dm_plane_helper_cleanup_fb,
6585        .atomic_check = dm_plane_atomic_check,
6586        .atomic_async_check = dm_plane_atomic_async_check,
6587        .atomic_async_update = dm_plane_atomic_async_update
6588};
6589
6590/*
6591 * TODO: these are currently initialized to rgb formats only.
6592 * For future use cases we should either initialize them dynamically based on
6593 * plane capabilities, or initialize this array to all formats, so internal drm
6594 * check will succeed, and let DC implement proper check
6595 */
6596static const uint32_t rgb_formats[] = {
6597        DRM_FORMAT_XRGB8888,
6598        DRM_FORMAT_ARGB8888,
6599        DRM_FORMAT_RGBA8888,
6600        DRM_FORMAT_XRGB2101010,
6601        DRM_FORMAT_XBGR2101010,
6602        DRM_FORMAT_ARGB2101010,
6603        DRM_FORMAT_ABGR2101010,
6604        DRM_FORMAT_XBGR8888,
6605        DRM_FORMAT_ABGR8888,
6606        DRM_FORMAT_RGB565,
6607};
6608
6609static const uint32_t overlay_formats[] = {
6610        DRM_FORMAT_XRGB8888,
6611        DRM_FORMAT_ARGB8888,
6612        DRM_FORMAT_RGBA8888,
6613        DRM_FORMAT_XBGR8888,
6614        DRM_FORMAT_ABGR8888,
6615        DRM_FORMAT_RGB565
6616};
6617
6618static const u32 cursor_formats[] = {
6619        DRM_FORMAT_ARGB8888
6620};
6621
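    /*
     * Build the format list for a plane from the static tables above, extended
     * with NV12/P010 and the FP16 variants when the DC plane caps report
     * support for them (primary planes only).
     */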
6622static int get_plane_formats(const struct drm_plane *plane,
6623                             const struct dc_plane_cap *plane_cap,
6624                             uint32_t *formats, int max_formats)
6625{
6626        int i, num_formats = 0;
6627
6628        /*
6629         * TODO: Query support for each group of formats directly from
6630         * DC plane caps. This will require adding more formats to the
6631         * caps list.
6632         */
6633
6634        switch (plane->type) {
6635        case DRM_PLANE_TYPE_PRIMARY:
6636                for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6637                        if (num_formats >= max_formats)
6638                                break;
6639
6640                        formats[num_formats++] = rgb_formats[i];
6641                }
6642
6643                if (plane_cap && plane_cap->pixel_format_support.nv12)
6644                        formats[num_formats++] = DRM_FORMAT_NV12;
6645                if (plane_cap && plane_cap->pixel_format_support.p010)
6646                        formats[num_formats++] = DRM_FORMAT_P010;
6647                if (plane_cap && plane_cap->pixel_format_support.fp16) {
6648                        formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6649                        formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6650                        formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6651                        formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6652                }
6653                break;
6654
6655        case DRM_PLANE_TYPE_OVERLAY:
6656                for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6657                        if (num_formats >= max_formats)
6658                                break;
6659
6660                        formats[num_formats++] = overlay_formats[i];
6661                }
6662                break;
6663
6664        case DRM_PLANE_TYPE_CURSOR:
6665                for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6666                        if (num_formats >= max_formats)
6667                                break;
6668
6669                        formats[num_formats++] = cursor_formats[i];
6670                }
6671                break;
6672        }
6673
6674        return num_formats;
6675}
6676
6677static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6678                                struct drm_plane *plane,
6679                                unsigned long possible_crtcs,
6680                                const struct dc_plane_cap *plane_cap)
6681{
6682        uint32_t formats[32];
6683        int num_formats;
6684        int res = -EPERM;
6685        unsigned int supported_rotations;
6686        uint64_t *modifiers = NULL;
6687
6688        num_formats = get_plane_formats(plane, plane_cap, formats,
6689                                        ARRAY_SIZE(formats));
6690
6691        res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6692        if (res)
6693                return res;
6694
6695        res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6696                                       &dm_plane_funcs, formats, num_formats,
6697                                       modifiers, plane->type, NULL);
6698        kfree(modifiers);
6699        if (res)
6700                return res;
6701
6702        if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6703            plane_cap && plane_cap->per_pixel_alpha) {
6704                unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6705                                          BIT(DRM_MODE_BLEND_PREMULTI);
6706
6707                drm_plane_create_alpha_property(plane);
6708                drm_plane_create_blend_mode_property(plane, blend_caps);
6709        }
6710
6711        if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6712            plane_cap &&
6713            (plane_cap->pixel_format_support.nv12 ||
6714             plane_cap->pixel_format_support.p010)) {
6715                /* This only affects YUV formats. */
6716                drm_plane_create_color_properties(
6717                        plane,
6718                        BIT(DRM_COLOR_YCBCR_BT601) |
6719                        BIT(DRM_COLOR_YCBCR_BT709) |
6720                        BIT(DRM_COLOR_YCBCR_BT2020),
6721                        BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6722                        BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6723                        DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6724        }
6725
6726        supported_rotations =
6727                DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6728                DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6729
6730        if (dm->adev->asic_type >= CHIP_BONAIRE &&
6731            plane->type != DRM_PLANE_TYPE_CURSOR)
6732                drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6733                                                   supported_rotations);
6734
6735        drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6736
6737        /* Create (reset) the plane state */
6738        if (plane->funcs->reset)
6739                plane->funcs->reset(plane);
6740
6741        return 0;
6742}
6743
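    /*
     * Create a CRTC with the given primary plane plus a freshly allocated
     * cursor plane, hook up the DM funcs/helpers and color management, and
     * register it in adev->mode_info.crtcs[].
     */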
6744static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6745                               struct drm_plane *plane,
6746                               uint32_t crtc_index)
6747{
6748        struct amdgpu_crtc *acrtc = NULL;
6749        struct drm_plane *cursor_plane;
6750
6751        int res = -ENOMEM;
6752
6753        cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6754        if (!cursor_plane)
6755                goto fail;
6756
6757        cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6758        res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6759
6760        acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6761        if (!acrtc)
6762                goto fail;
6763
6764        res = drm_crtc_init_with_planes(
6765                        dm->ddev,
6766                        &acrtc->base,
6767                        plane,
6768                        cursor_plane,
6769                        &amdgpu_dm_crtc_funcs, NULL);
6770
6771        if (res)
6772                goto fail;
6773
6774        drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6775
6776        /* Create (reset) the plane state */
6777        if (acrtc->base.funcs->reset)
6778                acrtc->base.funcs->reset(&acrtc->base);
6779
6780        acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6781        acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6782
6783        acrtc->crtc_id = crtc_index;
6784        acrtc->base.enabled = false;
6785        acrtc->otg_inst = -1;
6786
6787        dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6788        drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6789                                   true, MAX_COLOR_LUT_ENTRIES);
6790        drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6791
6792        return 0;
6793
6794fail:
6795        kfree(acrtc);
6796        kfree(cursor_plane);
6797        return res;
6798}
6799
6801static int to_drm_connector_type(enum signal_type st)
6802{
6803        switch (st) {
6804        case SIGNAL_TYPE_HDMI_TYPE_A:
6805                return DRM_MODE_CONNECTOR_HDMIA;
6806        case SIGNAL_TYPE_EDP:
6807                return DRM_MODE_CONNECTOR_eDP;
6808        case SIGNAL_TYPE_LVDS:
6809                return DRM_MODE_CONNECTOR_LVDS;
6810        case SIGNAL_TYPE_RGB:
6811                return DRM_MODE_CONNECTOR_VGA;
6812        case SIGNAL_TYPE_DISPLAY_PORT:
6813        case SIGNAL_TYPE_DISPLAY_PORT_MST:
6814                return DRM_MODE_CONNECTOR_DisplayPort;
6815        case SIGNAL_TYPE_DVI_DUAL_LINK:
6816        case SIGNAL_TYPE_DVI_SINGLE_LINK:
6817                return DRM_MODE_CONNECTOR_DVID;
6818        case SIGNAL_TYPE_VIRTUAL:
6819                return DRM_MODE_CONNECTOR_VIRTUAL;
6820
6821        default:
6822                return DRM_MODE_CONNECTOR_Unknown;
6823        }
6824}
6825
6826static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6827{
6828        struct drm_encoder *encoder;
6829
6830        /* There is only one encoder per connector */
6831        drm_connector_for_each_possible_encoder(connector, encoder)
6832                return encoder;
6833
6834        return NULL;
6835}
6836
6837static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6838{
6839        struct drm_encoder *encoder;
6840        struct amdgpu_encoder *amdgpu_encoder;
6841
6842        encoder = amdgpu_dm_connector_to_encoder(connector);
6843
6844        if (encoder == NULL)
6845                return;
6846
6847        amdgpu_encoder = to_amdgpu_encoder(encoder);
6848
6849        amdgpu_encoder->native_mode.clock = 0;
6850
6851        if (!list_empty(&connector->probed_modes)) {
6852                struct drm_display_mode *preferred_mode = NULL;
6853
6854                list_for_each_entry(preferred_mode,
6855                                    &connector->probed_modes,
6856                                    head) {
6857                        if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6858                                amdgpu_encoder->native_mode = *preferred_mode;
6859
6860                        break;
6861                }
6863        }
6864}
6865
6866static struct drm_display_mode *
6867amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6868                             char *name,
6869                             int hdisplay, int vdisplay)
6870{
6871        struct drm_device *dev = encoder->dev;
6872        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6873        struct drm_display_mode *mode = NULL;
6874        struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6875
6876        mode = drm_mode_duplicate(dev, native_mode);
6877
6878        if (mode == NULL)
6879                return NULL;
6880
6881        mode->hdisplay = hdisplay;
6882        mode->vdisplay = vdisplay;
6883        mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6884        strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6885
6886        return mode;
6888}
6889
6890static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6891                                                 struct drm_connector *connector)
6892{
6893        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6894        struct drm_display_mode *mode = NULL;
6895        struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6896        struct amdgpu_dm_connector *amdgpu_dm_connector =
6897                                to_amdgpu_dm_connector(connector);
6898        int i;
6899        int n;
6900        struct mode_size {
6901                char name[DRM_DISPLAY_MODE_LEN];
6902                int w;
6903                int h;
6904        } common_modes[] = {
6905                {  "640x480",  640,  480},
6906                {  "800x600",  800,  600},
6907                { "1024x768", 1024,  768},
6908                { "1280x720", 1280,  720},
6909                { "1280x800", 1280,  800},
6910                {"1280x1024", 1280, 1024},
6911                { "1440x900", 1440,  900},
6912                {"1680x1050", 1680, 1050},
6913                {"1600x1200", 1600, 1200},
6914                {"1920x1080", 1920, 1080},
6915                {"1920x1200", 1920, 1200}
6916        };
6917
6918        n = ARRAY_SIZE(common_modes);
6919
6920        for (i = 0; i < n; i++) {
6921                struct drm_display_mode *curmode = NULL;
6922                bool mode_existed = false;
6923
6924                if (common_modes[i].w > native_mode->hdisplay ||
6925                    common_modes[i].h > native_mode->vdisplay ||
6926                   (common_modes[i].w == native_mode->hdisplay &&
6927                    common_modes[i].h == native_mode->vdisplay))
6928                        continue;
6929
6930                list_for_each_entry(curmode, &connector->probed_modes, head) {
6931                        if (common_modes[i].w == curmode->hdisplay &&
6932                            common_modes[i].h == curmode->vdisplay) {
6933                                mode_existed = true;
6934                                break;
6935                        }
6936                }
6937
6938                if (mode_existed)
6939                        continue;
6940
6941                mode = amdgpu_dm_create_common_mode(encoder,
6942                                common_modes[i].name, common_modes[i].w,
6943                                common_modes[i].h);
                    if (!mode)
                            continue;
6944                drm_mode_probed_add(connector, mode);
6945                amdgpu_dm_connector->num_modes++;
6946        }
6947}
6948
6949static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6950                                              struct edid *edid)
6951{
6952        struct amdgpu_dm_connector *amdgpu_dm_connector =
6953                        to_amdgpu_dm_connector(connector);
6954
6955        if (edid) {
6956                /* empty probed_modes */
6957                INIT_LIST_HEAD(&connector->probed_modes);
6958                amdgpu_dm_connector->num_modes =
6959                                drm_add_edid_modes(connector, edid);
6960
6961                /* Sort the probed modes before calling
6962                 * amdgpu_dm_get_native_mode(), since an EDID can contain
6963                 * more than one preferred mode, and modes that appear
6964                 * later in the probed list may have a higher preferred
6965                 * resolution. For example: a 3840x2160 preferred timing in
6966                 * the base EDID, and a 4096x2160 preferred resolution in a
6967                 * DisplayID extension block that follows it.
6968                 */
6969                drm_mode_sort(&connector->probed_modes);
6970                amdgpu_dm_get_native_mode(connector);
6971        } else {
6972                amdgpu_dm_connector->num_modes = 0;
6973        }
6974}
6975
6976static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6977{
6978        struct amdgpu_dm_connector *amdgpu_dm_connector =
6979                        to_amdgpu_dm_connector(connector);
6980        struct drm_encoder *encoder;
6981        struct edid *edid = amdgpu_dm_connector->edid;
6982
6983        encoder = amdgpu_dm_connector_to_encoder(connector);
6984
6985        if (!drm_edid_is_valid(edid)) {
6986                amdgpu_dm_connector->num_modes =
6987                                drm_add_modes_noedid(connector, 640, 480);
6988        } else {
6989                amdgpu_dm_connector_ddc_get_modes(connector, edid);
6990                amdgpu_dm_connector_add_common_modes(encoder, connector);
6991        }
6992        amdgpu_dm_fbc_init(connector);
6993
6994        return amdgpu_dm_connector->num_modes;
6995}
6996
6997void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6998                                     struct amdgpu_dm_connector *aconnector,
6999                                     int connector_type,
7000                                     struct dc_link *link,
7001                                     int link_index)
7002{
7003        struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7004
7005        /*
7006         * Some of the properties below require access to state, like bpc.
7007         * Allocate some default initial connector state with our reset helper.
7008         */
7009        if (aconnector->base.funcs->reset)
7010                aconnector->base.funcs->reset(&aconnector->base);
7011
7012        aconnector->connector_id = link_index;
7013        aconnector->dc_link = link;
7014        aconnector->base.interlace_allowed = false;
7015        aconnector->base.doublescan_allowed = false;
7016        aconnector->base.stereo_allowed = false;
7017        aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7018        aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7019        aconnector->audio_inst = -1;
7020        mutex_init(&aconnector->hpd_lock);
7021
7022        /*
7023         * Configure HPD hot-plug support. connector->polled defaults to 0,
7024         * which means HPD hot plug is not supported.
7025         */
7026        switch (connector_type) {
7027        case DRM_MODE_CONNECTOR_HDMIA:
7028                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7029                aconnector->base.ycbcr_420_allowed =
7030                        link->link_enc->features.hdmi_ycbcr420_supported;
7031                break;
7032        case DRM_MODE_CONNECTOR_DisplayPort:
7033                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7034                aconnector->base.ycbcr_420_allowed =
7035                        link->link_enc->features.dp_ycbcr420_supported;
7036                break;
7037        case DRM_MODE_CONNECTOR_DVID:
7038                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7039                break;
7040        default:
7041                break;
7042        }
7043
7044        drm_object_attach_property(&aconnector->base.base,
7045                                dm->ddev->mode_config.scaling_mode_property,
7046                                DRM_MODE_SCALE_NONE);
7047
7048        drm_object_attach_property(&aconnector->base.base,
7049                                adev->mode_info.underscan_property,
7050                                UNDERSCAN_OFF);
7051        drm_object_attach_property(&aconnector->base.base,
7052                                adev->mode_info.underscan_hborder_property,
7053                                0);
7054        drm_object_attach_property(&aconnector->base.base,
7055                                adev->mode_info.underscan_vborder_property,
7056                                0);
7057
7058        if (!aconnector->mst_port)
7059                drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7060
7061        /* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
7062        aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7063        aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7064
7065        if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7066            (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7067                drm_object_attach_property(&aconnector->base.base,
7068                                adev->mode_info.abm_level_property, 0);
7069        }
7070
7071        if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7072            connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7073            connector_type == DRM_MODE_CONNECTOR_eDP) {
7074                drm_object_attach_property(
7075                        &aconnector->base.base,
7076                        dm->ddev->mode_config.hdr_output_metadata_property, 0);
7077
7078                if (!aconnector->mst_port)
7079                        drm_connector_attach_vrr_capable_property(&aconnector->base);
7080
7081#ifdef CONFIG_DRM_AMD_DC_HDCP
7082                if (adev->dm.hdcp_workqueue)
7083                        drm_connector_attach_content_protection_property(&aconnector->base, true);
7084#endif
7085        }
7086}
7087
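    /*
     * i2c_algorithm hook: translate an array of Linux i2c_msg transfers into
     * a single DC i2c_command and submit it over the link's DDC channel.
     * Returns the number of messages transferred on success, -EIO otherwise.
     */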
7088static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7089                              struct i2c_msg *msgs, int num)
7090{
7091        struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7092        struct ddc_service *ddc_service = i2c->ddc_service;
7093        struct i2c_command cmd;
7094        int i;
7095        int result = -EIO;
7096
7097        cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7098
7099        if (!cmd.payloads)
7100                return result;
7101
7102        cmd.number_of_payloads = num;
7103        cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7104        cmd.speed = 100;
7105
7106        for (i = 0; i < num; i++) {
7107                cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7108                cmd.payloads[i].address = msgs[i].addr;
7109                cmd.payloads[i].length = msgs[i].len;
7110                cmd.payloads[i].data = msgs[i].buf;
7111        }
7112
7113        if (dc_submit_i2c(
7114                        ddc_service->ctx->dc,
7115                        ddc_service->ddc_pin->hw_info.ddc_channel,
7116                        &cmd))
7117                result = num;
7118
7119        kfree(cmd.payloads);
7120        return result;
7121}
7122
7123static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7124{
7125        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7126}
7127
7128static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7129        .master_xfer = amdgpu_dm_i2c_xfer,
7130        .functionality = amdgpu_dm_i2c_func,
7131};
7132
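    /*
     * Allocate and initialize an i2c adapter wrapping the given DDC service.
     * The caller registers the adapter; link_index doubles as the DDC
     * channel number programmed into the service's pin info.
     */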
7133static struct amdgpu_i2c_adapter *
7134create_i2c(struct ddc_service *ddc_service,
7135           int link_index,
7136           int *res)
7137{
7138        struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7139        struct amdgpu_i2c_adapter *i2c;
7140
7141        i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7142        if (!i2c)
7143                return NULL;
7144        i2c->base.owner = THIS_MODULE;
7145        i2c->base.class = I2C_CLASS_DDC;
7146        i2c->base.dev.parent = &adev->pdev->dev;
7147        i2c->base.algo = &amdgpu_dm_i2c_algo;
7148        snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7149        i2c_set_adapdata(&i2c->base, i2c);
7150        i2c->ddc_service = ddc_service;
7151        i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7152
7153        return i2c;
7154}
7155
7157/*
7158 * Note: this function assumes that dc_link_detect() was called for the
7159 * dc_link which will be represented by this aconnector.
7160 */
7161static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7162                                    struct amdgpu_dm_connector *aconnector,
7163                                    uint32_t link_index,
7164                                    struct amdgpu_encoder *aencoder)
7165{
7166        int res = 0;
7167        int connector_type;
7168        struct dc *dc = dm->dc;
7169        struct dc_link *link = dc_get_link_at_index(dc, link_index);
7170        struct amdgpu_i2c_adapter *i2c;
7171
7172        link->priv = aconnector;
7173
7174        DRM_DEBUG_DRIVER("%s()\n", __func__);
7175
7176        i2c = create_i2c(link->ddc, link->link_index, &res);
7177        if (!i2c) {
7178                DRM_ERROR("Failed to create i2c adapter data\n");
7179                return -ENOMEM;
7180        }
7181
7182        aconnector->i2c = i2c;
7183        res = i2c_add_adapter(&i2c->base);
7184
7185        if (res) {
7186                DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7187                goto out_free;
7188        }
7189
7190        connector_type = to_drm_connector_type(link->connector_signal);
7191
7192        res = drm_connector_init_with_ddc(
7193                        dm->ddev,
7194                        &aconnector->base,
7195                        &amdgpu_dm_connector_funcs,
7196                        connector_type,
7197                        &i2c->base);
7198
7199        if (res) {
7200                DRM_ERROR("connector_init failed\n");
7201                aconnector->connector_id = -1;
7202                goto out_free;
7203        }
7204
7205        drm_connector_helper_add(
7206                        &aconnector->base,
7207                        &amdgpu_dm_connector_helper_funcs);
7208
7209        amdgpu_dm_connector_init_helper(
7210                dm,
7211                aconnector,
7212                connector_type,
7213                link,
7214                link_index);
7215
7216        drm_connector_attach_encoder(
7217                &aconnector->base, &aencoder->base);
7218
7219        if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7220                || connector_type == DRM_MODE_CONNECTOR_eDP)
7221                amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7222
7223out_free:
7224        if (res) {
7225                kfree(i2c);
7226                aconnector->i2c = NULL;
7227        }
7228        return res;
7229}
7230
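    /*
     * Return a bitmask with one bit set per available CRTC: any encoder may
     * be driven by any of the (up to six) CRTCs.
     */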
7231int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7232{
7233        switch (adev->mode_info.num_crtc) {
7234        case 1:
7235                return 0x1;
7236        case 2:
7237                return 0x3;
7238        case 3:
7239                return 0x7;
7240        case 4:
7241                return 0xf;
7242        case 5:
7243                return 0x1f;
7244        case 6:
7245        default:
7246                return 0x3f;
7247        }
7248}
7249
7250static int amdgpu_dm_encoder_init(struct drm_device *dev,
7251                                  struct amdgpu_encoder *aencoder,
7252                                  uint32_t link_index)
7253{
7254        struct amdgpu_device *adev = drm_to_adev(dev);
7255
7256        int res = drm_encoder_init(dev,
7257                                   &aencoder->base,
7258                                   &amdgpu_dm_encoder_funcs,
7259                                   DRM_MODE_ENCODER_TMDS,
7260                                   NULL);
7261
7262        aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7263
7264        if (!res)
7265                aencoder->encoder_id = link_index;
7266        else
7267                aencoder->encoder_id = -1;
7268
7269        drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7270
7271        return res;
7272}
7273
7274static void manage_dm_interrupts(struct amdgpu_device *adev,
7275                                 struct amdgpu_crtc *acrtc,
7276                                 bool enable)
7277{
7278        /*
7279         * We have no guarantee that the frontend index maps to the same
7280         * backend index - some even map to more than one.
7281         *
7282         * TODO: Use a different interrupt or check DC itself for the mapping.
7283         */
7284        int irq_type =
7285                amdgpu_display_crtc_idx_to_irq_type(
7286                        adev,
7287                        acrtc->crtc_id);
7288
7289        if (enable) {
7290                drm_crtc_vblank_on(&acrtc->base);
7291                amdgpu_irq_get(
7292                        adev,
7293                        &adev->pageflip_irq,
7294                        irq_type);
7295        } else {
7297                amdgpu_irq_put(
7298                        adev,
7299                        &adev->pageflip_irq,
7300                        irq_type);
7301                drm_crtc_vblank_off(&acrtc->base);
7302        }
7303}
7304
7305static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7306                                      struct amdgpu_crtc *acrtc)
7307{
7308        int irq_type =
7309                amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7310
7311        /*
7312         * Read the current IRQ state and forcibly reapply the setting
7313         * to the hardware.
7314         */
7315        amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7316}
7317
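    /*
     * Return true when the scaling mode or the underscan borders changed in
     * a way that requires the stream scaling to be reprogrammed.
     */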
7318static bool
7319is_scaling_state_different(const struct dm_connector_state *dm_state,
7320                           const struct dm_connector_state *old_dm_state)
7321{
7322        if (dm_state->scaling != old_dm_state->scaling)
7323                return true;
7324        if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7325                if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7326                        return true;
7327        } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7328                if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7329                        return true;
7330        } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7331                   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7332                return true;
7333        return false;
7334}
7335
7336#ifdef CONFIG_DRM_AMD_DC_HDCP
7337static bool is_content_protection_different(struct drm_connector_state *state,
7338                                            const struct drm_connector_state *old_state,
7339                                            const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7340{
7341        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7342        struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7343
7344        /* Handle: Type0/1 change */
7345        if (old_state->hdcp_content_type != state->hdcp_content_type &&
7346            state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7347                state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7348                return true;
7349        }
7350
7351        /* CP is being re-enabled, ignore this
7352         *
7353         * Handles:     ENABLED -> DESIRED
7354         */
7355        if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7356            state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7357                state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7358                return false;
7359        }
7360
7361        /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7362         *
7363         * Handles:     UNDESIRED -> ENABLED
7364         */
7365        if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7366            state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7367                state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7368
7369        /* Check that something is actually connected and enabled; otherwise we
7370         * would start HDCP with nothing to protect (hot-plug, headless S3, DPMS).
7371         *
7372         * Handles:     DESIRED -> DESIRED (Special case)
7373         */
7374        if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7375            connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7376                dm_con_state->update_hdcp = false;
7377                return true;
7378        }
7379
7380        /*
7381         * Handles:     UNDESIRED -> UNDESIRED
7382         *              DESIRED -> DESIRED
7383         *              ENABLED -> ENABLED
7384         */
7385        if (old_state->content_protection == state->content_protection)
7386                return false;
7387
7388        /*
7389         * Handles:     UNDESIRED -> DESIRED
7390         *              DESIRED -> UNDESIRED
7391         *              ENABLED -> UNDESIRED
7392         */
7393        if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7394                return true;
7395
7396        /*
7397         * Handles:     DESIRED -> ENABLED
7398         */
7399        return false;
7400}
7401
7402#endif
7403static void remove_stream(struct amdgpu_device *adev,
7404                          struct amdgpu_crtc *acrtc,
7405                          struct dc_stream_state *stream)
7406{
7407        /* Update-mode case: detach the CRTC from its stream/OTG instance. */
7408
7409        acrtc->otg_inst = -1;
7410        acrtc->enabled = false;
7411}
7412
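    /*
     * Compute the DC cursor position from the cursor plane state. Negative
     * on-screen coordinates are clamped to zero and folded into the hotspot
     * so the cursor can slide off the top/left edge of the screen.
     */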
7413static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7414                               struct dc_cursor_position *position)
7415{
7416        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7417        int x, y;
7418        int xorigin = 0, yorigin = 0;
7419
7420        position->enable = false;
7421        position->x = 0;
7422        position->y = 0;
7423
7424        if (!crtc || !plane->state->fb)
7425                return 0;
7426
7427        if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7428            (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7429                DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7430                          __func__,
7431                          plane->state->crtc_w,
7432                          plane->state->crtc_h);
7433                return -EINVAL;
7434        }
7435
7436        x = plane->state->crtc_x;
7437        y = plane->state->crtc_y;
7438
7439        if (x <= -amdgpu_crtc->max_cursor_width ||
7440            y <= -amdgpu_crtc->max_cursor_height)
7441                return 0;
7442
7443        if (x < 0) {
7444                xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7445                x = 0;
7446        }
7447        if (y < 0) {
7448                yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7449                y = 0;
7450        }
7451        position->enable = true;
7452        position->translate_by_source = true;
7453        position->x = x;
7454        position->y = y;
7455        position->x_hotspot = xorigin;
7456        position->y_hotspot = yorigin;
7457
7458        return 0;
7459}
7460
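    /*
     * Program the cursor through DC: derive position and attributes from
     * the plane state and apply them under dc_lock, turning the cursor off
     * when it has no valid position.
     */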
7461static void handle_cursor_update(struct drm_plane *plane,
7462                                 struct drm_plane_state *old_plane_state)
7463{
7464        struct amdgpu_device *adev = drm_to_adev(plane->dev);
7465        struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7466        struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7467        struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7468        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7469        uint64_t address = afb ? afb->address : 0;
7470        struct dc_cursor_position position;
7471        struct dc_cursor_attributes attributes;
7472        int ret;
7473
7474        if (!plane->state->fb && !old_plane_state->fb)
7475                return;
7476
7477        DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %dx%d\n",
7478                         __func__,
7479                         amdgpu_crtc->crtc_id,
7480                         plane->state->crtc_w,
7481                         plane->state->crtc_h);
7482
7483        ret = get_cursor_position(plane, crtc, &position);
7484        if (ret)
7485                return;
7486
7487        if (!position.enable) {
7488                /* turn off cursor */
7489                if (crtc_state && crtc_state->stream) {
7490                        mutex_lock(&adev->dm.dc_lock);
7491                        dc_stream_set_cursor_position(crtc_state->stream,
7492                                                      &position);
7493                        mutex_unlock(&adev->dm.dc_lock);
7494                }
7495                return;
7496        }
7497
7498        amdgpu_crtc->cursor_width = plane->state->crtc_w;
7499        amdgpu_crtc->cursor_height = plane->state->crtc_h;
7500
7501        memset(&attributes, 0, sizeof(attributes));
7502        attributes.address.high_part = upper_32_bits(address);
7503        attributes.address.low_part  = lower_32_bits(address);
7504        attributes.width             = plane->state->crtc_w;
7505        attributes.height            = plane->state->crtc_h;
7506        attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7507        attributes.rotation_angle    = 0;
7508        attributes.attribute_flags.value = 0;
7509
7510        attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7511
7512        if (crtc_state->stream) {
7513                mutex_lock(&adev->dm.dc_lock);
7514                if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7515                                                         &attributes))
7516                        DRM_ERROR("DC failed to set cursor attributes\n");
7517
7518                if (!dc_stream_set_cursor_position(crtc_state->stream,
7519                                                   &position))
7520                        DRM_ERROR("DC failed to set cursor position\n");
7521                mutex_unlock(&adev->dm.dc_lock);
7522        }
7523}
7524
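    /*
     * Hand the pending pageflip event over to the pageflip interrupt
     * handler. Caller must hold the CRTC's event_lock.
     */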
7525static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7526{
7528        assert_spin_locked(&acrtc->base.dev->event_lock);
7529        WARN_ON(acrtc->event);
7530
7531        acrtc->event = acrtc->base.state->event;
7532
7533        /* Set the flip status */
7534        acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7535
7536        /* Mark this event as consumed */
7537        acrtc->base.state->event = NULL;
7538
7539        DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7540                                                 acrtc->crtc_id);
7541}
7542
7543static void update_freesync_state_on_stream(
7544        struct amdgpu_display_manager *dm,
7545        struct dm_crtc_state *new_crtc_state,
7546        struct dc_stream_state *new_stream,
7547        struct dc_plane_state *surface,
7548        u32 flip_timestamp_in_us)
7549{
7550        struct mod_vrr_params vrr_params;
7551        struct dc_info_packet vrr_infopacket = {0};
7552        struct amdgpu_device *adev = dm->adev;
7553        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7554        unsigned long flags;
7555
7556        if (!new_stream)
7557                return;
7558
7559        /*
7560         * TODO: Determine why min/max totals and vrefresh can be 0 here.
7561         * For now it's sufficient to just guard against these conditions.
7562         */
7563
7564        if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7565                return;
7566
7567        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7568        vrr_params = acrtc->dm_irq_params.vrr_params;
7569
7570        if (surface) {
7571                mod_freesync_handle_preflip(
7572                        dm->freesync_module,
7573                        surface,
7574                        new_stream,
7575                        flip_timestamp_in_us,
7576                        &vrr_params);
7577
7578                if (adev->family < AMDGPU_FAMILY_AI &&
7579                    amdgpu_dm_vrr_active(new_crtc_state)) {
7580                        mod_freesync_handle_v_update(dm->freesync_module,
7581                                                     new_stream, &vrr_params);
7582
7583                        /* Need to call this before the frame ends. */
7584                        dc_stream_adjust_vmin_vmax(dm->dc,
7585                                                   new_crtc_state->stream,
7586                                                   &vrr_params.adjust);
7587                }
7588        }
7589
7590        mod_freesync_build_vrr_infopacket(
7591                dm->freesync_module,
7592                new_stream,
7593                &vrr_params,
7594                PACKET_TYPE_VRR,
7595                TRANSFER_FUNC_UNKNOWN,
7596                &vrr_infopacket);
7597
7598        new_crtc_state->freesync_timing_changed |=
7599                (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7600                        &vrr_params.adjust,
7601                        sizeof(vrr_params.adjust)) != 0);
7602
7603        new_crtc_state->freesync_vrr_info_changed |=
7604                (memcmp(&new_crtc_state->vrr_infopacket,
7605                        &vrr_infopacket,
7606                        sizeof(vrr_infopacket)) != 0);
7607
7608        acrtc->dm_irq_params.vrr_params = vrr_params;
7609        new_crtc_state->vrr_infopacket = vrr_infopacket;
7610
7611        new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7612        new_stream->vrr_infopacket = vrr_infopacket;
7613
7614        if (new_crtc_state->freesync_vrr_info_changed)
7615                DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7616                              new_crtc_state->base.crtc->base.id,
7617                              (int)new_crtc_state->base.vrr_enabled,
7618                              (int)vrr_params.state);
7619
7620        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7621}
7622
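    /*
     * Recompute the stream's VRR parameters and mirror them, along with the
     * freesync config and active plane count, into dm_irq_params so the DM
     * interrupt handlers can use them without touching atomic state.
     */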
7623static void update_stream_irq_parameters(
7624        struct amdgpu_display_manager *dm,
7625        struct dm_crtc_state *new_crtc_state)
7626{
7627        struct dc_stream_state *new_stream = new_crtc_state->stream;
7628        struct mod_vrr_params vrr_params;
7629        struct mod_freesync_config config = new_crtc_state->freesync_config;
7630        struct amdgpu_device *adev = dm->adev;
7631        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7632        unsigned long flags;
7633
7634        if (!new_stream)
7635                return;
7636
7637        /*
7638         * TODO: Determine why min/max totals and vrefresh can be 0 here.
7639         * For now it's sufficient to just guard against these conditions.
7640         */
7641        if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7642                return;
7643
7644        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7645        vrr_params = acrtc->dm_irq_params.vrr_params;
7646
7647        if (new_crtc_state->vrr_supported &&
7648            config.min_refresh_in_uhz &&
7649            config.max_refresh_in_uhz) {
7650                config.state = new_crtc_state->base.vrr_enabled ?
7651                        VRR_STATE_ACTIVE_VARIABLE :
7652                        VRR_STATE_INACTIVE;
7653        } else {
7654                config.state = VRR_STATE_UNSUPPORTED;
7655        }
7656
7657        mod_freesync_build_vrr_params(dm->freesync_module,
7658                                      new_stream,
7659                                      &config, &vrr_params);
7660
7661        new_crtc_state->freesync_timing_changed |=
7662                (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7663                        &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7664
7665        new_crtc_state->freesync_config = config;
7666        /* Copy state for access from DM IRQ handler */
7667        acrtc->dm_irq_params.freesync_config = config;
7668        acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7669        acrtc->dm_irq_params.vrr_params = vrr_params;
7670        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7671}
7672
7673static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7674                                            struct dm_crtc_state *new_state)
7675{
7676        bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7677        bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7678
7679        if (!old_vrr_active && new_vrr_active) {
7680                /* Transition VRR inactive -> active:
7681                 * While VRR is active, we must not disable vblank irq, as a
7682                 * reenable after disable would compute bogus vblank/pflip
7683                 * timestamps if it happens inside the display front porch.
7684                 *
7685                 * We also need vupdate irq for the actual core vblank handling
7686                 * at end of vblank.
7687                 */
7688                dm_set_vupdate_irq(new_state->base.crtc, true);
7689                drm_crtc_vblank_get(new_state->base.crtc);
7690                DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7691                                 __func__, new_state->base.crtc->base.id);
7692        } else if (old_vrr_active && !new_vrr_active) {
7693                /* Transition VRR active -> inactive:
7694                 * Allow vblank irq disable again for fixed refresh rate.
7695                 */
7696                dm_set_vupdate_irq(new_state->base.crtc, false);
7697                drm_crtc_vblank_put(new_state->base.crtc);
7698                DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7699                                 __func__, new_state->base.crtc->base.id);
7700        }
7701}
7702
7703static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7704{
7705        struct drm_plane *plane;
7706        struct drm_plane_state *old_plane_state, *new_plane_state;
7707        int i;
7708
7709        /*
7710         * TODO: Make this per-stream so we don't issue redundant updates for
7711         * commits with multiple streams.
7712         */
7713        for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7714                                       new_plane_state, i)
7715                if (plane->type == DRM_PLANE_TYPE_CURSOR)
7716                        handle_cursor_update(plane, old_plane_state);
7717}
7718
7719static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7720                                    struct dc_state *dc_state,
7721                                    struct drm_device *dev,
7722                                    struct amdgpu_display_manager *dm,
7723                                    struct drm_crtc *pcrtc,
7724                                    bool wait_for_vblank)
7725{
7726        uint32_t i;
7727        uint64_t timestamp_ns;
7728        struct drm_plane *plane;
7729        struct drm_plane_state *old_plane_state, *new_plane_state;
7730        struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7731        struct drm_crtc_state *new_pcrtc_state =
7732                        drm_atomic_get_new_crtc_state(state, pcrtc);
7733        struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7734        struct dm_crtc_state *dm_old_crtc_state =
7735                        to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7736        int planes_count = 0, vpos, hpos;
7737        long r;
7738        unsigned long flags;
7739        struct amdgpu_bo *abo;
7740        uint32_t target_vblank, last_flip_vblank;
7741        bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7742        bool pflip_present = false;
7743        struct {
7744                struct dc_surface_update surface_updates[MAX_SURFACES];
7745                struct dc_plane_info plane_infos[MAX_SURFACES];
7746                struct dc_scaling_info scaling_infos[MAX_SURFACES];
7747                struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7748                struct dc_stream_update stream_update;
7749        } *bundle;
7750
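            /*
             * The update bundle is too large to live on the kernel stack,
             * so it is allocated on the heap instead.
             */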
7751        bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7752
7753        if (!bundle) {
7754                dm_error("Failed to allocate update bundle\n");
7755                goto cleanup;
7756        }
7757
7758        /*
7759         * Disable the cursor first if we're disabling all the planes.
7760         * It'll remain on the screen after the planes are re-enabled
7761         * if we don't.
7762         */
7763        if (acrtc_state->active_planes == 0)
7764                amdgpu_dm_commit_cursors(state);
7765
7766        /* update planes when needed */
7767        for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7768                struct drm_crtc *crtc = new_plane_state->crtc;
7769                struct drm_crtc_state *new_crtc_state;
7770                struct drm_framebuffer *fb = new_plane_state->fb;
7771                struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7772                bool plane_needs_flip;
7773                struct dc_plane_state *dc_plane;
7774                struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7775
7776                /* Cursor plane is handled after stream updates */
7777                if (plane->type == DRM_PLANE_TYPE_CURSOR)
7778                        continue;
7779
7780                if (!fb || !crtc || pcrtc != crtc)
7781                        continue;
7782
7783                new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7784                if (!new_crtc_state->active)
7785                        continue;
7786
7787                dc_plane = dm_new_plane_state->dc_state;
7788
7789                bundle->surface_updates[planes_count].surface = dc_plane;
7790                if (new_pcrtc_state->color_mgmt_changed) {
7791                        bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7792                        bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7793                        bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7794                }
7795
7796                fill_dc_scaling_info(new_plane_state,
7797                                     &bundle->scaling_infos[planes_count]);
7798
7799                bundle->surface_updates[planes_count].scaling_info =
7800                        &bundle->scaling_infos[planes_count];
7801
7802                plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7803
7804                pflip_present = pflip_present || plane_needs_flip;
7805
7806                if (!plane_needs_flip) {
7807                        planes_count += 1;
7808                        continue;
7809                }
7810
7811                abo = gem_to_amdgpu_bo(fb->obj[0]);
7812
7813                /*
7814                 * Wait for all fences on this FB. Do limited wait to avoid
7815                 * deadlock during GPU reset when this fence will not signal
7816                 * but we hold reservation lock for the BO.
7817                 */
7818                r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7819                                                        false,
7820                                                        msecs_to_jiffies(5000));
7821                if (unlikely(r <= 0))
7822                        DRM_ERROR("Waiting for fences timed out!");
7823
7824                fill_dc_plane_info_and_addr(
7825                        dm->adev, new_plane_state,
7826                        afb->tiling_flags,
7827                        &bundle->plane_infos[planes_count],
7828                        &bundle->flip_addrs[planes_count].address,
7829                        afb->tmz_surface, false);
7830
7831                DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7832                                 new_plane_state->plane->index,
7833                                 bundle->plane_infos[planes_count].dcc.enable);
7834
7835                bundle->surface_updates[planes_count].plane_info =
7836                        &bundle->plane_infos[planes_count];
7837
7838                /*
7839                 * Only allow immediate flips for fast updates that don't
7840                 * change FB pitch, DCC state, rotation or mirroring.
7841                 */
7842                bundle->flip_addrs[planes_count].flip_immediate =
7843                        crtc->state->async_flip &&
7844                        acrtc_state->update_type == UPDATE_TYPE_FAST;
7845
7846                timestamp_ns = ktime_get_ns();
7847                bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7848                bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7850
7851                if (!bundle->surface_updates[planes_count].surface) {
7852                        DRM_ERROR("No surface for CRTC: id=%d\n",
7853                                        acrtc_attach->crtc_id);
7854                        continue;
7855                }
7856
7857                if (plane == pcrtc->primary)
7858                        update_freesync_state_on_stream(
7859                                dm,
7860                                acrtc_state,
7861                                acrtc_state->stream,
7862                                dc_plane,
7863                                bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7864
7865                DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7866                                 __func__,
7867                                 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7868                                 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7869
7870                planes_count += 1;
7871
7872        }
7873
7874        if (pflip_present) {
7875                if (!vrr_active) {
7876                        /* Use old throttling in non-vrr fixed refresh rate mode
7877                         * to keep flip scheduling based on target vblank counts
7878                         * working in a backwards compatible way, e.g., for
7879                         * clients using the GLX_OML_sync_control extension or
7880                         * DRI3/Present extension with defined target_msc.
7881                         */
7882                        last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7883                } else {
7885                        /* For variable refresh rate mode only:
7886                         * Get vblank of last completed flip to avoid > 1 vrr
7887                         * flips per video frame by use of throttling, but allow
7888                         * flip programming anywhere in the possibly large
7889                         * variable vrr vblank interval for fine-grained flip
7890                         * timing control and more opportunity to avoid stutter
7891                         * on late submission of flips.
7892                         */
7893                        spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7894                        last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7895                        spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7896                }
7897
7898                target_vblank = last_flip_vblank + wait_for_vblank;
7899
7900                /*
7901                 * Wait until we're out of the vertical blank period before the one
7902                 * targeted by the flip
7903                 */
7904                while ((acrtc_attach->enabled &&
7905                        (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7906                                                            0, &vpos, &hpos, NULL,
7907                                                            NULL, &pcrtc->hwmode)
7908                         & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7909                        (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7910                        (int)(target_vblank -
7911                          amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7912                        usleep_range(1000, 1100);
7913                }
7914
7915                /*
7916                 * Prepare the flip event for the pageflip interrupt to handle.
7917                 *
7918                 * This only works in the case where we've already turned on the
7919                 * appropriate hardware blocks (e.g. HUBP), so in the transition
7920                 * case from 0 -> n planes we have to skip a hardware-generated
7921                 * event and rely on sending it from software.
7922                 */
7923                if (acrtc_attach->base.state->event &&
7924                    acrtc_state->active_planes > 0) {
7925                        drm_crtc_vblank_get(pcrtc);
7926
7927                        spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7928
7929                        WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7930                        prepare_flip_isr(acrtc_attach);
7931
7932                        spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7933                }
7934
7935                if (acrtc_state->stream) {
7936                        if (acrtc_state->freesync_vrr_info_changed)
7937                                bundle->stream_update.vrr_infopacket =
7938                                        &acrtc_state->stream->vrr_infopacket;
7939                }
7940        }
7941
7942        /* Update the planes if changed or disable if we don't have any. */
7943        if ((planes_count || acrtc_state->active_planes == 0) &&
7944                acrtc_state->stream) {
7945                bundle->stream_update.stream = acrtc_state->stream;
7946                if (new_pcrtc_state->mode_changed) {
7947                        bundle->stream_update.src = acrtc_state->stream->src;
7948                        bundle->stream_update.dst = acrtc_state->stream->dst;
7949                }
7950
7951                if (new_pcrtc_state->color_mgmt_changed) {
7952                        /*
7953                         * TODO: This isn't fully correct since we've actually
7954                         * already modified the stream in place.
7955                         */
7956                        bundle->stream_update.gamut_remap =
7957                                &acrtc_state->stream->gamut_remap_matrix;
7958                        bundle->stream_update.output_csc_transform =
7959                                &acrtc_state->stream->csc_color_matrix;
7960                        bundle->stream_update.out_transfer_func =
7961                                acrtc_state->stream->out_transfer_func;
7962                }
7963
7964                acrtc_state->stream->abm_level = acrtc_state->abm_level;
7965                if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7966                        bundle->stream_update.abm_level = &acrtc_state->abm_level;
7967
7968                /*
7969                 * If FreeSync state on the stream has changed then we need to
7970                 * re-adjust the min/max bounds now that DC doesn't handle this
7971                 * as part of commit.
7972                 */
7973                if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7974                    amdgpu_dm_vrr_active(acrtc_state)) {
7975                        spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7976                        dc_stream_adjust_vmin_vmax(
7977                                dm->dc, acrtc_state->stream,
7978                                &acrtc_attach->dm_irq_params.vrr_params.adjust);
7979                        spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7980                }
7981                mutex_lock(&dm->dc_lock);
7982                if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7983                                acrtc_state->stream->link->psr_settings.psr_allow_active)
7984                        amdgpu_dm_psr_disable(acrtc_state->stream);
7985
7986                dc_commit_updates_for_stream(dm->dc,
7987                                                     bundle->surface_updates,
7988                                                     planes_count,
7989                                                     acrtc_state->stream,
7990                                                     &bundle->stream_update,
7991                                                     dc_state);
7992
7993                /*
7994                 * Enable or disable the interrupts on the backend.
7995                 *
7996                 * Most pipes are put into power gating when unused.
7997                 *
7998                 * When power gating is enabled on a pipe we lose the
7999                 * interrupt enablement state when power gating is disabled.
8000                 *
8001                 * So we need to update the IRQ control state in hardware
8002                 * whenever the pipe turns on (since it could be previously
8003                 * power gated) or off (since some pipes can't be power gated
8004                 * on some ASICs).
8005                 */
8006                if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8007                        dm_update_pflip_irq_state(drm_to_adev(dev),
8008                                                  acrtc_attach);
8009
8010                if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8011                                acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8012                                !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8013                        amdgpu_dm_link_setup_psr(acrtc_state->stream);
8014                else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8015                                acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8016                                !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8017                        amdgpu_dm_psr_enable(acrtc_state->stream);
8018                }
8019
8020                mutex_unlock(&dm->dc_lock);
8021        }
8022
8023        /*
8024         * Update cursor state *after* programming all the planes.
8025         * This avoids redundant programming in the case where we're going
8026         * to be disabling a single plane - those pipes are being disabled.
8027         */
8028        if (acrtc_state->active_planes)
8029                amdgpu_dm_commit_cursors(state);
8030
8031cleanup:
8032        kfree(bundle);
8033}
8034
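    /*
     * Notify the audio component (ELD) about connectors that lost their
     * CRTC (removals) first, then about newly enabled streams (additions),
     * so audio state tracks the modeset.
     */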
8035static void amdgpu_dm_commit_audio(struct drm_device *dev,
8036                                   struct drm_atomic_state *state)
8037{
8038        struct amdgpu_device *adev = drm_to_adev(dev);
8039        struct amdgpu_dm_connector *aconnector;
8040        struct drm_connector *connector;
8041        struct drm_connector_state *old_con_state, *new_con_state;
8042        struct drm_crtc_state *new_crtc_state;
8043        struct dm_crtc_state *new_dm_crtc_state;
8044        const struct dc_stream_status *status;
8045        int i, inst;
8046
8047        /* Notify device removals. */
8048        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8049                if (old_con_state->crtc != new_con_state->crtc) {
8050                        /* CRTC changes require notification. */
8051                        goto notify;
8052                }
8053
8054                if (!new_con_state->crtc)
8055                        continue;
8056
8057                new_crtc_state = drm_atomic_get_new_crtc_state(
8058                        state, new_con_state->crtc);
8059
8060                if (!new_crtc_state)
8061                        continue;
8062
8063                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8064                        continue;
8065
8066        notify:
8067                aconnector = to_amdgpu_dm_connector(connector);
8068
8069                mutex_lock(&adev->dm.audio_lock);
8070                inst = aconnector->audio_inst;
8071                aconnector->audio_inst = -1;
8072                mutex_unlock(&adev->dm.audio_lock);
8073
8074                amdgpu_dm_audio_eld_notify(adev, inst);
8075        }
8076
8077        /* Notify audio device additions. */
8078        for_each_new_connector_in_state(state, connector, new_con_state, i) {
8079                if (!new_con_state->crtc)
8080                        continue;
8081
8082                new_crtc_state = drm_atomic_get_new_crtc_state(
8083                        state, new_con_state->crtc);
8084
8085                if (!new_crtc_state)
8086                        continue;
8087
8088                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8089                        continue;
8090
8091                new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8092                if (!new_dm_crtc_state->stream)
8093                        continue;
8094
8095                status = dc_stream_get_status(new_dm_crtc_state->stream);
8096                if (!status)
8097                        continue;
8098
8099                aconnector = to_amdgpu_dm_connector(connector);
8100
8101                mutex_lock(&adev->dm.audio_lock);
8102                inst = status->audio_inst;
8103                aconnector->audio_inst = inst;
8104                mutex_unlock(&adev->dm.audio_lock);
8105
8106                amdgpu_dm_audio_eld_notify(adev, inst);
8107        }
8108}
8109
8110/*
8111 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8112 * @crtc_state: the DRM CRTC state
8113 * @stream_state: the DC stream state.
8114 *
8115 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8116 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8117 */
8118static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8119                                                struct dc_stream_state *stream_state)
8120{
8121        stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8122}
8123
8124/**
8125 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8126 * @state: The atomic state to commit
8127 *
8128 * This will tell DC to commit the constructed DC state from atomic_check,
8129 * programming the hardware. Any failure here implies a hardware failure, since
8130 * atomic check should have filtered anything non-kosher.
8131 */
8132static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8133{
8134        struct drm_device *dev = state->dev;
8135        struct amdgpu_device *adev = drm_to_adev(dev);
8136        struct amdgpu_display_manager *dm = &adev->dm;
8137        struct dm_atomic_state *dm_state;
8138        struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8139        uint32_t i, j;
8140        struct drm_crtc *crtc;
8141        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8142        unsigned long flags;
8143        bool wait_for_vblank = true;
8144        struct drm_connector *connector;
8145        struct drm_connector_state *old_con_state, *new_con_state;
8146        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8147        int crtc_disable_count = 0;
8148        bool mode_set_reset_required = false;
8149
8150        trace_amdgpu_dm_atomic_commit_tail_begin(state);
8151
8152        drm_atomic_helper_update_legacy_modeset_state(dev, state);
8153
8154        dm_state = dm_atomic_get_new_state(state);
8155        if (dm_state && dm_state->context) {
8156                dc_state = dm_state->context;
8157        } else {
8158                /* No state changes, retain current state. */
8159                dc_state_temp = dc_create_state(dm->dc);
8160                ASSERT(dc_state_temp);
8161                dc_state = dc_state_temp;
8162                dc_resource_state_copy_construct_current(dm->dc, dc_state);
8163        }
8164
8165        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8166                                      new_crtc_state, i) {
8167                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8168
8169                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8170
8171                if (old_crtc_state->active &&
8172                    (!new_crtc_state->active ||
8173                     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8174                        manage_dm_interrupts(adev, acrtc, false);
8175                        dc_stream_release(dm_old_crtc_state->stream);
8176                }
8177        }
8178
8179        drm_atomic_helper_calc_timestamping_constants(state);
8180
8181        /* update changed items */
8182        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8183                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8184
8185                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8186                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8187
8188                DRM_DEBUG_DRIVER(
8189                        "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8190                        "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8191                        "connectors_changed:%d\n",
8192                        acrtc->crtc_id,
8193                        new_crtc_state->enable,
8194                        new_crtc_state->active,
8195                        new_crtc_state->planes_changed,
8196                        new_crtc_state->mode_changed,
8197                        new_crtc_state->active_changed,
8198                        new_crtc_state->connectors_changed);
8199
8200                /* Disable cursor if disabling crtc */
8201                if (old_crtc_state->active && !new_crtc_state->active) {
8202                        struct dc_cursor_position position;
8203
8204                        memset(&position, 0, sizeof(position));
8205                        mutex_lock(&dm->dc_lock);
8206                        dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8207                        mutex_unlock(&dm->dc_lock);
8208                }
8209
8210                /* Copy all transient state flags into dc state */
8211                if (dm_new_crtc_state->stream) {
8212                        amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8213                                                            dm_new_crtc_state->stream);
8214                }
8215
8216                /* Handle the headless hotplug case, updating new_state
8217                 * and aconnector as needed.
8218                 */
8219
8220                if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8221
8222                        DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8223
8224                        if (!dm_new_crtc_state->stream) {
8225                                /*
8226                                 * This can happen because of issues with the
8227                                 * delivery of userspace notifications:
8228                                 * userspace tries to set a mode on a display
8229                                 * that is in fact already disconnected, so
8230                                 * the aconnector's dc_sink is NULL.
8231                                 * We expect a mode reset to follow soon.
8232                                 *
8233                                 * This can also happen when a display is
8234                                 * unplugged during the resume sequence.
8235                                 *
8236                                 * In both cases, pretend we still have a
8237                                 * sink to keep the pipe running so that hw
8238                                 * state stays consistent with the sw state.
8239                                 */
8240                                DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8241                                                __func__, acrtc->base.base.id);
8242                                continue;
8243                        }
8244
8245                        if (dm_old_crtc_state->stream)
8246                                remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8247
8248                        pm_runtime_get_noresume(dev->dev);
8249
8250                        acrtc->enabled = true;
8251                        acrtc->hw_mode = new_crtc_state->mode;
8252                        crtc->hwmode = new_crtc_state->mode;
8253                        mode_set_reset_required = true;
8254                } else if (modereset_required(new_crtc_state)) {
8255                        DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8256                        /* i.e. reset mode */
8257                        if (dm_old_crtc_state->stream)
8258                                remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8259                        mode_set_reset_required = true;
8260                }
8261        } /* for_each_crtc_in_state() */
8262
8263        if (dc_state) {
8264                /* If there is a mode set or reset, disable eDP PSR */
8265                if (mode_set_reset_required)
8266                        amdgpu_dm_psr_disable_all(dm);
8267
8268                dm_enable_per_frame_crtc_master_sync(dc_state);
8269                mutex_lock(&dm->dc_lock);
8270                WARN_ON(!dc_commit_state(dm->dc, dc_state));
8271                mutex_unlock(&dm->dc_lock);
8272        }
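        /*
         * dc_commit_state() above programs the full stream/plane topology in
         * one shot; stream-property-only updates (scaling, ABM level, HDR
         * metadata) are applied separately later in this function via
         * dc_commit_updates_for_stream().
         */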
8273
8274        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8275                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8276
8277                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8278
8279                if (dm_new_crtc_state->stream != NULL) {
8280                        const struct dc_stream_status *status =
8281                                        dc_stream_get_status(dm_new_crtc_state->stream);
8282
8283                        if (!status)
8284                                status = dc_stream_get_status_from_state(dc_state,
8285                                                                         dm_new_crtc_state->stream);
8286                        if (!status)
8287                                DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8288                        else
8289                                acrtc->otg_inst = status->primary_otg_inst;
8290                }
8291        }
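        /*
         * Caching the OTG (output timing generator) instance matters because
         * the pageflip/vblank IRQ handlers elsewhere in this file map a
         * hardware interrupt source back to its CRTC via acrtc->otg_inst.
         */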
8292#ifdef CONFIG_DRM_AMD_DC_HDCP
8293        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8294                struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8295                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8296                struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8297
8298                new_crtc_state = NULL;
8299
8300                if (acrtc)
8301                        new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8302
8303                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8304
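                /*
                 * The stream is gone but userspace still reports content
                 * protection as ENABLED: tear HDCP down on the link and
                 * demote the property to DESIRED so that protection is
                 * renegotiated once the display is driven again.
                 */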
8305                if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8306                    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8307                        hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8308                        new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8309                        dm_new_con_state->update_hdcp = true;
8310                        continue;
8311                }
8312
8313                if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8314                        hdcp_update_display(
8315                                adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8316                                new_con_state->hdcp_content_type,
8317                                new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8318                                                                                                         : false);
8319        }
8320#endif
8321
8322        /* Handle connector state changes */
8323        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8324                struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8325                struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8326                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8327                struct dc_surface_update dummy_updates[MAX_SURFACES];
8328                struct dc_stream_update stream_update;
8329                struct dc_info_packet hdr_packet;
8330                struct dc_stream_status *status = NULL;
8331                bool abm_changed, hdr_changed, scaling_changed;
8332
8333                memset(&dummy_updates, 0, sizeof(dummy_updates));
8334                memset(&stream_update, 0, sizeof(stream_update));
8335
8336                if (acrtc) {
8337                        new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8338                        old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8339                }
8340
8341                /* Skip any modesets/resets */
8342                if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8343                        continue;
8344
8345                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8346                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8347
8348                scaling_changed = is_scaling_state_different(dm_new_con_state,
8349                                                             dm_old_con_state);
8350
8351                abm_changed = dm_new_crtc_state->abm_level !=
8352                              dm_old_crtc_state->abm_level;
8353
8354                hdr_changed =
8355                        is_hdr_metadata_different(old_con_state, new_con_state);
8356
8357                if (!scaling_changed && !abm_changed && !hdr_changed)
8358                        continue;
8359
8360                stream_update.stream = dm_new_crtc_state->stream;
8361                if (scaling_changed) {
8362                        update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8363                                        dm_new_con_state, dm_new_crtc_state->stream);
8364
8365                        stream_update.src = dm_new_crtc_state->stream->src;
8366                        stream_update.dst = dm_new_crtc_state->stream->dst;
8367                }
8368
8369                if (abm_changed) {
8370                        dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8371
8372                        stream_update.abm_level = &dm_new_crtc_state->abm_level;
8373                }
8374
8375                if (hdr_changed) {
8376                        fill_hdr_info_packet(new_con_state, &hdr_packet);
8377                        stream_update.hdr_static_metadata = &hdr_packet;
8378                }
8379
8380                status = dc_stream_get_status(dm_new_crtc_state->stream);
8381                if (WARN_ON(!status) || WARN_ON(!status->plane_count))
8382                        continue;
8383
8384                /*
8385                 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8386                 * Here we create an empty update on each plane.
8387                 * To fix this, DC should permit updating only stream properties.
8388                 */
8389                for (j = 0; j < status->plane_count; j++)
8390                        dummy_updates[j].surface = status->plane_states[0];
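                /*
                 * Note that every dummy update points at the stream's first
                 * plane state; the surfaces themselves are untouched, the
                 * updates exist purely to satisfy DC's requirement that each
                 * stream update carries at least one surface update.
                 */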
8391
8392
8393                mutex_lock(&dm->dc_lock);
8394                dc_commit_updates_for_stream(dm->dc,
8395                                                     dummy_updates,
8396                                                     status->plane_count,
8397                                                     dm_new_crtc_state->stream,
8398                                                     &stream_update,
8399                                                     dc_state);
8400                mutex_unlock(&dm->dc_lock);
8401        }
8402
8403        /* Count number of newly disabled CRTCs for dropping PM refs later. */
8404        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8405                                      new_crtc_state, i) {
8406                if (old_crtc_state->active && !new_crtc_state->active)
8407                        crtc_disable_count++;
8408
8409                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8410                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8411
8412                /* Update the freesync config on the crtc state and the parameters used at irq time */
8413                update_stream_irq_parameters(dm, dm_new_crtc_state);
8414
8415                /* Handle vrr on->off / off->on transitions */
8416                amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8417                                                dm_new_crtc_state);
8418        }
8419
8420        /*
8421         * Enable interrupts for CRTCs that are newly enabled or went through
8422         * a modeset. This is intentionally deferred until after the front end
8423         * state has been modified, so that the OTG is already on and the IRQ
8424         * handlers don't access stale or invalid state.
8425         */
8426        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8427                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8428
8429                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8430
8431                if (new_crtc_state->active &&
8432                    (!old_crtc_state->active ||
8433                     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8434                        dc_stream_retain(dm_new_crtc_state->stream);
8435                        acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8436                        manage_dm_interrupts(adev, acrtc, true);
8437
8438#ifdef CONFIG_DEBUG_FS
8439                        /*
8440                         * Frontend may have changed, so reapply the CRC capture
8441                         * settings for the stream.
8442                         */
8443                        dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8444
8445                        if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8446                                amdgpu_dm_crtc_configure_crc_source(
8447                                        crtc, dm_new_crtc_state,
8448                                        dm_new_crtc_state->crc_src);
8449                        }
8450#endif
8451                }
8452        }
8453
8454        for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8455                if (new_crtc_state->async_flip)
8456                        wait_for_vblank = false;
8457
8458        /* Update planes when needed, per CRTC */
8459        for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8460                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8461
8462                if (dm_new_crtc_state->stream)
8463                        amdgpu_dm_commit_planes(state, dc_state, dev,
8464                                                dm, crtc, wait_for_vblank);
8465        }
8466
8467        /* Update audio instances for each connector. */
8468        amdgpu_dm_commit_audio(dev, state);
8469
8470        /*
8471         * Send vblank events for any CRTC whose event was not handled in the
8472         * flip path, and mark the event consumed for drm_atomic_helper_commit_hw_done().
8473         */
8474        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8475        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8476
8477                if (new_crtc_state->event)
8478                        drm_send_event_locked(dev, &new_crtc_state->event->base);
8479
8480                new_crtc_state->event = NULL;
8481        }
8482        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8483
8484        /* Signal HW programming completion */
8485        drm_atomic_helper_commit_hw_done(state);
8486
8487        if (wait_for_vblank)
8488                drm_atomic_helper_wait_for_flip_done(dev, state);
8489
8490        drm_atomic_helper_cleanup_planes(dev, state);
8491
8492        /* Return the stolen VGA memory to the VRAM pool */
8493        if (!adev->mman.keep_stolen_vga_memory)
8494                amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8495        amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8496
8497        /*
8498         * Finally, drop a runtime PM reference for each newly disabled CRTC,
8499         * so we can put the GPU into runtime suspend if we're not driving any
8500         * displays anymore.
8501         */
8502        for (i = 0; i < crtc_disable_count; i++)
8503                pm_runtime_put_autosuspend(dev->dev);
8504        pm_runtime_mark_last_busy(dev->dev);
8505
8506        if (dc_state_temp)
8507                dc_release_state(dc_state_temp);
8508}
8509
8510
8511static int dm_force_atomic_commit(struct drm_connector *connector)
8512{
8513        int ret = 0;
8514        struct drm_device *ddev = connector->dev;
8515        struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8516        struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8517        struct drm_plane *plane = disconnected_acrtc->base.primary;
8518        struct drm_connector_state *conn_state;
8519        struct drm_crtc_state *crtc_state;
8520        struct drm_plane_state *plane_state;
8521
8522        if (!state)
8523                return -ENOMEM;
8524
8525        state->acquire_ctx = ddev->mode_config.acquire_ctx;
8526
8527        /* Construct an atomic state to restore previous display settings */
8528
8529        /*
8530         * Attach connectors to drm_atomic_state
8531         */
8532        conn_state = drm_atomic_get_connector_state(state, connector);
8533
8534        ret = PTR_ERR_OR_ZERO(conn_state);
8535        if (ret)
8536                goto out;
8537
8538        /* Attach crtc to drm_atomic_state */
8539        crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8540
8541        ret = PTR_ERR_OR_ZERO(crtc_state);
8542        if (ret)
8543                goto out;
8544
8545        /* force a restore */
8546        crtc_state->mode_changed = true;
8547
8548        /* Attach plane to drm_atomic_state */
8549        plane_state = drm_atomic_get_plane_state(state, plane);
8550
8551        ret = PTR_ERR_OR_ZERO(plane_state);
8552        if (ret)
8553                goto out;
8554
8555        /* Call commit internally with the state we just constructed */
8556        ret = drm_atomic_commit(state);
8557
8558out:
8559        drm_atomic_state_put(state);
8560        if (ret)
8561                DRM_ERROR("Restoring old state failed with %i\n", ret);
8562
8563        return ret;
8564}
8565
8566/*
8567 * This function handles all cases when a set mode call does not come upon hotplug.
8568 * This includes when a display is unplugged and then plugged back into the
8569 * same port, and when running without usermode desktop manager support.
8570 */
8571void dm_restore_drm_connector_state(struct drm_device *dev,
8572                                    struct drm_connector *connector)
8573{
8574        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8575        struct amdgpu_crtc *disconnected_acrtc;
8576        struct dm_crtc_state *acrtc_state;
8577
8578        if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8579                return;
8580
8581        disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8582        if (!disconnected_acrtc)
8583                return;
8584
8585        acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8586        if (!acrtc_state->stream)
8587                return;
8588
8589        /*
8590         * If the previous sink is not released and different from the current,
8591         * we deduce we are in a state where we cannot rely on a usermode call
8592         * to turn on the display, so we do it here.
8593         */
8594        if (acrtc_state->stream->sink != aconnector->dc_sink)
8595                dm_force_atomic_commit(&aconnector->base);
8596}
8597
8598/*
8599 * Grabs all modesetting locks to serialize against any blocking commits,
8600 * and waits for completion of all non-blocking commits.
8601 */
8602static int do_aquire_global_lock(struct drm_device *dev,
8603                                 struct drm_atomic_state *state)
8604{
8605        struct drm_crtc *crtc;
8606        struct drm_crtc_commit *commit;
8607        long ret;
8608
8609        /*
8610         * Adding all modeset locks to acquire_ctx ensures that
8611         * when the framework releases it, the extra locks we
8612         * take here will get released too.
8613         */
8614        ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8615        if (ret)
8616                return ret;
8617
8618        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8619                spin_lock(&crtc->commit_lock);
8620                commit = list_first_entry_or_null(&crtc->commit_list,
8621                                struct drm_crtc_commit, commit_entry);
8622                if (commit)
8623                        drm_crtc_commit_get(commit);
8624                spin_unlock(&crtc->commit_lock);
8625
8626                if (!commit)
8627                        continue;
8628
8629                /*
8630                 * Make sure all pending HW programming has completed and
8631                 * all page flips are done.
8632                 */
8633                ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8634
8635                if (ret > 0)
8636                        ret = wait_for_completion_interruptible_timeout(
8637                                        &commit->flip_done, 10*HZ);
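                /*
                 * wait_for_completion_interruptible_timeout() returns the
                 * remaining jiffies (> 0) on success, 0 on timeout, and a
                 * negative errno if interrupted; only the timeout case is
                 * logged here, an interruption propagates through the final
                 * "ret < 0" check instead.
                 */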
8638
8639                if (ret == 0)
8640                        DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8641                                  "timed out\n", crtc->base.id, crtc->name);
8642
8643                drm_crtc_commit_put(commit);
8644        }
8645
8646        return ret < 0 ? ret : 0;
8647}
8648
8649static void get_freesync_config_for_crtc(
8650        struct dm_crtc_state *new_crtc_state,
8651        struct dm_connector_state *new_con_state)
8652{
8653        struct mod_freesync_config config = {0};
8654        struct amdgpu_dm_connector *aconnector =
8655                        to_amdgpu_dm_connector(new_con_state->base.connector);
8656        struct drm_display_mode *mode = &new_crtc_state->base.mode;
8657        int vrefresh = drm_mode_vrefresh(mode);
8658
8659        new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8660                                        vrefresh >= aconnector->min_vfreq &&
8661                                        vrefresh <= aconnector->max_vfreq;
8662
8663        if (new_crtc_state->vrr_supported) {
8664                new_crtc_state->stream->ignore_msa_timing_param = true;
8665                config.state = new_crtc_state->base.vrr_enabled ?
8666                                VRR_STATE_ACTIVE_VARIABLE :
8667                                VRR_STATE_INACTIVE;
8668                config.min_refresh_in_uhz =
8669                                aconnector->min_vfreq * 1000000;
8670                config.max_refresh_in_uhz =
8671                                aconnector->max_vfreq * 1000000;
8672                config.vsif_supported = true;
8673                config.btr = true;
8674        }
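        /*
         * Illustrative example: a panel advertising a 48-144 Hz FreeSync
         * range yields min_refresh_in_uhz = 48 * 1000000 and
         * max_refresh_in_uhz = 144 * 1000000, i.e. the refresh bounds are
         * handed to DC in units of micro-Hz.
         */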
8675
8676        new_crtc_state->freesync_config = config;
8677}
8678
8679static void reset_freesync_config_for_crtc(
8680        struct dm_crtc_state *new_crtc_state)
8681{
8682        new_crtc_state->vrr_supported = false;
8683
8684        memset(&new_crtc_state->vrr_infopacket, 0,
8685               sizeof(new_crtc_state->vrr_infopacket));
8686}
8687
8688static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8689                                struct drm_atomic_state *state,
8690                                struct drm_crtc *crtc,
8691                                struct drm_crtc_state *old_crtc_state,
8692                                struct drm_crtc_state *new_crtc_state,
8693                                bool enable,
8694                                bool *lock_and_validation_needed)
8695{
8696        struct dm_atomic_state *dm_state = NULL;
8697        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8698        struct dc_stream_state *new_stream;
8699        int ret = 0;
8700
8701        /*
8702         * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8703         * update changed items
8704         */
8705        struct amdgpu_crtc *acrtc = NULL;
8706        struct amdgpu_dm_connector *aconnector = NULL;
8707        struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8708        struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8709
8710        new_stream = NULL;
8711
8712        dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8713        dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8714        acrtc = to_amdgpu_crtc(crtc);
8715        aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8716
8717        /* TODO This hack should go away */
8718        if (aconnector && enable) {
8719                /* Make sure fake sink is created in plug-in scenario */
8720                drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8721                                                            &aconnector->base);
8722                drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8723                                                            &aconnector->base);
8724
8725                if (IS_ERR(drm_new_conn_state)) {
8726                        ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8727                        goto fail;
8728                }
8729
8730                dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8731                dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8732
8733                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8734                        goto skip_modeset;
8735
8736                new_stream = create_validate_stream_for_sink(aconnector,
8737                                                             &new_crtc_state->mode,
8738                                                             dm_new_conn_state,
8739                                                             dm_old_crtc_state->stream);
8740
8741                /*
8742                 * We can have no stream on ACTION_SET if a display
8743                 * was disconnected during S3. In that case it is not an
8744                 * error: the OS will be updated after detection and
8745                 * will do the right thing on the next atomic commit.
8746                 */
8747
8748                if (!new_stream) {
8749                        DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8750                                        __func__, acrtc->base.base.id);
8751                        ret = -ENOMEM;
8752                        goto fail;
8753                }
8754
8755                /*
8756                 * TODO: Check VSDB bits to decide whether this should
8757                 * be enabled or not.
8758                 */
8759                new_stream->triggered_crtc_reset.enabled =
8760                        dm->force_timing_sync;
8761
8762                dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8763
8764                ret = fill_hdr_info_packet(drm_new_conn_state,
8765                                           &new_stream->hdr_static_metadata);
8766                if (ret)
8767                        goto fail;
8768
8769                /*
8770                 * If we already removed the old stream from the context
8771                 * (and set the new stream to NULL) then we can't reuse
8772                 * the old stream even if the stream and scaling are unchanged.
8773                 * We'll hit the BUG_ON and black screen.
8774                 *
8775                 * TODO: Refactor this function to allow this check to work
8776                 * in all conditions.
8777                 */
8778                if (dm_new_crtc_state->stream &&
8779                    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8780                    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8781                        new_crtc_state->mode_changed = false;
8782                        DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8783                                         new_crtc_state->mode_changed);
8784                }
8785        }
8786
8787        /* mode_changed flag may get updated above, need to check again */
8788        if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8789                goto skip_modeset;
8790
8791        DRM_DEBUG_DRIVER(
8792                "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8793                "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8794                "connectors_changed:%d\n",
8795                acrtc->crtc_id,
8796                new_crtc_state->enable,
8797                new_crtc_state->active,
8798                new_crtc_state->planes_changed,
8799                new_crtc_state->mode_changed,
8800                new_crtc_state->active_changed,
8801                new_crtc_state->connectors_changed);
8802
8803        /* Remove stream for any changed/disabled CRTC */
8804        if (!enable) {
8805
8806                if (!dm_old_crtc_state->stream)
8807                        goto skip_modeset;
8808
8809                ret = dm_atomic_get_state(state, &dm_state);
8810                if (ret)
8811                        goto fail;
8812
8813                DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8814                                crtc->base.id);
8815
8816                /* i.e. reset mode */
8817                if (dc_remove_stream_from_ctx(
8818                                dm->dc,
8819                                dm_state->context,
8820                                dm_old_crtc_state->stream) != DC_OK) {
8821                        ret = -EINVAL;
8822                        goto fail;
8823                }
8824
8825                dc_stream_release(dm_old_crtc_state->stream);
8826                dm_new_crtc_state->stream = NULL;
8827
8828                reset_freesync_config_for_crtc(dm_new_crtc_state);
8829
8830                *lock_and_validation_needed = true;
8831
8832        } else {/* Add stream for any updated/enabled CRTC */
8833                /*
8834                 * Quick fix to prevent a NULL pointer on new_stream when newly
8835                 * added MST connectors are not found in the existing crtc_state
8836                 * in chained (daisy-chain) mode. TODO: dig out the root cause.
8837                 */
8838                if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8839                        goto skip_modeset;
8840
8841                if (modereset_required(new_crtc_state))
8842                        goto skip_modeset;
8843
8844                if (modeset_required(new_crtc_state, new_stream,
8845                                     dm_old_crtc_state->stream)) {
8846
8847                        WARN_ON(dm_new_crtc_state->stream);
8848
8849                        ret = dm_atomic_get_state(state, &dm_state);
8850                        if (ret)
8851                                goto fail;
8852
8853                        dm_new_crtc_state->stream = new_stream;
8854
8855                        dc_stream_retain(new_stream);
8856
8857                        DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8858                                                crtc->base.id);
8859
8860                        if (dc_add_stream_to_ctx(
8861                                        dm->dc,
8862                                        dm_state->context,
8863                                        dm_new_crtc_state->stream) != DC_OK) {
8864                                ret = -EINVAL;
8865                                goto fail;
8866                        }
8867
8868                        *lock_and_validation_needed = true;
8869                }
8870        }
8871
8872skip_modeset:
8873        /* Release extra reference */
8874        if (new_stream)
8875                dc_stream_release(new_stream);
8876
8877        /*
8878         * We want to do dc stream updates that do not require a
8879         * full modeset below.
8880         */
8881        if (!(enable && aconnector && new_crtc_state->active))
8882                return 0;
8883        /*
8884         * Given the above conditions, the dc stream cannot be NULL because:
8885         * 1. we're in the process of enabling CRTCs (the stream has just
8886         *    been added to the dc context, or is already on the context),
8887         * 2. it has a valid connector attached, and
8888         * 3. it is currently active and enabled.
8889         * => The dc stream state currently exists.
8890         */
8891        BUG_ON(dm_new_crtc_state->stream == NULL);
8892
8893        /* Scaling or underscan settings */
8894        if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8895                update_stream_scaling_settings(
8896                        &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8897
8898        /* ABM settings */
8899        dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8900
8901        /*
8902         * Color management settings. We also update color properties
8903         * when a modeset is needed, to ensure it gets reprogrammed.
8904         */
8905        if (dm_new_crtc_state->base.color_mgmt_changed ||
8906            drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8907                ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8908                if (ret)
8909                        goto fail;
8910        }
8911
8912        /* Update Freesync settings. */
8913        get_freesync_config_for_crtc(dm_new_crtc_state,
8914                                     dm_new_conn_state);
8915
8916        return ret;
8917
8918fail:
8919        if (new_stream)
8920                dc_stream_release(new_stream);
8921        return ret;
8922}
8923
8924static bool should_reset_plane(struct drm_atomic_state *state,
8925                               struct drm_plane *plane,
8926                               struct drm_plane_state *old_plane_state,
8927                               struct drm_plane_state *new_plane_state)
8928{
8929        struct drm_plane *other;
8930        struct drm_plane_state *old_other_state, *new_other_state;
8931        struct drm_crtc_state *new_crtc_state;
8932        int i;
8933
8934        /*
8935         * TODO: Remove this hack once the checks below are sufficient
8936         * to determine when we need to reset all the planes on
8937         * the stream.
8938         */
8939        if (state->allow_modeset)
8940                return true;
8941
8942        /* Exit early if we know that we're adding or removing the plane. */
8943        if (old_plane_state->crtc != new_plane_state->crtc)
8944                return true;
8945
8946        /* old crtc == new_crtc == NULL, plane not in context. */
8947        if (!new_plane_state->crtc)
8948                return false;
8949
8950        new_crtc_state =
8951                drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8952
8953        if (!new_crtc_state)
8954                return true;
8955
8956        /* CRTC Degamma changes currently require us to recreate planes. */
8957        if (new_crtc_state->color_mgmt_changed)
8958                return true;
8959
8960        if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8961                return true;
8962
8963        /*
8964         * If there are any new primary or overlay planes being added or
8965         * removed then the z-order can potentially change. To ensure
8966         * correct z-order and pipe acquisition the current DC architecture
8967         * requires us to remove and recreate all existing planes.
8968         *
8969         * TODO: Come up with a more elegant solution for this.
8970         */
8971        for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8972                struct amdgpu_framebuffer *old_afb, *new_afb;
8973                if (other->type == DRM_PLANE_TYPE_CURSOR)
8974                        continue;
8975
8976                if (old_other_state->crtc != new_plane_state->crtc &&
8977                    new_other_state->crtc != new_plane_state->crtc)
8978                        continue;
8979
8980                if (old_other_state->crtc != new_other_state->crtc)
8981                        return true;
8982
8983                /* Src/dst size and scaling updates. */
8984                if (old_other_state->src_w != new_other_state->src_w ||
8985                    old_other_state->src_h != new_other_state->src_h ||
8986                    old_other_state->crtc_w != new_other_state->crtc_w ||
8987                    old_other_state->crtc_h != new_other_state->crtc_h)
8988                        return true;
8989
8990                /* Rotation / mirroring updates. */
8991                if (old_other_state->rotation != new_other_state->rotation)
8992                        return true;
8993
8994                /* Blending updates. */
8995                if (old_other_state->pixel_blend_mode !=
8996                    new_other_state->pixel_blend_mode)
8997                        return true;
8998
8999                /* Alpha updates. */
9000                if (old_other_state->alpha != new_other_state->alpha)
9001                        return true;
9002
9003                /* Colorspace changes. */
9004                if (old_other_state->color_range != new_other_state->color_range ||
9005                    old_other_state->color_encoding != new_other_state->color_encoding)
9006                        return true;
9007
9008                /* Framebuffer checks come last. */
9009                if (!old_other_state->fb || !new_other_state->fb)
9010                        continue;
9011
9012                /* Pixel format changes can require bandwidth updates. */
9013                if (old_other_state->fb->format != new_other_state->fb->format)
9014                        return true;
9015
9016                old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9017                new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9018
9019                /* Tiling and DCC changes also require bandwidth updates. */
9020                if (old_afb->tiling_flags != new_afb->tiling_flags ||
9021                    old_afb->base.modifier != new_afb->base.modifier)
9022                        return true;
9023        }
9024
9025        return false;
9026}
9027
9028static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9029                              struct drm_plane_state *new_plane_state,
9030                              struct drm_framebuffer *fb)
9031{
9032        struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9033        struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9034        unsigned int pitch;
9035        bool linear;
9036
9037        if (fb->width > new_acrtc->max_cursor_width ||
9038            fb->height > new_acrtc->max_cursor_height) {
9039                DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9040                                 new_plane_state->fb->width,
9041                                 new_plane_state->fb->height);
9042                return -EINVAL;
9043        }
9044        if (new_plane_state->src_w != fb->width << 16 ||
9045            new_plane_state->src_h != fb->height << 16) {
9046                DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9047                return -EINVAL;
9048        }
9049
9050        /* Pitch in pixels */
9051        pitch = fb->pitches[0] / fb->format->cpp[0];
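        /*
         * For example, a 64x64 ARGB8888 cursor FB has pitches[0] = 256 bytes
         * and cpp[0] = 4 bytes per pixel, giving a pitch of 64 pixels, one of
         * the sizes accepted by the switch below.
         */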
9052
9053        if (fb->width != pitch) {
9054                DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9055                                 fb->width, pitch);
9056                return -EINVAL;
9057        }
9058
9059        switch (pitch) {
9060        case 64:
9061        case 128:
9062        case 256:
9063                /* FB pitch is supported by cursor plane */
9064                break;
9065        default:
9066                DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9067                return -EINVAL;
9068        }
9069
9070        /* Core DRM takes care of checking FB modifiers, so we only need to
9071         * check tiling flags when the FB doesn't have a modifier. */
9072        if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9073                if (adev->family < AMDGPU_FAMILY_AI) {
9074                        linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9075                                 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9076                                 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9077                } else {
9078                        linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9079                }
9080                if (!linear) {
9081                        DRM_DEBUG_ATOMIC("Cursor FB not linear");
9082                        return -EINVAL;
9083                }
9084        }
9085
9086        return 0;
9087}
9088
9089static int dm_update_plane_state(struct dc *dc,
9090                                 struct drm_atomic_state *state,
9091                                 struct drm_plane *plane,
9092                                 struct drm_plane_state *old_plane_state,
9093                                 struct drm_plane_state *new_plane_state,
9094                                 bool enable,
9095                                 bool *lock_and_validation_needed)
9096{
9097
9098        struct dm_atomic_state *dm_state = NULL;
9099        struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9100        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9101        struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9102        struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9103        struct amdgpu_crtc *new_acrtc;
9104        bool needs_reset;
9105        int ret = 0;
9106
9107
9108        new_plane_crtc = new_plane_state->crtc;
9109        old_plane_crtc = old_plane_state->crtc;
9110        dm_new_plane_state = to_dm_plane_state(new_plane_state);
9111        dm_old_plane_state = to_dm_plane_state(old_plane_state);
9112
9113        if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9114                if (!enable || !new_plane_crtc ||
9115                        drm_atomic_plane_disabling(plane->state, new_plane_state))
9116                        return 0;
9117
9118                new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9119
9120                if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9121                        DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9122                        return -EINVAL;
9123                }
9124
9125                if (new_plane_state->fb) {
9126                        ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9127                                                 new_plane_state->fb);
9128                        if (ret)
9129                                return ret;
9130                }
9131
9132                return 0;
9133        }
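        /*
         * Note that cursor planes never receive a dc_plane_state of their
         * own: they are programmed through dc_stream_set_cursor_position()
         * and dc_stream_set_cursor_attributes() on the owning stream, which
         * is why the branch above only validates the FB and returns.
         */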
9134
9135        needs_reset = should_reset_plane(state, plane, old_plane_state,
9136                                         new_plane_state);
9137
9138        /* Remove any changed/removed planes */
9139        if (!enable) {
9140                if (!needs_reset)
9141                        return 0;
9142
9143                if (!old_plane_crtc)
9144                        return 0;
9145
9146                old_crtc_state = drm_atomic_get_old_crtc_state(
9147                                state, old_plane_crtc);
9148                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9149
9150                if (!dm_old_crtc_state->stream)
9151                        return 0;
9152
9153                DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9154                                plane->base.id, old_plane_crtc->base.id);
9155
9156                ret = dm_atomic_get_state(state, &dm_state);
9157                if (ret)
9158                        return ret;
9159
9160                if (!dc_remove_plane_from_context(
9161                                dc,
9162                                dm_old_crtc_state->stream,
9163                                dm_old_plane_state->dc_state,
9164                                dm_state->context)) {
9165
9166                        return -EINVAL;
9167                }
9168
9169
9170                dc_plane_state_release(dm_old_plane_state->dc_state);
9171                dm_new_plane_state->dc_state = NULL;
9172
9173                *lock_and_validation_needed = true;
9174
9175        } else { /* Add new planes */
9176                struct dc_plane_state *dc_new_plane_state;
9177
9178                if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9179                        return 0;
9180
9181                if (!new_plane_crtc)
9182                        return 0;
9183
9184                new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9185                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9186
9187                if (!dm_new_crtc_state->stream)
9188                        return 0;
9189
9190                if (!needs_reset)
9191                        return 0;
9192
9193                ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9194                if (ret)
9195                        return ret;
9196
9197                WARN_ON(dm_new_plane_state->dc_state);
9198
9199                dc_new_plane_state = dc_create_plane_state(dc);
9200                if (!dc_new_plane_state)
9201                        return -ENOMEM;
9202
9203                DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9204                                plane->base.id, new_plane_crtc->base.id);
9205
9206                ret = fill_dc_plane_attributes(
9207                        drm_to_adev(new_plane_crtc->dev),
9208                        dc_new_plane_state,
9209                        new_plane_state,
9210                        new_crtc_state);
9211                if (ret) {
9212                        dc_plane_state_release(dc_new_plane_state);
9213                        return ret;
9214                }
9215
9216                ret = dm_atomic_get_state(state, &dm_state);
9217                if (ret) {
9218                        dc_plane_state_release(dc_new_plane_state);
9219                        return ret;
9220                }
9221
9222                /*
9223                 * Any atomic check errors that occur after this will
9224                 * not need a release. The plane state will be attached
9225                 * to the stream, and therefore part of the atomic
9226                 * state. It'll be released when the atomic state is
9227                 * cleaned.
9228                 */
9229                if (!dc_add_plane_to_context(
9230                                dc,
9231                                dm_new_crtc_state->stream,
9232                                dc_new_plane_state,
9233                                dm_state->context)) {
9234
9235                        dc_plane_state_release(dc_new_plane_state);
9236                        return -EINVAL;
9237                }
9238
9239                dm_new_plane_state->dc_state = dc_new_plane_state;
9240
9241                /* Tell DC to do a full surface update every time there
9242                 * is a plane change. Inefficient, but works for now.
9243                 */
9244                dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9245
9246                *lock_and_validation_needed = true;
9247        }
9248
9249
9250        return ret;
9251}
9252
9253static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9254                                struct drm_crtc *crtc,
9255                                struct drm_crtc_state *new_crtc_state)
9256{
9257        struct drm_plane_state *new_cursor_state, *new_primary_state;
9258        int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9259
9260        /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9261         * cursor per pipe, but it's going to inherit the scaling and
9262         * positioning from the underlying pipe. Check that the cursor plane's
9263         * scaling matches the primary plane's. */
9264
9265        new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9266        new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9267        if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9268                return 0;
9269        }
9270
9271        cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9272                         (new_cursor_state->src_w >> 16);
9273        cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9274                         (new_cursor_state->src_h >> 16);
9275
9276        primary_scale_w = new_primary_state->crtc_w * 1000 /
9277                         (new_primary_state->src_w >> 16);
9278        primary_scale_h = new_primary_state->crtc_h * 1000 /
9279                         (new_primary_state->src_h >> 16);
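        /*
         * The scales are fixed point with a factor of 1000: a 64x64 cursor
         * shown at 64x64 scales to 1000 (1.0x), while a primary plane
         * scanning a 1920x1080 source out to a 3840x2160 mode scales to 2000
         * (2.0x); such a mismatch fails the comparison below.
         */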
9280
9281        if (cursor_scale_w != primary_scale_w ||
9282            cursor_scale_h != primary_scale_h) {
9283                DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9284                return -EINVAL;
9285        }
9286
9287        return 0;
9288}
9289
9290#if defined(CONFIG_DRM_AMD_DC_DCN)
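/*
 * On an MST link, changing one stream's mode can force DSC on or off, or
 * shift the DSC bandwidth allocation for every stream sharing the link, so
 * all CRTCs driving connectors on the same MST manager are pulled into the
 * atomic state for revalidation.
 */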
9291static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9292{
9293        struct drm_connector *connector;
9294        struct drm_connector_state *conn_state;
9295        struct amdgpu_dm_connector *aconnector = NULL;
9296        int i;
9297        for_each_new_connector_in_state(state, connector, conn_state, i) {
9298                if (conn_state->crtc != crtc)
9299                        continue;
9300
9301                aconnector = to_amdgpu_dm_connector(connector);
9302                if (!aconnector->port || !aconnector->mst_port)
9303                        aconnector = NULL;
9304                else
9305                        break;
9306        }
9307
9308        if (!aconnector)
9309                return 0;
9310
9311        return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9312}
9313#endif
9314
9315/**
9316 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9317 * @dev: The DRM device
9318 * @state: The atomic state to commit
9319 *
9320 * Validate that the given atomic state is programmable by DC into hardware.
9321 * This involves constructing a &struct dc_state reflecting the new hardware
9322 * state we wish to commit, then querying DC to see if it is programmable. It's
9323 * important not to modify the existing DC state. Otherwise, atomic_check
9324 * may unexpectedly commit hardware changes.
9325 *
9326 * When validating the DC state, it's important that the right locks are
9327 * acquired. For the full-update case, which removes/adds/updates streams on
9328 * one CRTC while flipping on another, acquiring the global lock guarantees
9329 * that any such full-update commit will wait for completion of any outstanding
9330 * flip using DRM's synchronization events.
9331 *
9332 * Note that DM adds the affected connectors for all CRTCs in state, even when
9333 * that might not seem necessary. This is because DC stream creation requires the
9334 * DC sink, which is tied to the DRM connector state. Cleaning this up should
9335 * be possible but non-trivial - a possible TODO item.
9336 *
9337 * Return: 0 on success, negative error code on failure.
9338 */
9339static int amdgpu_dm_atomic_check(struct drm_device *dev,
9340                                  struct drm_atomic_state *state)
9341{
9342        struct amdgpu_device *adev = drm_to_adev(dev);
9343        struct dm_atomic_state *dm_state = NULL;
9344        struct dc *dc = adev->dm.dc;
9345        struct drm_connector *connector;
9346        struct drm_connector_state *old_con_state, *new_con_state;
9347        struct drm_crtc *crtc;
9348        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9349        struct drm_plane *plane;
9350        struct drm_plane_state *old_plane_state, *new_plane_state;
9351        enum dc_status status;
9352        int ret, i;
9353        bool lock_and_validation_needed = false;
9354        struct dm_crtc_state *dm_old_crtc_state;
9355
9356        trace_amdgpu_dm_atomic_check_begin(state);
9357
9358        ret = drm_atomic_helper_check_modeset(dev, state);
9359        if (ret)
9360                goto fail;
9361
9362        /* Check connector changes */
9363        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9364                struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9365                struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9366
9367                /* Skip connectors that are disabled or already part of a modeset. */
9368                if (!old_con_state->crtc && !new_con_state->crtc)
9369                        continue;
9370
9371                if (!new_con_state->crtc)
9372                        continue;
9373
9374                new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9375                if (IS_ERR(new_crtc_state)) {
9376                        ret = PTR_ERR(new_crtc_state);
9377                        goto fail;
9378                }
9379
9380                if (dm_old_con_state->abm_level !=
9381                    dm_new_con_state->abm_level)
9382                        new_crtc_state->connectors_changed = true;
9383        }
9384
9385#if defined(CONFIG_DRM_AMD_DC_DCN)
9386        if (adev->asic_type >= CHIP_NAVI10) {
9387                for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9388                        if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9389                                ret = add_affected_mst_dsc_crtcs(state, crtc);
9390                                if (ret)
9391                                        goto fail;
9392                        }
9393                }
9394        }
9395#endif
9396        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9397                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9398
9399                if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9400                    !new_crtc_state->color_mgmt_changed &&
9401                    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9402                    !dm_old_crtc_state->dsc_force_changed)
9403                        continue;
9404
9405                if (!new_crtc_state->enable)
9406                        continue;
9407
9408                ret = drm_atomic_add_affected_connectors(state, crtc);
9409                if (ret)
9410                        goto fail;
9411
9412                ret = drm_atomic_add_affected_planes(state, crtc);
9413                if (ret)
9414                        goto fail;
9415
9416                if (dm_old_crtc_state->dsc_force_changed)
9417                        new_crtc_state->mode_changed = true;
9418        }
9419
9420        /*
9421         * Add all primary and overlay planes on the CRTC to the state
9422         * whenever a plane is enabled to maintain correct z-ordering
9423         * and to enable fast surface updates.
9424         */
9425        drm_for_each_crtc(crtc, dev) {
9426                bool modified = false;
9427
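                /*
                 * Added note: cursor planes are skipped in this pull-in;
                 * they are validated separately via dm_check_crtc_cursor()
                 * further below.
                 */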
9428                for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9429                        if (plane->type == DRM_PLANE_TYPE_CURSOR)
9430                                continue;
9431
9432                        if (new_plane_state->crtc == crtc ||
9433                            old_plane_state->crtc == crtc) {
9434                                modified = true;
9435                                break;
9436                        }
9437                }
9438
9439                if (!modified)
9440                        continue;
9441
9442                drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9443                        if (plane->type == DRM_PLANE_TYPE_CURSOR)
9444                                continue;
9445
9446                        new_plane_state =
9447                                drm_atomic_get_plane_state(state, plane);
9448
9449                        if (IS_ERR(new_plane_state)) {
9450                                ret = PTR_ERR(new_plane_state);
9451                                goto fail;
9452                        }
9453                }
9454        }
9455
9456        /* Removal pass: remove existing planes if they are modified */
9457        for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9458                ret = dm_update_plane_state(dc, state, plane,
9459                                            old_plane_state,
9460                                            new_plane_state,
9461                                            false,
9462                                            &lock_and_validation_needed);
9463                if (ret)
9464                        goto fail;
9465        }
9466
9467        /* Disable all crtcs which require disable */
9468        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9469                ret = dm_update_crtc_state(&adev->dm, state, crtc,
9470                                           old_crtc_state,
9471                                           new_crtc_state,
9472                                           false,
9473                                           &lock_and_validation_needed);
9474                if (ret)
9475                        goto fail;
9476        }
9477
9478        /* Enable all crtcs which require enable */
9479        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9480                ret = dm_update_crtc_state(&adev->dm, state, crtc,
9481                                           old_crtc_state,
9482                                           new_crtc_state,
9483                                           true,
9484                                           &lock_and_validation_needed);
9485                if (ret)
9486                        goto fail;
9487        }
9488
9489        /* Add new/modified planes */
9490        for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9491                ret = dm_update_plane_state(dc, state, plane,
9492                                            old_plane_state,
9493                                            new_plane_state,
9494                                            true,
9495                                            &lock_and_validation_needed);
9496                if (ret)
9497                        goto fail;
9498        }
9499
9500        /* Run this here since we want to validate the streams we created */
9501        ret = drm_atomic_helper_check_planes(dev, state);
9502        if (ret)
9503                goto fail;
9504
9505        /* Check cursor planes scaling */
9506        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9507                ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9508                if (ret)
9509                        goto fail;
9510        }
9511
9512        if (state->legacy_cursor_update) {
9513                /*
9514                 * This is a fast cursor update coming from the plane update
9515                 * helper, check if it can be done asynchronously for better
9516                 * performance.
9517                 */
9518                state->async_update =
9519                        !drm_atomic_helper_async_check(dev, state);
9520
9521                /*
9522                 * Skip the remaining global validation if this is an async
9523                 * update. Cursor updates can be done without affecting
9524                 * state or bandwidth calcs and this avoids the performance
9525                 * penalty of locking the private state object and
9526                 * allocating a new dc_state.
9527                 */
9528                if (state->async_update)
9529                        return 0;
9530        }
9531
9532        /* Check scaling and underscan changes */
9533        /* TODO: Scaling-change validation was removed because a new stream
9534         * cannot currently be committed into the context without causing a
9535         * full reset. Need to decide how to handle this.
9536         */
9537        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9538                struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9539                struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9540                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9541
9542                /* Skip any modesets/resets */
9543                if (!acrtc || drm_atomic_crtc_needs_modeset(
9544                                drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9545                        continue;
9546
9547                /* Skip anything that is not a scaling or underscan change */
9548                if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9549                        continue;
9550
9551                lock_and_validation_needed = true;
9552        }
9553
9554        /*
9555         * Streams and planes are reset when there are changes that affect
9556         * bandwidth. Anything that affects bandwidth needs to go through
9557         * DC global validation to ensure that the configuration can be
9558         * applied to hardware.
9559         *
9560         * Currently we have to stall here in atomic_check for outstanding
9561         * commits to finish, because our IRQ handlers reference DRM state
9562         * directly - otherwise we can end up disabling interrupts too early.
9563         *
9564         * TODO: Remove this stall and drop DM state private objects.
9565         */
9567        if (lock_and_validation_needed) {
9568                ret = dm_atomic_get_state(state, &dm_state);
9569                if (ret)
9570                        goto fail;
9571
9572                ret = do_aquire_global_lock(dev, state);
9573                if (ret)
9574                        goto fail;
9575
9576#if defined(CONFIG_DRM_AMD_DC_DCN)
9577                if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
9578                        ret = -EINVAL;
9579                        goto fail;
9580                }
9581
9580                ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9581                if (ret)
9582                        goto fail;
9583#endif
9584
9585                /*
9586                 * Perform validation of MST topology in the state:
9587                 * We need to perform MST atomic check before calling
9588                 * dc_validate_global_state(), or there is a chance
9589                 * to get stuck in an infinite loop and hang eventually.
9590                 */
9591                ret = drm_dp_mst_atomic_check(state);
9592                if (ret)
9593                        goto fail;
9594                status = dc_validate_global_state(dc, dm_state->context, false);
9595                if (status != DC_OK) {
9596                        DC_LOG_WARNING("DC global validation failure: %s (%d)",
9597                                       dc_status_to_str(status), status);
9598                        ret = -EINVAL;
9599                        goto fail;
9600                }
9601        } else {
9602                /*
9603                 * The commit is a fast update. Fast updates shouldn't change
9604                 * the DC context, affect global validation, and can have their
9605                 * commit work done in parallel with other commits not touching
9606                 * the same resource. If we have a new DC context as part of
9607                 * the DM atomic state from validation we need to free it and
9608                 * retain the existing one instead.
9609                 *
9610                 * Furthermore, since the DM atomic state only contains the DC
9611                 * context and can safely be nulled out, we can free the state
9612                 * and clear the associated private object now to free some
9613                 * memory and avoid a possible use-after-free later.
9614                 */
9615
9616                for (i = 0; i < state->num_private_objs; i++) {
9617                        struct drm_private_obj *obj = state->private_objs[i].ptr;
9618
9619                        if (obj->funcs == adev->dm.atomic_obj.funcs) {
9620                                int j = state->num_private_objs-1;
9621
9622                                dm_atomic_destroy_state(obj,
9623                                                state->private_objs[i].state);
9624
9625                                /* If i is not at the end of the array then the
9626                                 * last element needs to be moved to where i was
9627                                 * before the array can safely be truncated.
9628                                 */
9629                                if (i != j)
9630                                        state->private_objs[i] =
9631                                                state->private_objs[j];
9632
9633                                state->private_objs[j].ptr = NULL;
9634                                state->private_objs[j].state = NULL;
9635                                state->private_objs[j].old_state = NULL;
9636                                state->private_objs[j].new_state = NULL;
9637
9638                                state->num_private_objs = j;
9639                                break;
9640                        }
9641                }
9642        }
9643
9644        /* Store the overall update type now, for use later at commit time. */
9645        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9646                struct dm_crtc_state *dm_new_crtc_state =
9647                        to_dm_crtc_state(new_crtc_state);
9648
9649                dm_new_crtc_state->update_type = lock_and_validation_needed ?
9650                                                         UPDATE_TYPE_FULL :
9651                                                         UPDATE_TYPE_FAST;
9652        }
9653
9654        /* ret must be 0 (success) at this point */
9655        WARN_ON(ret);
9656
9657        trace_amdgpu_dm_atomic_check_finish(state, ret);
9658
9659        return ret;
9660
9661fail:
9662        if (ret == -EDEADLK)
9663                DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9664        else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9665                DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9666        else
9667                DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
9668
9669        trace_amdgpu_dm_atomic_check_finish(state, ret);
9670
9671        return ret;
9672}
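/*
 * Added context: amdgpu_dm_atomic_check() is the driver's hook for the DRM
 * atomic check phase. A minimal sketch of how such a hook is wired up is
 * shown below; it is illustrative only - the actual amdgpu_dm_mode_funcs
 * table defined earlier in this file carries additional callbacks:
 *
 *	static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
 *		.fb_create = amdgpu_display_user_framebuffer_create,
 *		.atomic_check = amdgpu_dm_atomic_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};
 */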
9673
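/*
 * Added note: reads DP_DOWN_STREAM_PORT_COUNT (DPCD 0x007) and tests the
 * DP_MSA_TIMING_PAR_IGNORED bit. A sink that can ignore the MSA timing
 * parameters is able to track a varying refresh rate, which is a
 * prerequisite for FreeSync over DP/eDP.
 */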
9674static bool is_dp_capable_without_timing_msa(struct dc *dc,
9675                                             struct amdgpu_dm_connector *amdgpu_dm_connector)
9676{
9677        uint8_t dpcd_data;
9678        bool capable = false;
9679
9680        if (amdgpu_dm_connector->dc_link &&
9681                dm_helpers_dp_read_dpcd(
9682                                NULL,
9683                                amdgpu_dm_connector->dc_link,
9684                                DP_DOWN_STREAM_PORT_COUNT,
9685                                &dpcd_data,
9686                                sizeof(dpcd_data))) {
9687                capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
9688        }
9689
9690        return capable;
9691}
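
/*
 * Added note: amdgpu_dm_update_freesync_caps() - (re)compute FreeSync support
 * @connector: the DRM connector being updated
 * @edid: freshly parsed EDID, or NULL on disconnect
 *
 * Walks the EDID detailed timing descriptors looking for a monitor range
 * descriptor, caches the supported vertical refresh window on the
 * amdgpu_dm_connector, and reflects the result in the connector's
 * vrr_capable property.
 */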
9692void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9693                                        struct edid *edid)
9694{
9695        int i;
9696        bool edid_check_required;
9697        struct detailed_timing *timing;
9698        struct detailed_non_pixel *data;
9699        struct detailed_data_monitor_range *range;
9700        struct amdgpu_dm_connector *amdgpu_dm_connector =
9701                        to_amdgpu_dm_connector(connector);
9702        struct dm_connector_state *dm_con_state = NULL;
9703
9704        struct drm_device *dev = connector->dev;
9705        struct amdgpu_device *adev = drm_to_adev(dev);
9706        bool freesync_capable = false;
9707
9708        if (!connector->state) {
9709                DRM_ERROR("%s - Connector has no state\n", __func__);
9710                goto update;
9711        }
9712
9713        if (!edid) {
9714                dm_con_state = to_dm_connector_state(connector->state);
9715
9716                amdgpu_dm_connector->min_vfreq = 0;
9717                amdgpu_dm_connector->max_vfreq = 0;
9718                amdgpu_dm_connector->pixel_clock_mhz = 0;
9719
9720                goto update;
9721        }
9722
9723        dm_con_state = to_dm_connector_state(connector->state);
9724
9725        edid_check_required = false;
9726        if (!amdgpu_dm_connector->dc_sink) {
9727                DRM_ERROR("dc_sink is NULL, failed to set up the FreeSync module.\n");
9728                goto update;
9729        }
9730        if (!adev->dm.freesync_module)
9731                goto update;
9732        /*
9733         * If the EDID is non-NULL, restrict FreeSync support to DP and eDP.
9734         */
9735        if (edid) {
9736                if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9737                        || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9738                        edid_check_required = is_dp_capable_without_timing_msa(
9739                                                adev->dm.dc,
9740                                                amdgpu_dm_connector);
9741                }
9742        }
9743        if (edid_check_required && (edid->version > 1 ||
9744           (edid->version == 1 && edid->revision > 1))) {
9745                for (i = 0; i < 4; i++) {
9746
9747                        timing  = &edid->detailed_timings[i];
9748                        data    = &timing->data.other_data;
9749                        range   = &data->data.range;
9750                        /*
9751                         * Monitor range descriptors indicate continuous frequency support
9752                         */
9753                        if (data->type != EDID_DETAIL_MONITOR_RANGE)
9754                                continue;
9755                        /*
9756                         * Only handle descriptors with range-limits flags == 1,
9757                         * i.e. no additional timing information is provided.
9758                         * Default GTF, secondary GTF curve and CVT are not
9759                         * supported.
9760                         */
9761                        if (range->flags != 1)
9762                                continue;
9763
9764                        amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9765                        amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9766                        amdgpu_dm_connector->pixel_clock_mhz =
9767                                range->pixel_clock_mhz * 10;
9768
9769                        connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
9770                        connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
9771
9772                        break;
9773                }
9774
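                /*
                 * Added note: require a usable VRR window. FreeSync is only
                 * reported when the monitor range spans more than 10 Hz
                 * (e.g. 48-60 Hz qualifies, 58-60 Hz does not).
                 */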
9775                if (amdgpu_dm_connector->max_vfreq -
9776                    amdgpu_dm_connector->min_vfreq > 10) {
9777
9778                        freesync_capable = true;
9779                }
9780        }
9781
9782update:
9783        if (dm_con_state)
9784                dm_con_state->freesync_capable = freesync_capable;
9785
9786        if (connector->vrr_capable_property)
9787                drm_connector_set_vrr_capable_property(connector,
9788                                                       freesync_capable);
9789}
9790
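/*
 * Added note: amdgpu_dm_set_psr_caps() - cache the sink's PSR capability
 * @link: the eDP link to query
 *
 * Reads the PSR capability block starting at DP_PSR_SUPPORT (DPCD 0x070)
 * and records the reported PSR version; any non-zero version is treated
 * as PSR version 1 support.
 */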
9791static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9792{
9793        uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9794
9795        if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9796                return;
9797        if (link->type == dc_connection_none)
9798                return;
9799        if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9800                                        dpcd_data, sizeof(dpcd_data))) {
9801                link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9802
9803                if (dpcd_data[0] == 0) {
9804                        link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9805                        link->psr_settings.psr_feature_enabled = false;
9806                } else {
9807                        link->psr_settings.psr_version = DC_PSR_VERSION_1;
9808                        link->psr_settings.psr_feature_enabled = true;
9809                }
9810
9811                DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9812        }
9813}
9814
9815/*
9816 * amdgpu_dm_link_setup_psr() - configure psr link
9817 * @stream: stream state
9818 *
9819 * Return: true if success
9820 */
9821static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9822{
9823        struct dc_link *link = NULL;
9824        struct psr_config psr_config = {0};
9825        struct psr_context psr_context = {0};
9826        bool ret = false;
9827
9828        if (stream == NULL)
9829                return false;
9830
9831        link = stream->link;
9832
9833        psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9834
9835        if (psr_config.psr_version > 0) {
9836                psr_config.psr_exit_link_training_required = 0x1;
9837                psr_config.psr_frame_capture_indication_req = 0;
9838                psr_config.psr_rfb_setup_time = 0x37;
9839                psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9840                psr_config.allow_smu_optimizations = 0x0;
9841
9842                ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9843        }
9844
9845        DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9846
9847        return ret;
9848}
9849
9850/*
9851 * amdgpu_dm_psr_enable() - enable psr f/w
9852 * @stream: stream state
9853 *
9854 * Return: true if success
9855 */
9856bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9857{
9858        struct dc_link *link = stream->link;
9859        unsigned int vsync_rate_hz = 0;
9860        struct dc_static_screen_params params = {0};
9861        /* Calculate number of static frames before generating interrupt to
9862         * enter PSR.
9863         */
9864        /* Init fail-safe of 2 static frames */
9865        unsigned int num_frames_static = 2;
9866
9867        DRM_DEBUG_DRIVER("Enabling psr...\n");
9868
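        /*
         * Added note: refresh rate = pixel clock / (h_total * v_total);
         * pix_clk_100hz is stored in units of 100 Hz, hence the * 100.
         */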
9869        vsync_rate_hz = div64_u64(div64_u64((
9870                        stream->timing.pix_clk_100hz * 100),
9871                        stream->timing.v_total),
9872                        stream->timing.h_total);
9873
9874        /*
9875         * Round up: calculate the number of frames such that at least
9876         * 30 ms of time has passed.
9877         */
9878        if (vsync_rate_hz != 0) {
9879                unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9880                num_frames_static = (30000 / frame_time_microsec) + 1;
9881        }
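        /*
         * Added worked example: at 60 Hz, frame_time_microsec =
         * 1000000 / 60 = 16666 us, so num_frames_static =
         * 30000 / 16666 + 1 = 2; at 144 Hz it is 30000 / 6944 + 1 = 5
         * static frames before PSR entry.
         */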
9882
9883        params.triggers.cursor_update = true;
9884        params.triggers.overlay_update = true;
9885        params.triggers.surface_update = true;
9886        params.num_frames = num_frames_static;
9887
9888        dc_stream_set_static_screen_params(link->ctx->dc,
9889                                           &stream, 1,
9890                                           &params);
9891
9892        return dc_link_set_psr_allow_active(link, true, false, false);
9893}
9894
9895/*
9896 * amdgpu_dm_psr_disable() - disable psr f/w
9897 * @stream:  stream state
9898 *
9899 * Return: true if success
9900 */
9901static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9902{
9903        DRM_DEBUG_DRIVER("Disabling psr...\n");
9905
9906        return dc_link_set_psr_allow_active(stream->link, false, true, false);
9907}
9908
9909/*
9910 * amdgpu_dm_psr_disable_all() - disable psr f/w on all streams
9911 * if psr is enabled on any of them
9912 *
9913 * Return: true if success
9914 */
9915static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9916{
9917        DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9918        return dc_set_psr_allow_active(dm->dc, false);
9919}
9920
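/*
 * Added note: amdgpu_dm_trigger_timing_sync() - re-evaluate CRTC timing sync
 * @dev: DRM device
 *
 * Propagates the force_timing_sync setting to every active stream and
 * retriggers per-frame CRTC master synchronization, under dc_lock.
 */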
9921void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9922{
9923        struct amdgpu_device *adev = drm_to_adev(dev);
9924        struct dc *dc = adev->dm.dc;
9925        int i;
9926
9927        mutex_lock(&adev->dm.dc_lock);
9928        if (dc->current_state) {
9929                for (i = 0; i < dc->current_state->stream_count; ++i)
9930                        dc->current_state->streams[i]
9931                                ->triggered_crtc_reset.enabled =
9932                                adev->dm.force_timing_sync;
9933
9934                dm_enable_per_frame_crtc_master_sync(dc->current_state);
9935                dc_trigger_sync(dc, dc->current_state);
9936        }
9937        mutex_unlock(&adev->dm.dc_lock);
9938}
9939
9940void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9941                       uint32_t value, const char *func_name)
9942{
9943#ifdef DM_CHECK_ADDR_0
9944        if (address == 0) {
9945                DC_ERR("invalid register write. address = 0\n");
9946                return;
9947        }
9948#endif
9949        cgs_write_register(ctx->cgs_device, address, value);
9950        trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9951}
9952
9953uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9954                          const char *func_name)
9955{
9956        uint32_t value;
9957#ifdef DM_CHECK_ADDR_0
9958        if (address == 0) {
9959                DC_ERR("invalid register read; address = 0\n");
9960                return 0;
9961        }
9962#endif
9963
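        /*
         * Added note: while the DMUB register helper is still gathering
         * register writes for offload, the queued writes have not reached
         * the hardware, so a readback here would return stale data; flag
         * it and return 0.
         */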
9964        if (ctx->dmub_srv &&
9965            ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9966            !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9967                ASSERT(false);
9968                return 0;
9969        }
9970
9971        value = cgs_read_register(ctx->cgs_device, address);
9972
9973        trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
9974
9975        return value;
9976}
9977