/* linux/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c */
   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23#include <drm/drmP.h>
  24#include "amdgpu.h"
  25#include "amdgpu_pm.h"
  26#include "amdgpu_i2c.h"
  27#include "cikd.h"
  28#include "atom.h"
  29#include "amdgpu_atombios.h"
  30#include "atombios_crtc.h"
  31#include "atombios_encoders.h"
  32#include "amdgpu_pll.h"
  33#include "amdgpu_connectors.h"
  34#include "dce_v8_0.h"
  35
  36#include "dce/dce_8_0_d.h"
  37#include "dce/dce_8_0_sh_mask.h"
  38
  39#include "gca/gfx_7_2_enum.h"
  40
  41#include "gmc/gmc_7_1_d.h"
  42#include "gmc/gmc_7_1_sh_mask.h"
  43
  44#include "oss/oss_2_0_d.h"
  45#include "oss/oss_2_0_sh_mask.h"
  46
  47static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
  48static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);
  49
/* Per-CRTC MMIO aperture offsets, indexed by CRTC id. */
static const u32 crtc_offsets[6] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET
};

/* Per-HPD-pin MMIO aperture offsets, indexed by hpd id. */
static const u32 hpd_offsets[] =
{
	HPD0_REGISTER_OFFSET,
	HPD1_REGISTER_OFFSET,
	HPD2_REGISTER_OFFSET,
	HPD3_REGISTER_OFFSET,
	HPD4_REGISTER_OFFSET,
	HPD5_REGISTER_OFFSET
};

/* DIG block offsets; the final entry is a hand-computed dword offset
 * for the extra block beyond the six CRTC-aligned ones.
 */
static const uint32_t dig_offsets[] = {
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

/* For each display (D1..D6): which interrupt status register to read
 * and the vblank/vline/hpd bits within it.
 */
static const struct {
	uint32_t        reg;
	uint32_t        vblank;
	uint32_t        vline;
	uint32_t        hpd;

} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
 117
 118static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
 119                                     u32 block_offset, u32 reg)
 120{
 121        unsigned long flags;
 122        u32 r;
 123
 124        spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
 125        WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
 126        r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
 127        spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
 128
 129        return r;
 130}
 131
 132static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
 133                                      u32 block_offset, u32 reg, u32 v)
 134{
 135        unsigned long flags;
 136
 137        spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
 138        WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
 139        WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
 140        spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
 141}
 142
 143static bool dce_v8_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
 144{
 145        if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
 146                        CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
 147                return true;
 148        else
 149                return false;
 150}
 151
 152static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
 153{
 154        u32 pos1, pos2;
 155
 156        pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 157        pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 158
 159        if (pos1 != pos2)
 160                return true;
 161        else
 162                return false;
 163}
 164
 165/**
 166 * dce_v8_0_vblank_wait - vblank wait asic callback.
 167 *
 168 * @adev: amdgpu_device pointer
 169 * @crtc: crtc to wait for vblank on
 170 *
 171 * Wait for vblank on the requested crtc (evergreen+).
 172 */
 173static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 174{
 175        unsigned i = 100;
 176
 177        if (crtc >= adev->mode_info.num_crtc)
 178                return;
 179
 180        if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
 181                return;
 182
 183        /* depending on when we hit vblank, we may be close to active; if so,
 184         * wait for another frame.
 185         */
 186        while (dce_v8_0_is_in_vblank(adev, crtc)) {
 187                if (i++ == 100) {
 188                        i = 0;
 189                        if (!dce_v8_0_is_counter_moving(adev, crtc))
 190                                break;
 191                }
 192        }
 193
 194        while (!dce_v8_0_is_in_vblank(adev, crtc)) {
 195                if (i++ == 100) {
 196                        i = 0;
 197                        if (!dce_v8_0_is_counter_moving(adev, crtc))
 198                                break;
 199                }
 200        }
 201}
 202
 203static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 204{
 205        if (crtc >= adev->mode_info.num_crtc)
 206                return 0;
 207        else
 208                return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 209}
 210
/* Enable the pageflip interrupt on every CRTC so flip-completion events
 * are delivered.
 */
static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}
 219
/* Drop the pageflip interrupt references taken in
 * dce_v8_0_pageflip_interrupt_init().
 */
static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}
 228
 229/**
 230 * dce_v8_0_page_flip - pageflip callback.
 231 *
 232 * @adev: amdgpu_device pointer
 233 * @crtc_id: crtc to cleanup pageflip on
 234 * @crtc_base: new address of the crtc (GPU MC address)
 235 *
 236 * Triggers the actual pageflip by updating the primary
 237 * surface base address.
 238 */
 239static void dce_v8_0_page_flip(struct amdgpu_device *adev,
 240                               int crtc_id, u64 crtc_base, bool async)
 241{
 242        struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
 243
 244        /* flip at hsync for async, default is vsync */
 245        WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
 246               GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
 247        /* update the primary scanout addresses */
 248        WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 249               upper_32_bits(crtc_base));
 250        /* writing to the low address triggers the update */
 251        WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
 252               lower_32_bits(crtc_base));
 253        /* post the write */
 254        RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
 255}
 256
/**
 * dce_v8_0_crtc_get_scanoutpos - get the current scanout position
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to query
 * @vbl: return: raw CRTC_V_BLANK_START_END register value
 * @position: return: raw CRTC_STATUS_POSITION register value
 *
 * Returns 0 on success, -EINVAL if @crtc is out of range.
 */
static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}
 268
 269/**
 270 * dce_v8_0_hpd_sense - hpd sense callback.
 271 *
 272 * @adev: amdgpu_device pointer
 273 * @hpd: hpd (hotplug detect) pin
 274 *
 275 * Checks if a digital monitor is connected (evergreen+).
 276 * Returns true if connected, false if not connected.
 277 */
 278static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
 279                               enum amdgpu_hpd_id hpd)
 280{
 281        bool connected = false;
 282
 283        if (hpd >= adev->mode_info.num_hpd)
 284                return connected;
 285
 286        if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
 287            DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
 288                connected = true;
 289
 290        return connected;
 291}
 292
 293/**
 294 * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
 295 *
 296 * @adev: amdgpu_device pointer
 297 * @hpd: hpd (hotplug detect) pin
 298 *
 299 * Set the polarity of the hpd pin (evergreen+).
 300 */
 301static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
 302                                      enum amdgpu_hpd_id hpd)
 303{
 304        u32 tmp;
 305        bool connected = dce_v8_0_hpd_sense(adev, hpd);
 306
 307        if (hpd >= adev->mode_info.num_hpd)
 308                return;
 309
 310        tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
 311        if (connected)
 312                tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
 313        else
 314                tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
 315        WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 316}
 317
 318/**
 319 * dce_v8_0_hpd_init - hpd setup callback.
 320 *
 321 * @adev: amdgpu_device pointer
 322 *
 323 * Setup the hpd pins used by the card (evergreen+).
 324 * Enable the pin, set the polarity, and enable the hpd interrupts.
 325 */
 326static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 327{
 328        struct drm_device *dev = adev->ddev;
 329        struct drm_connector *connector;
 330        u32 tmp;
 331
 332        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 333                struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 334
 335                if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 336                        continue;
 337
 338                tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 339                tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
 340                WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 341
 342                if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
 343                    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
 344                        /* don't try to enable hpd on eDP or LVDS avoid breaking the
 345                         * aux dp channel on imac and help (but not completely fix)
 346                         * https://bugzilla.redhat.com/show_bug.cgi?id=726143
 347                         * also avoid interrupt storms during dpms.
 348                         */
 349                        tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 350                        tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
 351                        WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 352                        continue;
 353                }
 354
 355                dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 356                amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 357        }
 358}
 359
 360/**
 361 * dce_v8_0_hpd_fini - hpd tear down callback.
 362 *
 363 * @adev: amdgpu_device pointer
 364 *
 365 * Tear down the hpd pins used by the card (evergreen+).
 366 * Disable the hpd interrupts.
 367 */
 368static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
 369{
 370        struct drm_device *dev = adev->ddev;
 371        struct drm_connector *connector;
 372        u32 tmp;
 373
 374        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 375                struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 376
 377                if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 378                        continue;
 379
 380                tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 381                tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
 382                WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0);
 383
 384                amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 385        }
 386}
 387
/* Return the register used to read back the HPD gpio pin states. */
static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}
 392
/* Detect a hung display block: sample the H/V count of every enabled
 * CRTC, then re-sample up to 10 times (100us apart); a CRTC whose count
 * never changes is considered hung.  Returns true only if every enabled
 * CRTC is hung.
 */
static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
{
	u32 crtc_hung = 0;	/* bitmask of CRTCs still suspected hung */
	u32 crtc_status[6];
	u32 i, j, tmp;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				/* Counter moved -> this CRTC is alive. */
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}
 421
/* Enable or disable VGA emulation: @render controls both access through
 * the VGA aperture and the VGA render engine.
 */
static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	u32 tmp;

	/* Lockout access through VGA aperture when render is off */
	tmp = RREG32(mmVGA_HDP_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
	else
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
	WREG32(mmVGA_HDP_CONTROL, tmp);

	/* enable/disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
	else
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);
}
 443
 444static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev)
 445{
 446        int num_crtc = 0;
 447
 448        switch (adev->asic_type) {
 449        case CHIP_BONAIRE:
 450        case CHIP_HAWAII:
 451                num_crtc = 6;
 452                break;
 453        case CHIP_KAVERI:
 454                num_crtc = 4;
 455                break;
 456        case CHIP_KABINI:
 457        case CHIP_MULLINS:
 458                num_crtc = 2;
 459                break;
 460        default:
 461                num_crtc = 0;
 462        }
 463        return num_crtc;
 464}
 465
/* Disable VGA render and any enabled CRTC, if this ASIC has a DCE engine
 * (per the atombios tables).  Used to quiesce the display hardware.
 */
void dce_v8_0_disable_dce(struct amdgpu_device *adev)
{
	/* Disable VGA render and enabled crtc, if has DCE engine */
	if (amdgpu_atombios_has_dce_engine_info(adev)) {
		u32 tmp;
		int crtc_enabled, i;

		dce_v8_0_set_vga_render_state(adev, false);

		/* Disable crtc */
		for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) {
			crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
						     CRTC_CONTROL, CRTC_MASTER_EN);
			if (crtc_enabled) {
				/* Hold the update lock while clearing master enable. */
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
				tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
		}
	}
}
 489
/**
 * dce_v8_0_program_fmt - program the FMT (pixel formatter) block
 *
 * @encoder: encoder whose CRTC's FMT block is configured
 *
 * Programs FMT_BIT_DEPTH_CONTROL for the encoder's CRTC: either
 * truncation or spatial/temporal dithering depending on the monitor
 * bpc and the connector's dither preference.  LVDS/eDP FMT is set up
 * by the atom tables and analog DACs don't need it.
 */
static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS/eDP FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	/* bpc == 0 means the monitor bpc is unknown; leave FMT at defaults */
	if (bpc == 0)
		return;

	/* Each depth programs the same enables with a different
	 * TRUNCATE/DITHER depth field (0 = 6bpc, 1 = 8bpc, 2 = 10bpc).
	 */
	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
			(0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
			(1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 10:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
			(2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
 562
 563
 564/* display watermark setup */
 565/**
 566 * dce_v8_0_line_buffer_adjust - Set up the line buffer
 567 *
 568 * @adev: amdgpu_device pointer
 569 * @amdgpu_crtc: the selected display controller
 570 * @mode: the current display mode on the selected display
 571 * controller
 572 *
 573 * Setup up the line buffer allocation for
 574 * the selected display controller (CIK).
 575 * Returns the line buffer size in pixels.
 576 */
static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
	/*
	 * Line Buffer Setup
	 * There are 6 line buffers, one for each display controllers.
	 * There are 3 partitions per LB. Select the number of partitions
	 * to enable based on the display width.  For display widths larger
	 * than 4096, you need use to use 2 display controllers and combine
	 * them using the stereo blender.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		/* tmp selects the LB_MEMORY_CONFIG partitioning mode,
		 * buffer_alloc the number of DMIF buffers to allocate.
		 */
		if (mode->crtc_hdisplay < 1920) {
			tmp = 1;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 2560) {
			tmp = 2;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 4096) {
			tmp = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		} else {
			DRM_DEBUG_KMS("Mode too big for LB!\n");
			tmp = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		}
	} else {
		/* CRTC disabled: minimal config, no DMIF buffers */
		tmp = 1;
		buffer_alloc = 0;
	}

	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
	      (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
	      (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));

	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
	/* Poll until the hardware acknowledges the buffer allocation. */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
			break;
		udelay(1);
	}

	/* Report the line-buffer size in pixels for watermark math. */
	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 1:
			return 1920 * 2;
		case 2:
			return 2560 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}
 639
 640/**
 641 * cik_get_number_of_dram_channels - get the number of dram channels
 642 *
 643 * @adev: amdgpu_device pointer
 644 *
 645 * Look up the number of video ram channels (CIK).
 646 * Used for display watermark bandwidth calculations
 647 * Returns the number of dram channels
 648 */
 649static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
 650{
 651        u32 tmp = RREG32(mmMC_SHARED_CHMAP);
 652
 653        switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
 654        case 0:
 655        default:
 656                return 1;
 657        case 1:
 658                return 2;
 659        case 2:
 660                return 4;
 661        case 3:
 662                return 8;
 663        case 4:
 664                return 3;
 665        case 5:
 666                return 6;
 667        case 6:
 668                return 10;
 669        case 7:
 670                return 12;
 671        case 8:
 672                return 16;
 673        }
 674}
 675
/* Input parameters for the DCE8 display watermark calculations below. */
struct dce8_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;    /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};
 691
 692/**
 693 * dce_v8_0_dram_bandwidth - get the dram bandwidth
 694 *
 695 * @wm: watermark calculation data
 696 *
 697 * Calculate the raw dram bandwidth (CIK).
 698 * Used for display watermark bandwidth calculations
 699 * Returns the dram bandwidth in MBytes/s
 700 */
 701static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
 702{
 703        /* Calculate raw DRAM Bandwidth */
 704        fixed20_12 dram_efficiency; /* 0.7 */
 705        fixed20_12 yclk, dram_channels, bandwidth;
 706        fixed20_12 a;
 707
 708        a.full = dfixed_const(1000);
 709        yclk.full = dfixed_const(wm->yclk);
 710        yclk.full = dfixed_div(yclk, a);
 711        dram_channels.full = dfixed_const(wm->dram_channels * 4);
 712        a.full = dfixed_const(10);
 713        dram_efficiency.full = dfixed_const(7);
 714        dram_efficiency.full = dfixed_div(dram_efficiency, a);
 715        bandwidth.full = dfixed_mul(dram_channels, yclk);
 716        bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
 717
 718        return dfixed_trunc(bandwidth);
 719}
 720
 721/**
 722 * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
 723 *
 724 * @wm: watermark calculation data
 725 *
 726 * Calculate the dram bandwidth used for display (CIK).
 727 * Used for display watermark bandwidth calculations
 728 * Returns the dram bandwidth for display in MBytes/s
 729 */
 730static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
 731{
 732        /* Calculate DRAM Bandwidth and the part allocated to display. */
 733        fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
 734        fixed20_12 yclk, dram_channels, bandwidth;
 735        fixed20_12 a;
 736
 737        a.full = dfixed_const(1000);
 738        yclk.full = dfixed_const(wm->yclk);
 739        yclk.full = dfixed_div(yclk, a);
 740        dram_channels.full = dfixed_const(wm->dram_channels * 4);
 741        a.full = dfixed_const(10);
 742        disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
 743        disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
 744        bandwidth.full = dfixed_mul(dram_channels, yclk);
 745        bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
 746
 747        return dfixed_trunc(bandwidth);
 748}
 749
 750/**
 751 * dce_v8_0_data_return_bandwidth - get the data return bandwidth
 752 *
 753 * @wm: watermark calculation data
 754 *
 755 * Calculate the data return bandwidth used for display (CIK).
 756 * Used for display watermark bandwidth calculations
 757 * Returns the data return bandwidth in MBytes/s
 758 */
 759static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
 760{
 761        /* Calculate the display Data return Bandwidth */
 762        fixed20_12 return_efficiency; /* 0.8 */
 763        fixed20_12 sclk, bandwidth;
 764        fixed20_12 a;
 765
 766        a.full = dfixed_const(1000);
 767        sclk.full = dfixed_const(wm->sclk);
 768        sclk.full = dfixed_div(sclk, a);
 769        a.full = dfixed_const(10);
 770        return_efficiency.full = dfixed_const(8);
 771        return_efficiency.full = dfixed_div(return_efficiency, a);
 772        a.full = dfixed_const(32);
 773        bandwidth.full = dfixed_mul(a, sclk);
 774        bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
 775
 776        return dfixed_trunc(bandwidth);
 777}
 778
 779/**
 780 * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
 781 *
 782 * @wm: watermark calculation data
 783 *
 784 * Calculate the dmif bandwidth used for display (CIK).
 785 * Used for display watermark bandwidth calculations
 786 * Returns the dmif bandwidth in MBytes/s
 787 */
 788static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
 789{
 790        /* Calculate the DMIF Request Bandwidth */
 791        fixed20_12 disp_clk_request_efficiency; /* 0.8 */
 792        fixed20_12 disp_clk, bandwidth;
 793        fixed20_12 a, b;
 794
 795        a.full = dfixed_const(1000);
 796        disp_clk.full = dfixed_const(wm->disp_clk);
 797        disp_clk.full = dfixed_div(disp_clk, a);
 798        a.full = dfixed_const(32);
 799        b.full = dfixed_mul(a, disp_clk);
 800
 801        a.full = dfixed_const(10);
 802        disp_clk_request_efficiency.full = dfixed_const(8);
 803        disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
 804
 805        bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
 806
 807        return dfixed_trunc(bandwidth);
 808}
 809
 810/**
 811 * dce_v8_0_available_bandwidth - get the min available bandwidth
 812 *
 813 * @wm: watermark calculation data
 814 *
 815 * Calculate the min available bandwidth used for display (CIK).
 816 * Used for display watermark bandwidth calculations
 817 * Returns the min available bandwidth in MBytes/s
 818 */
 819static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
 820{
 821        /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
 822        u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
 823        u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
 824        u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);
 825
 826        return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
 827}
 828
 829/**
 830 * dce_v8_0_average_bandwidth - get the average available bandwidth
 831 *
 832 * @wm: watermark calculation data
 833 *
 834 * Calculate the average available bandwidth used for display (CIK).
 835 * Used for display watermark bandwidth calculations
 836 * Returns the average available bandwidth in MBytes/s
 837 */
 838static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
 839{
 840        /* Calculate the display mode Average Bandwidth
 841         * DisplayMode should contain the source and destination dimensions,
 842         * timing, etc.
 843         */
 844        fixed20_12 bpp;
 845        fixed20_12 line_time;
 846        fixed20_12 src_width;
 847        fixed20_12 bandwidth;
 848        fixed20_12 a;
 849
 850        a.full = dfixed_const(1000);
 851        line_time.full = dfixed_const(wm->active_time + wm->blank_time);
 852        line_time.full = dfixed_div(line_time, a);
 853        bpp.full = dfixed_const(wm->bytes_per_pixel);
 854        src_width.full = dfixed_const(wm->src_width);
 855        bandwidth.full = dfixed_mul(src_width, bpp);
 856        bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
 857        bandwidth.full = dfixed_div(bandwidth, line_time);
 858
 859        return dfixed_trunc(bandwidth);
 860}
 861
 862/**
 863 * dce_v8_0_latency_watermark - get the latency watermark
 864 *
 865 * @wm: watermark calculation data
 866 *
 867 * Calculate the latency watermark (CIK).
 868 * Used for display watermark bandwidth calculations
 869 * Returns the latency watermark in ns
 870 */
 871static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
 872{
 873        /* First calculate the latency in ns */
 874        u32 mc_latency = 2000; /* 2000 ns. */
 875        u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
 876        u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
 877        u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
 878        u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
 879        u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
 880                (wm->num_heads * cursor_line_pair_return_time);
 881        u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
 882        u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
 883        u32 tmp, dmif_size = 12288;
 884        fixed20_12 a, b, c;
 885
 886        if (wm->num_heads == 0)
 887                return 0;
 888
 889        a.full = dfixed_const(2);
 890        b.full = dfixed_const(1);
 891        if ((wm->vsc.full > a.full) ||
 892            ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
 893            (wm->vtaps >= 5) ||
 894            ((wm->vsc.full >= a.full) && wm->interlaced))
 895                max_src_lines_per_dst_line = 4;
 896        else
 897                max_src_lines_per_dst_line = 2;
 898
 899        a.full = dfixed_const(available_bandwidth);
 900        b.full = dfixed_const(wm->num_heads);
 901        a.full = dfixed_div(a, b);
 902        tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
 903        tmp = min(dfixed_trunc(a), tmp);
 904
 905        lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
 906
 907        a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 908        b.full = dfixed_const(1000);
 909        c.full = dfixed_const(lb_fill_bw);
 910        b.full = dfixed_div(c, b);
 911        a.full = dfixed_div(a, b);
 912        line_fill_time = dfixed_trunc(a);
 913
 914        if (line_fill_time < wm->active_time)
 915                return latency;
 916        else
 917                return latency + (line_fill_time - wm->active_time);
 918
 919}
 920
 921/**
 922 * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 923 * average and available dram bandwidth
 924 *
 925 * @wm: watermark calculation data
 926 *
 927 * Check if the display average bandwidth fits in the display
 928 * dram bandwidth (CIK).
 929 * Used for display watermark bandwidth calculations
 930 * Returns true if the display fits, false if not.
 931 */
 932static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
 933{
 934        if (dce_v8_0_average_bandwidth(wm) <=
 935            (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
 936                return true;
 937        else
 938                return false;
 939}
 940
 941/**
 942 * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
 943 * average and available bandwidth
 944 *
 945 * @wm: watermark calculation data
 946 *
 947 * Check if the display average bandwidth fits in the display
 948 * available bandwidth (CIK).
 949 * Used for display watermark bandwidth calculations
 950 * Returns true if the display fits, false if not.
 951 */
 952static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
 953{
 954        if (dce_v8_0_average_bandwidth(wm) <=
 955            (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
 956                return true;
 957        else
 958                return false;
 959}
 960
 961/**
 962 * dce_v8_0_check_latency_hiding - check latency hiding
 963 *
 964 * @wm: watermark calculation data
 965 *
 966 * Check latency hiding (CIK).
 967 * Used for display watermark bandwidth calculations
 968 * Returns true if the display fits, false if not.
 969 */
 970static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
 971{
 972        u32 lb_partitions = wm->lb_size / wm->src_width;
 973        u32 line_time = wm->active_time + wm->blank_time;
 974        u32 latency_tolerant_lines;
 975        u32 latency_hiding;
 976        fixed20_12 a;
 977
 978        a.full = dfixed_const(1);
 979        if (wm->vsc.full > a.full)
 980                latency_tolerant_lines = 1;
 981        else {
 982                if (lb_partitions <= (wm->vtaps + 1))
 983                        latency_tolerant_lines = 1;
 984                else
 985                        latency_tolerant_lines = 2;
 986        }
 987
 988        latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
 989
 990        if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
 991                return true;
 992        else
 993                return false;
 994}
 995
 996/**
 997 * dce_v8_0_program_watermarks - program display watermarks
 998 *
 999 * @adev: amdgpu_device pointer
1000 * @amdgpu_crtc: the selected display controller
1001 * @lb_size: line buffer size
1002 * @num_heads: number of display controllers in use
1003 *
1004 * Calculate and program the display watermarks for the
1005 * selected display controller (CIK).
1006 */
1007static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
1008                                        struct amdgpu_crtc *amdgpu_crtc,
1009                                        u32 lb_size, u32 num_heads)
1010{
1011        struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
1012        struct dce8_wm_params wm_low, wm_high;
1013        u32 active_time;
1014        u32 line_time = 0;
1015        u32 latency_watermark_a = 0, latency_watermark_b = 0;
1016        u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
1017
1018        if (amdgpu_crtc->base.enabled && num_heads && mode) {
1019                active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
1020                                            (u32)mode->clock);
1021                line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
1022                                          (u32)mode->clock);
1023                line_time = min(line_time, (u32)65535);
1024
1025                /* watermark for high clocks */
1026                if (adev->pm.dpm_enabled) {
1027                        wm_high.yclk =
1028                                amdgpu_dpm_get_mclk(adev, false) * 10;
1029                        wm_high.sclk =
1030                                amdgpu_dpm_get_sclk(adev, false) * 10;
1031                } else {
1032                        wm_high.yclk = adev->pm.current_mclk * 10;
1033                        wm_high.sclk = adev->pm.current_sclk * 10;
1034                }
1035
1036                wm_high.disp_clk = mode->clock;
1037                wm_high.src_width = mode->crtc_hdisplay;
1038                wm_high.active_time = active_time;
1039                wm_high.blank_time = line_time - wm_high.active_time;
1040                wm_high.interlaced = false;
1041                if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1042                        wm_high.interlaced = true;
1043                wm_high.vsc = amdgpu_crtc->vsc;
1044                wm_high.vtaps = 1;
1045                if (amdgpu_crtc->rmx_type != RMX_OFF)
1046                        wm_high.vtaps = 2;
1047                wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
1048                wm_high.lb_size = lb_size;
1049                wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
1050                wm_high.num_heads = num_heads;
1051
1052                /* set for high clocks */
1053                latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535);
1054
1055                /* possibly force display priority to high */
1056                /* should really do this at mode validation time... */
1057                if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1058                    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1059                    !dce_v8_0_check_latency_hiding(&wm_high) ||
1060                    (adev->mode_info.disp_priority == 2)) {
1061                        DRM_DEBUG_KMS("force priority to high\n");
1062                }
1063
1064                /* watermark for low clocks */
1065                if (adev->pm.dpm_enabled) {
1066                        wm_low.yclk =
1067                                amdgpu_dpm_get_mclk(adev, true) * 10;
1068                        wm_low.sclk =
1069                                amdgpu_dpm_get_sclk(adev, true) * 10;
1070                } else {
1071                        wm_low.yclk = adev->pm.current_mclk * 10;
1072                        wm_low.sclk = adev->pm.current_sclk * 10;
1073                }
1074
1075                wm_low.disp_clk = mode->clock;
1076                wm_low.src_width = mode->crtc_hdisplay;
1077                wm_low.active_time = active_time;
1078                wm_low.blank_time = line_time - wm_low.active_time;
1079                wm_low.interlaced = false;
1080                if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1081                        wm_low.interlaced = true;
1082                wm_low.vsc = amdgpu_crtc->vsc;
1083                wm_low.vtaps = 1;
1084                if (amdgpu_crtc->rmx_type != RMX_OFF)
1085                        wm_low.vtaps = 2;
1086                wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
1087                wm_low.lb_size = lb_size;
1088                wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
1089                wm_low.num_heads = num_heads;
1090
1091                /* set for low clocks */
1092                latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535);
1093
1094                /* possibly force display priority to high */
1095                /* should really do this at mode validation time... */
1096                if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
1097                    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
1098                    !dce_v8_0_check_latency_hiding(&wm_low) ||
1099                    (adev->mode_info.disp_priority == 2)) {
1100                        DRM_DEBUG_KMS("force priority to high\n");
1101                }
1102                lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
1103        }
1104
1105        /* select wm A */
1106        wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1107        tmp = wm_mask;
1108        tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1109        tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1110        WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1111        WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1112               ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1113                (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1114        /* select wm B */
1115        tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1116        tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1117        tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1118        WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1119        WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1120               ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1121                (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1122        /* restore original selection */
1123        WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
1124
1125        /* save values for DPM */
1126        amdgpu_crtc->line_time = line_time;
1127        amdgpu_crtc->wm_high = latency_watermark_a;
1128        amdgpu_crtc->wm_low = latency_watermark_b;
1129        /* Save number of lines the linebuffer leads before the scanout */
1130        amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
1131}
1132
1133/**
1134 * dce_v8_0_bandwidth_update - program display watermarks
1135 *
1136 * @adev: amdgpu_device pointer
1137 *
1138 * Calculate and program the display watermarks and line
1139 * buffer allocation (CIK).
1140 */
1141static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
1142{
1143        struct drm_display_mode *mode = NULL;
1144        u32 num_heads = 0, lb_size;
1145        int i;
1146
1147        amdgpu_update_display_priority(adev);
1148
1149        for (i = 0; i < adev->mode_info.num_crtc; i++) {
1150                if (adev->mode_info.crtcs[i]->base.enabled)
1151                        num_heads++;
1152        }
1153        for (i = 0; i < adev->mode_info.num_crtc; i++) {
1154                mode = &adev->mode_info.crtcs[i]->base.mode;
1155                lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
1156                dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
1157                                            lb_size, num_heads);
1158        }
1159}
1160
1161static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
1162{
1163        int i;
1164        u32 offset, tmp;
1165
1166        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1167                offset = adev->mode_info.audio.pin[i].offset;
1168                tmp = RREG32_AUDIO_ENDPT(offset,
1169                                         ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1170                if (((tmp &
1171                AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
1172                AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
1173                        adev->mode_info.audio.pin[i].connected = false;
1174                else
1175                        adev->mode_info.audio.pin[i].connected = true;
1176        }
1177}
1178
1179static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
1180{
1181        int i;
1182
1183        dce_v8_0_audio_get_connected_pins(adev);
1184
1185        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1186                if (adev->mode_info.audio.pin[i].connected)
1187                        return &adev->mode_info.audio.pin[i];
1188        }
1189        DRM_ERROR("No connected audio pins found!\n");
1190        return NULL;
1191}
1192
1193static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1194{
1195        struct amdgpu_device *adev = encoder->dev->dev_private;
1196        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1197        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1198        u32 offset;
1199
1200        if (!dig || !dig->afmt || !dig->afmt->pin)
1201                return;
1202
1203        offset = dig->afmt->offset;
1204
1205        WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
1206               (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
1207}
1208
1209static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
1210                                                struct drm_display_mode *mode)
1211{
1212        struct amdgpu_device *adev = encoder->dev->dev_private;
1213        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1214        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1215        struct drm_connector *connector;
1216        struct amdgpu_connector *amdgpu_connector = NULL;
1217        u32 tmp = 0, offset;
1218
1219        if (!dig || !dig->afmt || !dig->afmt->pin)
1220                return;
1221
1222        offset = dig->afmt->pin->offset;
1223
1224        list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1225                if (connector->encoder == encoder) {
1226                        amdgpu_connector = to_amdgpu_connector(connector);
1227                        break;
1228                }
1229        }
1230
1231        if (!amdgpu_connector) {
1232                DRM_ERROR("Couldn't find encoder's connector\n");
1233                return;
1234        }
1235
1236        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
1237                if (connector->latency_present[1])
1238                        tmp =
1239                        (connector->video_latency[1] <<
1240                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1241                        (connector->audio_latency[1] <<
1242                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1243                else
1244                        tmp =
1245                        (0 <<
1246                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1247                        (0 <<
1248                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1249        } else {
1250                if (connector->latency_present[0])
1251                        tmp =
1252                        (connector->video_latency[0] <<
1253                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1254                        (connector->audio_latency[0] <<
1255                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1256                else
1257                        tmp =
1258                        (0 <<
1259                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1260                        (0 <<
1261                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1262
1263        }
1264        WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1265}
1266
/* Program the azalia speaker allocation from the connector's EDID,
 * falling back to stereo when no allocation block is present.
 */
static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
        struct amdgpu_device *adev = encoder->dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        struct drm_connector *connector;
        struct amdgpu_connector *amdgpu_connector = NULL;
        u32 offset, tmp;
        u8 *sadb = NULL;
        int sad_count;

        if (!dig || !dig->afmt || !dig->afmt->pin)
                return;

        offset = dig->afmt->pin->offset;

        /* find the connector currently driven by this encoder */
        list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
                if (connector->encoder == encoder) {
                        amdgpu_connector = to_amdgpu_connector(connector);
                        break;
                }
        }

        if (!amdgpu_connector) {
                DRM_ERROR("Couldn't find encoder's connector\n");
                return;
        }

        /* sadb is allocated by the helper and freed below */
        sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
        if (sad_count < 0) {
                DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
                sad_count = 0;
        }

        /* program the speaker allocation */
        tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
        tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
                AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
        /* set HDMI mode */
        tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
        if (sad_count)
                tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
        else
                tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo */
        WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

        /* kfree(NULL) is a no-op, safe when no block was read */
        kfree(sadb);
}
1315
/* Translate the connector's EDID Short Audio Descriptors into the
 * azalia audio descriptor registers, one register per coding type.
 */
static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
        struct amdgpu_device *adev = encoder->dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        u32 offset;
        struct drm_connector *connector;
        struct amdgpu_connector *amdgpu_connector = NULL;
        struct cea_sad *sads;
        int i, sad_count;

        /* maps each descriptor register to the CEA audio coding type it holds */
        static const u16 eld_reg_to_type[][2] = {
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
        };

        if (!dig || !dig->afmt || !dig->afmt->pin)
                return;

        offset = dig->afmt->pin->offset;

        /* find the connector currently driven by this encoder */
        list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
                if (connector->encoder == encoder) {
                        amdgpu_connector = to_amdgpu_connector(connector);
                        break;
                }
        }

        if (!amdgpu_connector) {
                DRM_ERROR("Couldn't find encoder's connector\n");
                return;
        }

        /* sads is allocated by the helper and freed below */
        sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
        if (sad_count <= 0) {
                DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
                return;
        }
        BUG_ON(!sads);

        for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
                u32 value = 0;
                u8 stereo_freqs = 0;
                int max_channels = -1;
                int j;

                /* pick the SAD with the most channels for this coding type */
                for (j = 0; j < sad_count; j++) {
                        struct cea_sad *sad = &sads[j];

                        if (sad->format == eld_reg_to_type[i][1]) {
                                if (sad->channels > max_channels) {
                                        value = (sad->channels <<
                                                 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
                                                (sad->byte2 <<
                                                 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
                                                (sad->freq <<
                                                 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
                                        max_channels = sad->channels;
                                }

                                /* PCM SADs accumulate stereo frequencies;
                                 * for other types the first match wins
                                 */
                                if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
                                        stereo_freqs |= sad->freq;
                                else
                                        break;
                        }
                }

                value |= (stereo_freqs <<
                        AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);

                WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
        }

        kfree(sads);
}
1401
1402static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
1403                                  struct amdgpu_audio_pin *pin,
1404                                  bool enable)
1405{
1406        if (!pin)
1407                return;
1408
1409        WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1410                enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1411}
1412
/* Audio endpoint register offsets, expressed relative to the first pin
 * (base 0x1780); indexed by pin id.
 */
static const u32 pin_offsets[7] =
{
        (0x1780 - 0x1780),
        (0x1786 - 0x1780),
        (0x178c - 0x1780),
        (0x1792 - 0x1780),
        (0x1798 - 0x1780),
        (0x179d - 0x1780),
        (0x17a4 - 0x1780),
};
1423
1424static int dce_v8_0_audio_init(struct amdgpu_device *adev)
1425{
1426        int i;
1427
1428        if (!amdgpu_audio)
1429                return 0;
1430
1431        adev->mode_info.audio.enabled = true;
1432
1433        if (adev->asic_type == CHIP_KAVERI) /* KV: 4 streams, 7 endpoints */
1434                adev->mode_info.audio.num_pins = 7;
1435        else if ((adev->asic_type == CHIP_KABINI) ||
1436                 (adev->asic_type == CHIP_MULLINS)) /* KB/ML: 2 streams, 3 endpoints */
1437                adev->mode_info.audio.num_pins = 3;
1438        else if ((adev->asic_type == CHIP_BONAIRE) ||
1439                 (adev->asic_type == CHIP_HAWAII))/* BN/HW: 6 streams, 7 endpoints */
1440                adev->mode_info.audio.num_pins = 7;
1441        else
1442                adev->mode_info.audio.num_pins = 3;
1443
1444        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1445                adev->mode_info.audio.pin[i].channels = -1;
1446                adev->mode_info.audio.pin[i].rate = -1;
1447                adev->mode_info.audio.pin[i].bits_per_sample = -1;
1448                adev->mode_info.audio.pin[i].status_bits = 0;
1449                adev->mode_info.audio.pin[i].category_code = 0;
1450                adev->mode_info.audio.pin[i].connected = false;
1451                adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1452                adev->mode_info.audio.pin[i].id = i;
1453                /* disable audio.  it will be set up later */
1454                /* XXX remove once we switch to ip funcs */
1455                dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1456        }
1457
1458        return 0;
1459}
1460
1461static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
1462{
1463        int i;
1464
1465        if (!amdgpu_audio)
1466                return;
1467
1468        if (!adev->mode_info.audio.enabled)
1469                return;
1470
1471        for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1472                dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1473
1474        adev->mode_info.audio.enabled = false;
1475}
1476
1477/*
1478 * update the N and CTS parameters for a given pixel clock rate
1479 */
1480static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
1481{
1482        struct drm_device *dev = encoder->dev;
1483        struct amdgpu_device *adev = dev->dev_private;
1484        struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1485        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1486        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1487        uint32_t offset = dig->afmt->offset;
1488
1489        WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
1490        WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);
1491
1492        WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
1493        WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);
1494
1495        WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
1496        WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
1497}
1498
1499/*
1500 * build a HDMI Video Info Frame
1501 */
1502static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1503                                               void *buffer, size_t size)
1504{
1505        struct drm_device *dev = encoder->dev;
1506        struct amdgpu_device *adev = dev->dev_private;
1507        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1508        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1509        uint32_t offset = dig->afmt->offset;
1510        uint8_t *frame = buffer + 3;
1511        uint8_t *header = buffer;
1512
1513        WREG32(mmAFMT_AVI_INFO0 + offset,
1514                frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1515        WREG32(mmAFMT_AVI_INFO1 + offset,
1516                frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
1517        WREG32(mmAFMT_AVI_INFO2 + offset,
1518                frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
1519        WREG32(mmAFMT_AVI_INFO3 + offset,
1520                frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
1521}
1522
1523static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1524{
1525        struct drm_device *dev = encoder->dev;
1526        struct amdgpu_device *adev = dev->dev_private;
1527        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1528        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1529        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1530        u32 dto_phase = 24 * 1000;
1531        u32 dto_modulo = clock;
1532
1533        if (!dig || !dig->afmt)
1534                return;
1535
1536        /* XXX two dtos; generally use dto0 for hdmi */
1537        /* Express [24MHz / target pixel clock] as an exact rational
1538         * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
1539         * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
1540         */
1541        WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT));
1542        WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
1543        WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
1544}
1545
/*
 * update the info frames with the data from the current display mode
 *
 * Full AFMT/HDMI bring-up for a mode set: mutes audio, programs the audio
 * DTO, deep-color control, infoframe/audio packet control registers, ACR
 * values and the AVI infoframe, then re-enables audio.  The register write
 * order below follows the hardware programming sequence — do not reorder.
 */
static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
				  struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	struct hdmi_avi_infoframe frame;
	uint32_t offset, val;
	ssize_t err;
	int bpc = 8; /* default bits-per-color when no crtc is attached */

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (!dig->afmt->enabled)
		return;

	offset = dig->afmt->offset;

	/* hdmi deep color mode general control packets setup, if bpc > 8 */
	if (encoder->crtc) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
		bpc = amdgpu_crtc->bpc;
	}

	/* disable audio prior to setting up hw */
	dig->afmt->pin = dce_v8_0_audio_get_pin(adev);
	dce_v8_0_audio_enable(adev, dig->afmt->pin, false);

	dce_v8_0_audio_set_dto(encoder, mode->clock);

	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */

	WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000);

	/* clear any previous deep-color setting before re-deriving it from bpc */
	val = RREG32(mmHDMI_CONTROL + offset);
	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK;

	/* NOTE(review): connector is dereferenced in the DRM_DEBUGs below;
	 * assumes the encoder always has a connector at this point — confirm
	 * against callers.
	 */
	switch (bpc) {
	case 0:
	case 6:
	case 8:
	case 16:
	default:
		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
			  connector->name, bpc);
		break;
	case 10:
		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
		val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
			  connector->name);
		break;
	case 12:
		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
		val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
			  connector->name);
		break;
	}

	WREG32(mmHDMI_CONTROL + offset, val);

	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */
	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */
	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */

	WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */
	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */

	WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
	       AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */

	WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
	       (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */

	WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */

	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
	       (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the default audio delay */
	       (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be suffient for all audio modes and small enough for all hblanks */

	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
	       AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */

	/* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */

	/* with deep color the SW CTS path is not used; let hw generate CTS */
	if (bpc > 8)
		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to sent ACR packets when required */
	else
		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to sent ACR packets when required */

	dce_v8_0_afmt_update_ACR(encoder, mode->clock);

	/* IEC 60958 channel status: assign channel numbers L=1, R=2, ... */
	WREG32(mmAFMT_60958_0 + offset,
	       (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));

	WREG32(mmAFMT_60958_1 + offset,
	       (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));

	WREG32(mmAFMT_60958_2 + offset,
	       (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
	       (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
	       (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
	       (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
	       (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
	       (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));

	dce_v8_0_audio_write_speaker_allocation(encoder);


	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));

	dce_v8_0_afmt_audio_select_pin(encoder);
	dce_v8_0_audio_write_sad_regs(encoder);
	dce_v8_0_audio_write_latency_fields(encoder, mode);

	/* build and upload the AVI infoframe for this mode */
	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
	if (err < 0) {
		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
		return;
	}

	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (err < 0) {
		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
		return;
	}

	dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));

	WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for audio info values to be updated */

	WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
		 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
		 ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);

	WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
		  AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */

	WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
	WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
	WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
	WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);

	/* enable audio after setting up hw */
	dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
}
1711
1712static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1713{
1714        struct drm_device *dev = encoder->dev;
1715        struct amdgpu_device *adev = dev->dev_private;
1716        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1717        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1718
1719        if (!dig || !dig->afmt)
1720                return;
1721
1722        /* Silent, r600_hdmi_enable will raise WARN for us */
1723        if (enable && dig->afmt->enabled)
1724                return;
1725        if (!enable && !dig->afmt->enabled)
1726                return;
1727
1728        if (!enable && dig->afmt->pin) {
1729                dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1730                dig->afmt->pin = NULL;
1731        }
1732
1733        dig->afmt->enabled = enable;
1734
1735        DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1736                  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1737}
1738
1739static int dce_v8_0_afmt_init(struct amdgpu_device *adev)
1740{
1741        int i;
1742
1743        for (i = 0; i < adev->mode_info.num_dig; i++)
1744                adev->mode_info.afmt[i] = NULL;
1745
1746        /* DCE8 has audio blocks tied to DIG encoders */
1747        for (i = 0; i < adev->mode_info.num_dig; i++) {
1748                adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1749                if (adev->mode_info.afmt[i]) {
1750                        adev->mode_info.afmt[i]->offset = dig_offsets[i];
1751                        adev->mode_info.afmt[i]->id = i;
1752                } else {
1753                        int j;
1754                        for (j = 0; j < i; j++) {
1755                                kfree(adev->mode_info.afmt[j]);
1756                                adev->mode_info.afmt[j] = NULL;
1757                        }
1758                        return -ENOMEM;
1759                }
1760        }
1761        return 0;
1762}
1763
1764static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
1765{
1766        int i;
1767
1768        for (i = 0; i < adev->mode_info.num_dig; i++) {
1769                kfree(adev->mode_info.afmt[i]);
1770                adev->mode_info.afmt[i] = NULL;
1771        }
1772}
1773
/* Per-display-controller VGA control registers, indexed by crtc_id (D1..D6) */
static const u32 vga_control_regs[6] =
{
	mmD1VGA_CONTROL,
	mmD2VGA_CONTROL,
	mmD3VGA_CONTROL,
	mmD4VGA_CONTROL,
	mmD5VGA_CONTROL,
	mmD6VGA_CONTROL,
};
1783
1784static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
1785{
1786        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1787        struct drm_device *dev = crtc->dev;
1788        struct amdgpu_device *adev = dev->dev_private;
1789        u32 vga_control;
1790
1791        vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1792        if (enable)
1793                WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1794        else
1795                WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1796}
1797
1798static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
1799{
1800        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1801        struct drm_device *dev = crtc->dev;
1802        struct amdgpu_device *adev = dev->dev_private;
1803
1804        if (enable)
1805                WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1806        else
1807                WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1808}
1809
/**
 * dce_v8_0_crtc_do_set_base - program the primary scanout surface
 *
 * @crtc: the crtc being updated
 * @fb: framebuffer to scan out (atomic path); may differ from crtc->primary->fb
 * @x: x offset into the fb
 * @y: y offset into the fb
 * @atomic: non-zero for the atomic/pageflip path — assumes the BO is already
 *          pinned and only base pointers need updating
 *
 * Pins the fb BO in VRAM (non-atomic path), translates the DRM pixel format
 * and tiling flags into GRPH_CONTROL bits, then programs surface address,
 * pitch, viewport, and LUT bypass.  On the non-atomic path the previously
 * scanned-out BO is unpinned at the end.  Returns 0 or a negative errno.
 */
static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
				     int x, int y, int atomic)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_framebuffer *amdgpu_fb;
	struct drm_framebuffer *target_fb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *abo;
	uint64_t fb_location, tiling_flags;
	uint32_t fb_format, fb_pitch_pixels;
	u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
	u32 pipe_config;
	u32 viewport_w, viewport_h;
	int r;
	bool bypass_lut = false;
	struct drm_format_name_buf format_name;

	/* no fb bound */
	if (!atomic && !crtc->primary->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	if (atomic) {
		amdgpu_fb = to_amdgpu_framebuffer(fb);
		target_fb = fb;
	} else {
		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
		target_fb = crtc->primary->fb;
	}

	/* If atomic, assume fb object is pinned & idle & fenced and
	 * just update base pointers
	 */
	obj = amdgpu_fb->obj;
	abo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(abo, false);
	if (unlikely(r != 0))
		return r;

	if (atomic) {
		fb_location = amdgpu_bo_gpu_offset(abo);
	} else {
		/* scanout requires the BO resident in VRAM */
		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
		if (unlikely(r != 0)) {
			amdgpu_bo_unreserve(abo);
			return -EINVAL;
		}
	}

	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
	amdgpu_bo_unreserve(abo);

	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	/* translate the DRM fourcc into GRPH_CONTROL depth/format bits;
	 * on big-endian hosts also pick the matching byte-swap mode
	 */
	switch (target_fb->format->format) {
	case DRM_FORMAT_C8:
		fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
		break;
	case DRM_FORMAT_XRGB4444:
	case DRM_FORMAT_ARGB4444:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_BGRX5551:
	case DRM_FORMAT_BGRA5551:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_RGB565:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_BGRA1010102:
		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			  drm_get_format_name(target_fb->format->format, &format_name));
		return -EINVAL;
	}

	/* fold the BO's tiling parameters into GRPH_CONTROL */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
		unsigned bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
		fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
		fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
		fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
		fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
		fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
		fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT);
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
		fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
	}

	fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);

	dce_v8_0_vga_enable(crtc, false);

	/* Make sure surface address is updated at vertical blank rather than
	 * horizontal blank
	 */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);

	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);

	/*
	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
	 * retain the full precision throughout the pipeline.
	 */
	WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
		 (bypass_lut ? LUT_10BIT_BYPASS_EN : 0),
		 ~LUT_10BIT_BYPASS_EN);

	if (bypass_lut)
		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");

	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);

	/* pitch register is in pixels, fb pitch is in bytes */
	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);

	dce_v8_0_grph_enable(crtc, true);

	WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
	       target_fb->height);

	/* hw requires x aligned to 4 and y aligned to 2 */
	x &= ~3;
	y &= ~1;
	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
	       (x << 16) | y);
	viewport_w = crtc->mode.hdisplay;
	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
	       (viewport_w << 16) | viewport_h);

	/* set pageflip to happen anywhere in vblank interval */
	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);

	/* non-atomic modeset: release the pin on the previous fb's BO */
	if (!atomic && fb && fb != crtc->primary->fb) {
		amdgpu_fb = to_amdgpu_framebuffer(fb);
		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
		r = amdgpu_bo_reserve(abo, true);
		if (unlikely(r != 0))
			return r;
		amdgpu_bo_unpin(abo);
		amdgpu_bo_unreserve(abo);
	}

	/* Bytes per pixel may have changed */
	dce_v8_0_bandwidth_update(adev);

	return 0;
}
2032
2033static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
2034                                    struct drm_display_mode *mode)
2035{
2036        struct drm_device *dev = crtc->dev;
2037        struct amdgpu_device *adev = dev->dev_private;
2038        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2039
2040        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2041                WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset,
2042                       LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT);
2043        else
2044                WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2045}
2046
/*
 * Program the display pipe's color pipeline for this crtc: bypass the
 * input CSC / prescale / degamma / gamut-remap / regamma / output CSC
 * stages and upload the crtc's 256-entry gamma ramp into the hardware
 * legacy LUT (10 bits per component, packed 30-bit entries).
 */
static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u16 *r, *g, *b;
	int i;

	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);

	/* bypass all pre-LUT color processing stages */
	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
	       ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
		(INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
		(INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));

	/* select the legacy LUT */
	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);

	/* LUT output range: black at 0, white at full scale */
	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);

	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);

	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);

	/* upload the gamma ramp: entries auto-increment from index 0;
	 * each write packs R/G/B as 10-bit fields of a 30-bit word
	 */
	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;
	for (i = 0; i < 256; i++) {
		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
		       ((*r++ & 0xffc0) << 14) |
		       ((*g++ & 0xffc0) << 4) |
		       (*b++ >> 6));
	}

	/* bypass all post-LUT color processing stages */
	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
	       ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
		(GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
		(REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
	       ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
		(OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
	/* XXX this only needs to be programmed once per crtc at startup,
	 * not sure where the best place for it is
	 */
	WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset,
	       ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK);
}
2113
2114static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder)
2115{
2116        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2117        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2118
2119        switch (amdgpu_encoder->encoder_id) {
2120        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2121                if (dig->linkb)
2122                        return 1;
2123                else
2124                        return 0;
2125                break;
2126        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2127                if (dig->linkb)
2128                        return 3;
2129                else
2130                        return 2;
2131                break;
2132        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2133                if (dig->linkb)
2134                        return 5;
2135                else
2136                        return 4;
2137                break;
2138        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2139                return 6;
2140                break;
2141        default:
2142                DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2143                return 0;
2144        }
2145}
2146
2147/**
2148 * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc.
2149 *
2150 * @crtc: drm crtc
2151 *
2152 * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2153 * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2154 * monitors a dedicated PPLL must be used.  If a particular board has
2155 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2156 * as there is no need to program the PLL itself.  If we are not able to
2157 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2158 * avoid messing up an existing monitor.
2159 *
2160 * Asic specific PLL information
2161 *
2162 * DCE 8.x
2163 * KB/KV
2164 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2165 * CI
2166 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2167 *
2168 */
2169static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
2170{
2171        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2172        struct drm_device *dev = crtc->dev;
2173        struct amdgpu_device *adev = dev->dev_private;
2174        u32 pll_in_use;
2175        int pll;
2176
2177        if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2178                if (adev->clock.dp_extclk)
2179                        /* skip PPLL programming if using ext clock */
2180                        return ATOM_PPLL_INVALID;
2181                else {
2182                        /* use the same PPLL for all DP monitors */
2183                        pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2184                        if (pll != ATOM_PPLL_INVALID)
2185                                return pll;
2186                }
2187        } else {
2188                /* use the same PPLL for all monitors with the same clock */
2189                pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2190                if (pll != ATOM_PPLL_INVALID)
2191                        return pll;
2192        }
2193        /* otherwise, pick one of the plls */
2194        if ((adev->asic_type == CHIP_KABINI) ||
2195            (adev->asic_type == CHIP_MULLINS)) {
2196                /* KB/ML has PPLL1 and PPLL2 */
2197                pll_in_use = amdgpu_pll_get_use_mask(crtc);
2198                if (!(pll_in_use & (1 << ATOM_PPLL2)))
2199                        return ATOM_PPLL2;
2200                if (!(pll_in_use & (1 << ATOM_PPLL1)))
2201                        return ATOM_PPLL1;
2202                DRM_ERROR("unable to allocate a PPLL\n");
2203                return ATOM_PPLL_INVALID;
2204        } else {
2205                /* CI/KV has PPLL0, PPLL1, and PPLL2 */
2206                pll_in_use = amdgpu_pll_get_use_mask(crtc);
2207                if (!(pll_in_use & (1 << ATOM_PPLL2)))
2208                        return ATOM_PPLL2;
2209                if (!(pll_in_use & (1 << ATOM_PPLL1)))
2210                        return ATOM_PPLL1;
2211                if (!(pll_in_use & (1 << ATOM_PPLL0)))
2212                        return ATOM_PPLL0;
2213                DRM_ERROR("unable to allocate a PPLL\n");
2214                return ATOM_PPLL_INVALID;
2215        }
2216        return ATOM_PPLL_INVALID;
2217}
2218
2219static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2220{
2221        struct amdgpu_device *adev = crtc->dev->dev_private;
2222        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2223        uint32_t cur_lock;
2224
2225        cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2226        if (lock)
2227                cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2228        else
2229                cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2230        WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2231}
2232
2233static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
2234{
2235        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2236        struct amdgpu_device *adev = crtc->dev->dev_private;
2237
2238        WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2239                   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2240                   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2241}
2242
2243static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
2244{
2245        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2246        struct amdgpu_device *adev = crtc->dev->dev_private;
2247
2248        WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2249               upper_32_bits(amdgpu_crtc->cursor_addr));
2250        WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2251               lower_32_bits(amdgpu_crtc->cursor_addr));
2252
2253        WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2254                   CUR_CONTROL__CURSOR_EN_MASK |
2255                   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2256                   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2257}
2258
2259static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
2260                                       int x, int y)
2261{
2262        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2263        struct amdgpu_device *adev = crtc->dev->dev_private;
2264        int xorigin = 0, yorigin = 0;
2265
2266        amdgpu_crtc->cursor_x = x;
2267        amdgpu_crtc->cursor_y = y;
2268
2269        /* avivo cursor are offset into the total surface */
2270        x += crtc->x;
2271        y += crtc->y;
2272        DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2273
2274        if (x < 0) {
2275                xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2276                x = 0;
2277        }
2278        if (y < 0) {
2279                yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2280                y = 0;
2281        }
2282
2283        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2284        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2285        WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2286               ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2287
2288        return 0;
2289}
2290
2291static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
2292                                     int x, int y)
2293{
2294        int ret;
2295
2296        dce_v8_0_lock_cursor(crtc, true);
2297        ret = dce_v8_0_cursor_move_locked(crtc, x, y);
2298        dce_v8_0_lock_cursor(crtc, false);
2299
2300        return ret;
2301}
2302
/*
 * dce_v8_0_crtc_cursor_set2 - DRM .cursor_set2 hook.
 *
 * handle == 0 hides the cursor; otherwise the GEM object is looked up,
 * pinned in VRAM and programmed as the new cursor surface.  In either
 * case the previously bound cursor BO (if any) is unpinned and released
 * at the end, and amdgpu_crtc->cursor_bo takes ownership of the new
 * object reference.  Returns 0 on success or a negative error code.
 */
static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
                                     struct drm_file *file_priv,
                                     uint32_t handle,
                                     uint32_t width,
                                     uint32_t height,
                                     int32_t hot_x,
                                     int32_t hot_y)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct drm_gem_object *obj;
        struct amdgpu_bo *aobj;
        int ret;

        if (!handle) {
                /* turn off cursor */
                dce_v8_0_hide_cursor(crtc);
                obj = NULL;
                goto unpin;
        }

        if ((width > amdgpu_crtc->max_cursor_width) ||
            (height > amdgpu_crtc->max_cursor_height)) {
                DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
                return -EINVAL;
        }

        /* takes a reference on success; dropped on error or via unpin path */
        obj = drm_gem_object_lookup(file_priv, handle);
        if (!obj) {
                DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
                return -ENOENT;
        }

        aobj = gem_to_amdgpu_bo(obj);
        ret = amdgpu_bo_reserve(aobj, false);
        if (ret != 0) {
                drm_gem_object_put_unlocked(obj);
                return ret;
        }

        /* pin the BO in VRAM; cursor_addr receives the GPU address */
        ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
        amdgpu_bo_unreserve(aobj);
        if (ret) {
                DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
                drm_gem_object_put_unlocked(obj);
                return ret;
        }

        dce_v8_0_lock_cursor(crtc, true);

        if (width != amdgpu_crtc->cursor_width ||
            height != amdgpu_crtc->cursor_height ||
            hot_x != amdgpu_crtc->cursor_hot_x ||
            hot_y != amdgpu_crtc->cursor_hot_y) {
                int x, y;

                /* keep the on-screen tip position stable across hotspot changes */
                x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
                y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;

                dce_v8_0_cursor_move_locked(crtc, x, y);

                amdgpu_crtc->cursor_width = width;
                amdgpu_crtc->cursor_height = height;
                amdgpu_crtc->cursor_hot_x = hot_x;
                amdgpu_crtc->cursor_hot_y = hot_y;
        }

        dce_v8_0_show_cursor(crtc);
        dce_v8_0_lock_cursor(crtc, false);

unpin:
        /* release the previous cursor BO, if any */
        if (amdgpu_crtc->cursor_bo) {
                struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
                ret = amdgpu_bo_reserve(aobj, true);
                if (likely(ret == 0)) {
                        amdgpu_bo_unpin(aobj);
                        amdgpu_bo_unreserve(aobj);
                }
                drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
        }

        amdgpu_crtc->cursor_bo = obj;
        return 0;
}
2386
2387static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2388{
2389        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2390
2391        if (amdgpu_crtc->cursor_bo) {
2392                dce_v8_0_lock_cursor(crtc, true);
2393
2394                dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2395                                            amdgpu_crtc->cursor_y);
2396
2397                dce_v8_0_show_cursor(crtc);
2398
2399                dce_v8_0_lock_cursor(crtc, false);
2400        }
2401}
2402
/*
 * dce_v8_0_crtc_gamma_set - DRM .gamma_set hook.
 *
 * The red/green/blue tables are not consumed here; the DRM core stores
 * them in crtc->gamma_store beforehand and dce_v8_0_crtc_load_lut()
 * programs the LUT from there.  Always returns 0.
 */
static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
                                   u16 *blue, uint32_t size,
                                   struct drm_modeset_acquire_ctx *ctx)
{
        dce_v8_0_crtc_load_lut(crtc);

        return 0;
}
2411
/* DRM .destroy hook: tear down the crtc and free the containing
 * amdgpu_crtc allocated in dce_v8_0_crtc_init().
 */
static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

        drm_crtc_cleanup(crtc);
        kfree(amdgpu_crtc);
}
2419
/* CRTC function table registered with the DRM core in dce_v8_0_crtc_init() */
static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
        .cursor_set2 = dce_v8_0_crtc_cursor_set2,
        .cursor_move = dce_v8_0_crtc_cursor_move,
        .gamma_set = dce_v8_0_crtc_gamma_set,
        .set_config = amdgpu_crtc_set_config,
        .destroy = dce_v8_0_crtc_destroy,
        .page_flip_target = amdgpu_crtc_page_flip_target,
};
2428
/*
 * dce_v8_0_crtc_dpms - power the crtc up or down.
 *
 * DPMS_ON enables and unblanks the crtc via atombios, resyncs the
 * VBLANK/PFLIP interrupt state and reloads the gamma LUT; all other
 * modes blank and disable the crtc.  PM clocks are recomputed in both
 * directions.
 */
static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        unsigned type;

        switch (mode) {
        case DRM_MODE_DPMS_ON:
                amdgpu_crtc->enabled = true;
                amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
                /* NOTE(review): VGA mode is toggled around the (un)blank
                 * calls; presumably required by the atombios blanking
                 * sequence on this hw — confirm before reordering. */
                dce_v8_0_vga_enable(crtc, true);
                amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
                dce_v8_0_vga_enable(crtc, false);
                /* Make sure VBLANK and PFLIP interrupts are still enabled */
                type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
                amdgpu_irq_update(adev, &adev->crtc_irq, type);
                amdgpu_irq_update(adev, &adev->pageflip_irq, type);
                drm_crtc_vblank_on(crtc);
                dce_v8_0_crtc_load_lut(crtc);
                break;
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
                drm_crtc_vblank_off(crtc);
                if (amdgpu_crtc->enabled) {
                        dce_v8_0_vga_enable(crtc, true);
                        amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
                        dce_v8_0_vga_enable(crtc, false);
                }
                amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
                amdgpu_crtc->enabled = false;
                break;
        }
        /* adjust pm to dpms */
        amdgpu_pm_compute_clocks(adev);
}
2466
/* DRM helper .prepare hook: lock the crtc and turn it off before a
 * mode set; dce_v8_0_crtc_commit() undoes this afterwards.
 */
static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
{
        /* disable crtc pair power gating before programming */
        amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
        amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
        dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
2474
/* DRM helper .commit hook: turn the crtc back on and release the lock
 * taken in dce_v8_0_crtc_prepare().
 */
static void dce_v8_0_crtc_commit(struct drm_crtc *crtc)
{
        dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
        amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
}
2480
/*
 * dce_v8_0_crtc_disable - fully shut down a crtc.
 *
 * Powers the crtc off, unpins the scanout buffer, disables the graphics
 * plane, power gates the crtc pair, and tears down the PPLL unless it is
 * still shared with another enabled crtc.  Finally clears the cached
 * pll/encoder/connector state.
 */
static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_atom_ss ss;
        int i;

        dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
        if (crtc->primary->fb) {
                int r;
                struct amdgpu_framebuffer *amdgpu_fb;
                struct amdgpu_bo *abo;

                /* unpin the front buffer backing this crtc */
                amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
                abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
                r = amdgpu_bo_reserve(abo, true);
                if (unlikely(r))
                        DRM_ERROR("failed to reserve abo before unpin\n");
                else {
                        amdgpu_bo_unpin(abo);
                        amdgpu_bo_unreserve(abo);
                }
        }
        /* disable the GRPH */
        dce_v8_0_grph_enable(crtc, false);

        amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);

        /* keep the PPLL alive if another enabled crtc still uses it */
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                if (adev->mode_info.crtcs[i] &&
                    adev->mode_info.crtcs[i]->enabled &&
                    i != amdgpu_crtc->crtc_id &&
                    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
                        /* one other crtc is using this pll don't turn
                         * off the pll
                         */
                        goto done;
                }
        }

        switch (amdgpu_crtc->pll_id) {
        case ATOM_PPLL1:
        case ATOM_PPLL2:
                /* disable the ppll */
                amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
                                                 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
                break;
        case ATOM_PPLL0:
                /* disable the ppll; PPLL0 is only a display PLL on CI/KV */
                if ((adev->asic_type == CHIP_KAVERI) ||
                    (adev->asic_type == CHIP_BONAIRE) ||
                    (adev->asic_type == CHIP_HAWAII))
                        amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
                                                  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
                break;
        default:
                break;
        }
done:
        /* reset cached routing state for the next mode set */
        amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
        amdgpu_crtc->adjusted_clock = 0;
        amdgpu_crtc->encoder = NULL;
        amdgpu_crtc->connector = NULL;
}
2546
/*
 * dce_v8_0_crtc_mode_set - DRM helper .mode_set hook.
 *
 * Programs the PLL, timing, scanout base, overscan and scaler for the
 * new mode and restores the cursor.  Requires adjusted_clock to have
 * been computed in mode_fixup; returns -EINVAL otherwise.
 */
static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
                                  struct drm_display_mode *mode,
                                  struct drm_display_mode *adjusted_mode,
                                  int x, int y, struct drm_framebuffer *old_fb)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

        if (!amdgpu_crtc->adjusted_clock)
                return -EINVAL;

        amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
        amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
        dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
        amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
        amdgpu_atombios_crtc_scaler_setup(crtc);
        dce_v8_0_cursor_reset(crtc);
        /* update the hw mode for dpm */
        amdgpu_crtc->hw_mode = *adjusted_mode;

        return 0;
}
2568
/*
 * dce_v8_0_crtc_mode_fixup - DRM helper .mode_fixup hook.
 *
 * Caches the encoder/connector driving this crtc, applies scaler fixups,
 * prepares the PLL parameters and allocates a PPLL.  Returns false (mode
 * rejected) when no encoder/connector is attached, when the scaling or
 * pll preparation fails, or when a non-DP encoder cannot get a PPLL.
 */
static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
                                     const struct drm_display_mode *mode,
                                     struct drm_display_mode *adjusted_mode)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct drm_encoder *encoder;

        /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                if (encoder->crtc == crtc) {
                        amdgpu_crtc->encoder = encoder;
                        amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
                        break;
                }
        }
        if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
                amdgpu_crtc->encoder = NULL;
                amdgpu_crtc->connector = NULL;
                return false;
        }
        if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
                return false;
        if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
                return false;
        /* pick pll */
        amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc);
        /* if we can't get a PPLL for a non-DP encoder, fail */
        if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
            !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
                return false;

        return true;
}
2603
/* DRM helper .mode_set_base hook: reprogram scanout base (non-atomic path). */
static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
                                  struct drm_framebuffer *old_fb)
{
        return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
}
2609
2610static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2611                                         struct drm_framebuffer *fb,
2612                                         int x, int y, enum mode_set_atomic state)
2613{
2614       return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1);
2615}
2616
/* CRTC helper callbacks attached to each crtc in dce_v8_0_crtc_init() */
static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
        .dpms = dce_v8_0_crtc_dpms,
        .mode_fixup = dce_v8_0_crtc_mode_fixup,
        .mode_set = dce_v8_0_crtc_mode_set,
        .mode_set_base = dce_v8_0_crtc_set_base,
        .mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic,
        .prepare = dce_v8_0_crtc_prepare,
        .commit = dce_v8_0_crtc_commit,
        .disable = dce_v8_0_crtc_disable,
};
2627
/*
 * dce_v8_0_crtc_init - allocate and register one crtc.
 *
 * Allocates the amdgpu_crtc (with trailing connector pointer array),
 * registers it with the DRM core, sets cursor limits and the per-crtc
 * register offset, and attaches the helper funcs.  Returns 0 on success
 * or -ENOMEM.
 */
static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
{
        struct amdgpu_crtc *amdgpu_crtc;

        amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
                              (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
        if (amdgpu_crtc == NULL)
                return -ENOMEM;

        drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);

        drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
        amdgpu_crtc->crtc_id = index;
        adev->mode_info.crtcs[index] = amdgpu_crtc;

        amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
        amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
        adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
        adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;

        /* per-crtc register block offset used by all MMIO accesses above */
        amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];

        amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
        amdgpu_crtc->adjusted_clock = 0;
        amdgpu_crtc->encoder = NULL;
        amdgpu_crtc->connector = NULL;
        drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);

        return 0;
}
2658
2659static int dce_v8_0_early_init(void *handle)
2660{
2661        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2662
2663        adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg;
2664        adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;
2665
2666        dce_v8_0_set_display_funcs(adev);
2667
2668        adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev);
2669
2670        switch (adev->asic_type) {
2671        case CHIP_BONAIRE:
2672        case CHIP_HAWAII:
2673                adev->mode_info.num_hpd = 6;
2674                adev->mode_info.num_dig = 6;
2675                break;
2676        case CHIP_KAVERI:
2677                adev->mode_info.num_hpd = 6;
2678                adev->mode_info.num_dig = 7;
2679                break;
2680        case CHIP_KABINI:
2681        case CHIP_MULLINS:
2682                adev->mode_info.num_hpd = 6;
2683                adev->mode_info.num_dig = 6; /* ? */
2684                break;
2685        default:
2686                /* FIXME: not supported yet */
2687                return -EINVAL;
2688        }
2689
2690        dce_v8_0_set_irq_funcs(adev);
2691
2692        return 0;
2693}
2694
2695static int dce_v8_0_sw_init(void *handle)
2696{
2697        int r, i;
2698        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2699
2700        for (i = 0; i < adev->mode_info.num_crtc; i++) {
2701                r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2702                if (r)
2703                        return r;
2704        }
2705
2706        for (i = 8; i < 20; i += 2) {
2707                r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2708                if (r)
2709                        return r;
2710        }
2711
2712        /* HPD hotplug */
2713        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2714        if (r)
2715                return r;
2716
2717        adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2718
2719        adev->ddev->mode_config.async_page_flip = true;
2720
2721        adev->ddev->mode_config.max_width = 16384;
2722        adev->ddev->mode_config.max_height = 16384;
2723
2724        adev->ddev->mode_config.preferred_depth = 24;
2725        adev->ddev->mode_config.prefer_shadow = 1;
2726
2727        adev->ddev->mode_config.fb_base = adev->mc.aper_base;
2728
2729        r = amdgpu_modeset_create_props(adev);
2730        if (r)
2731                return r;
2732
2733        adev->ddev->mode_config.max_width = 16384;
2734        adev->ddev->mode_config.max_height = 16384;
2735
2736        /* allocate crtcs */
2737        for (i = 0; i < adev->mode_info.num_crtc; i++) {
2738                r = dce_v8_0_crtc_init(adev, i);
2739                if (r)
2740                        return r;
2741        }
2742
2743        if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2744                amdgpu_print_display_setup(adev->ddev);
2745        else
2746                return -EINVAL;
2747
2748        /* setup afmt */
2749        r = dce_v8_0_afmt_init(adev);
2750        if (r)
2751                return r;
2752
2753        r = dce_v8_0_audio_init(adev);
2754        if (r)
2755                return r;
2756
2757        drm_kms_helper_poll_init(adev->ddev);
2758
2759        adev->mode_info.mode_config_initialized = true;
2760        return 0;
2761}
2762
/* Tear down everything set up in dce_v8_0_sw_init (reverse order). */
static int dce_v8_0_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* kfree(NULL) is a no-op, so no check needed */
        kfree(adev->mode_info.bios_hardcoded_edid);

        drm_kms_helper_poll_fini(adev->ddev);

        dce_v8_0_audio_fini(adev);

        dce_v8_0_afmt_fini(adev);

        drm_mode_config_cleanup(adev->ddev);
        adev->mode_info.mode_config_initialized = false;

        return 0;
}
2780
/*
 * dce_v8_0_hw_init - hardware side initialisation.
 *
 * Disables VGA render, initialises the dig PHYs and display engine PLL,
 * sets up HPD, mutes all audio pins and enables the pageflip interrupts.
 * Always returns 0.
 */
static int dce_v8_0_hw_init(void *handle)
{
        int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* disable vga render */
        dce_v8_0_set_vga_render_state(adev, false);
        /* init dig PHYs, disp eng pll */
        amdgpu_atombios_encoder_init_dig(adev);
        amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);

        /* initialize hpd */
        dce_v8_0_hpd_init(adev);

        /* start with all audio pins disabled */
        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
        }

        dce_v8_0_pageflip_interrupt_init(adev);

        return 0;
}
2803
/* Hardware teardown: disable HPD, mute audio pins and stop pageflip
 * interrupts.  Always returns 0.
 */
static int dce_v8_0_hw_fini(void *handle)
{
        int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        dce_v8_0_hpd_fini(adev);

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
        }

        dce_v8_0_pageflip_interrupt_fini(adev);

        return 0;
}
2819
/* Suspend: save the backlight level (restored in resume), then run the
 * normal hw teardown.
 */
static int dce_v8_0_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->mode_info.bl_level =
                amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);

        return dce_v8_0_hw_fini(handle);
}
2829
/* Resume: restore the saved backlight level, re-run hw init and turn the
 * backlight back on.  Returns the hw_init result.
 */
static int dce_v8_0_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int ret;

        amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
                                                           adev->mode_info.bl_level);

        ret = dce_v8_0_hw_init(handle);

        /* turn on the BL */
        if (adev->mode_info.bl_encoder) {
                u8 bl_level = amdgpu_display_backlight_get_level(adev,
                                                                  adev->mode_info.bl_encoder);
                amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
                                                    bl_level);
        }

        return ret;
}
2850
/* Display block has no meaningful busy state to poll; always idle. */
static bool dce_v8_0_is_idle(void *handle)
{
        return true;
}
2855
/* Nothing to wait for (see dce_v8_0_is_idle); always succeeds. */
static int dce_v8_0_wait_for_idle(void *handle)
{
        return 0;
}
2860
/* Soft-reset the display controller through SRBM if it appears hung.
 * The reset bit is pulsed: set, settle, clear, settle.  Always returns 0.
 */
static int dce_v8_0_soft_reset(void *handle)
{
        u32 srbm_soft_reset = 0, tmp;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (dce_v8_0_is_display_hung(adev))
                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;

        if (srbm_soft_reset) {
                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmSRBM_SOFT_RESET, tmp);
                /* read back to post the write before delaying */
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~srbm_soft_reset;
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                /* Wait a little for things to settle down */
                udelay(50);
        }
        return 0;
}
2887
2888static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2889                                                     int crtc,
2890                                                     enum amdgpu_interrupt_state state)
2891{
2892        u32 reg_block, lb_interrupt_mask;
2893
2894        if (crtc >= adev->mode_info.num_crtc) {
2895                DRM_DEBUG("invalid crtc %d\n", crtc);
2896                return;
2897        }
2898
2899        switch (crtc) {
2900        case 0:
2901                reg_block = CRTC0_REGISTER_OFFSET;
2902                break;
2903        case 1:
2904                reg_block = CRTC1_REGISTER_OFFSET;
2905                break;
2906        case 2:
2907                reg_block = CRTC2_REGISTER_OFFSET;
2908                break;
2909        case 3:
2910                reg_block = CRTC3_REGISTER_OFFSET;
2911                break;
2912        case 4:
2913                reg_block = CRTC4_REGISTER_OFFSET;
2914                break;
2915        case 5:
2916                reg_block = CRTC5_REGISTER_OFFSET;
2917                break;
2918        default:
2919                DRM_DEBUG("invalid crtc %d\n", crtc);
2920                return;
2921        }
2922
2923        switch (state) {
2924        case AMDGPU_IRQ_STATE_DISABLE:
2925                lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2926                lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
2927                WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2928                break;
2929        case AMDGPU_IRQ_STATE_ENABLE:
2930                lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2931                lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
2932                WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2933                break;
2934        default:
2935                break;
2936        }
2937}
2938
2939static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2940                                                    int crtc,
2941                                                    enum amdgpu_interrupt_state state)
2942{
2943        u32 reg_block, lb_interrupt_mask;
2944
2945        if (crtc >= adev->mode_info.num_crtc) {
2946                DRM_DEBUG("invalid crtc %d\n", crtc);
2947                return;
2948        }
2949
2950        switch (crtc) {
2951        case 0:
2952                reg_block = CRTC0_REGISTER_OFFSET;
2953                break;
2954        case 1:
2955                reg_block = CRTC1_REGISTER_OFFSET;
2956                break;
2957        case 2:
2958                reg_block = CRTC2_REGISTER_OFFSET;
2959                break;
2960        case 3:
2961                reg_block = CRTC3_REGISTER_OFFSET;
2962                break;
2963        case 4:
2964                reg_block = CRTC4_REGISTER_OFFSET;
2965                break;
2966        case 5:
2967                reg_block = CRTC5_REGISTER_OFFSET;
2968                break;
2969        default:
2970                DRM_DEBUG("invalid crtc %d\n", crtc);
2971                return;
2972        }
2973
2974        switch (state) {
2975        case AMDGPU_IRQ_STATE_DISABLE:
2976                lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2977                lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
2978                WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2979                break;
2980        case AMDGPU_IRQ_STATE_ENABLE:
2981                lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2982                lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
2983                WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2984                break;
2985        default:
2986                break;
2987        }
2988}
2989
2990static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2991                                            struct amdgpu_irq_src *src,
2992                                            unsigned type,
2993                                            enum amdgpu_interrupt_state state)
2994{
2995        u32 dc_hpd_int_cntl;
2996
2997        if (type >= adev->mode_info.num_hpd) {
2998                DRM_DEBUG("invalid hdp %d\n", type);
2999                return 0;
3000        }
3001
3002        switch (state) {
3003        case AMDGPU_IRQ_STATE_DISABLE:
3004                dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
3005                dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3006                WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
3007                break;
3008        case AMDGPU_IRQ_STATE_ENABLE:
3009                dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
3010                dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3011                WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
3012                break;
3013        default:
3014                break;
3015        }
3016
3017        return 0;
3018}
3019
3020static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
3021                                             struct amdgpu_irq_src *src,
3022                                             unsigned type,
3023                                             enum amdgpu_interrupt_state state)
3024{
3025        switch (type) {
3026        case AMDGPU_CRTC_IRQ_VBLANK1:
3027                dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3028                break;
3029        case AMDGPU_CRTC_IRQ_VBLANK2:
3030                dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3031                break;
3032        case AMDGPU_CRTC_IRQ_VBLANK3:
3033                dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3034                break;
3035        case AMDGPU_CRTC_IRQ_VBLANK4:
3036                dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3037                break;
3038        case AMDGPU_CRTC_IRQ_VBLANK5:
3039                dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3040                break;
3041        case AMDGPU_CRTC_IRQ_VBLANK6:
3042                dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3043                break;
3044        case AMDGPU_CRTC_IRQ_VLINE1:
3045                dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state);
3046                break;
3047        case AMDGPU_CRTC_IRQ_VLINE2:
3048                dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state);
3049                break;
3050        case AMDGPU_CRTC_IRQ_VLINE3:
3051                dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state);
3052                break;
3053        case AMDGPU_CRTC_IRQ_VLINE4:
3054                dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state);
3055                break;
3056        case AMDGPU_CRTC_IRQ_VLINE5:
3057                dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state);
3058                break;
3059        case AMDGPU_CRTC_IRQ_VLINE6:
3060                dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state);
3061                break;
3062        default:
3063                break;
3064        }
3065        return 0;
3066}
3067
/**
 * dce_v8_0_crtc_irq - process a CRTC (vblank/vline) interrupt vector
 *
 * src_data[0] selects the event: 0 = vblank, 1 = vline.  The handler acks
 * the event in the line-buffer status register and, for vblank, forwards
 * it to DRM when the irq is enabled.  Always returns 0.
 */
static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *source,
			     struct amdgpu_iv_entry *entry)
{
	/* CRTC src_ids start at 1, so src_id - 1 is the CRTC index.
	 * NOTE(review): crtc is used to index interrupt_status_offsets[]
	 * without a range check -- presumably the IH only delivers valid
	 * src_ids for this source; confirm against the IH setup.
	 */
	unsigned crtc = entry->src_id - 1;
	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
	unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);

	switch (entry->src_data[0]) {
	case 0: /* vblank */
		/* ack the interrupt in the line buffer status register */
		if (disp_int & interrupt_status_offsets[crtc].vblank)
			WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
		else
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		/* only forward the event to DRM if the irq is enabled */
		if (amdgpu_irq_enabled(adev, source, irq_type)) {
			drm_handle_vblank(adev->ddev, crtc);
		}
		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
		break;
	case 1: /* vline */
		/* ack the interrupt; vline events are only logged here */
		if (disp_int & interrupt_status_offsets[crtc].vline)
			WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
		else
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
		break;
	default:
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
3103
3104static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
3105                                                 struct amdgpu_irq_src *src,
3106                                                 unsigned type,
3107                                                 enum amdgpu_interrupt_state state)
3108{
3109        u32 reg;
3110
3111        if (type >= adev->mode_info.num_crtc) {
3112                DRM_ERROR("invalid pageflip crtc %d\n", type);
3113                return -EINVAL;
3114        }
3115
3116        reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3117        if (state == AMDGPU_IRQ_STATE_DISABLE)
3118                WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3119                       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3120        else
3121                WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3122                       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3123
3124        return 0;
3125}
3126
3127static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
3128                                struct amdgpu_irq_src *source,
3129                                struct amdgpu_iv_entry *entry)
3130{
3131        unsigned long flags;
3132        unsigned crtc_id;
3133        struct amdgpu_crtc *amdgpu_crtc;
3134        struct amdgpu_flip_work *works;
3135
3136        crtc_id = (entry->src_id - 8) >> 1;
3137        amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3138
3139        if (crtc_id >= adev->mode_info.num_crtc) {
3140                DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3141                return -EINVAL;
3142        }
3143
3144        if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3145            GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3146                WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3147                       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3148
3149        /* IRQ could occur when in initial stage */
3150        if (amdgpu_crtc == NULL)
3151                return 0;
3152
3153        spin_lock_irqsave(&adev->ddev->event_lock, flags);
3154        works = amdgpu_crtc->pflip_works;
3155        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
3156                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3157                                                "AMDGPU_FLIP_SUBMITTED(%d)\n",
3158                                                amdgpu_crtc->pflip_status,
3159                                                AMDGPU_FLIP_SUBMITTED);
3160                spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3161                return 0;
3162        }
3163
3164        /* page flip completed. clean up */
3165        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3166        amdgpu_crtc->pflip_works = NULL;
3167
3168        /* wakeup usersapce */
3169        if (works->event)
3170                drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3171
3172        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3173
3174        drm_crtc_vblank_put(&amdgpu_crtc->base);
3175        schedule_work(&works->unpin_work);
3176
3177        return 0;
3178}
3179
3180static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
3181                            struct amdgpu_irq_src *source,
3182                            struct amdgpu_iv_entry *entry)
3183{
3184        uint32_t disp_int, mask, tmp;
3185        unsigned hpd;
3186
3187        if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3188                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3189                return 0;
3190        }
3191
3192        hpd = entry->src_data[0];
3193        disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3194        mask = interrupt_status_offsets[hpd].hpd;
3195
3196        if (disp_int & mask) {
3197                tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
3198                tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3199                WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
3200                schedule_work(&adev->hotplug_work);
3201                DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3202        }
3203
3204        return 0;
3205
3206}
3207
/* No-op: this block implements no clockgating control; always succeeds. */
static int dce_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}
3213
/* No-op: this block implements no powergating control; always succeeds. */
static int dce_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}
3219
/* Common IP block callbacks for the DCE v8 display controller. */
static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
	.name = "dce_v8_0",
	.early_init = dce_v8_0_early_init,
	.late_init = NULL,
	.sw_init = dce_v8_0_sw_init,
	.sw_fini = dce_v8_0_sw_fini,
	.hw_init = dce_v8_0_hw_init,
	.hw_fini = dce_v8_0_hw_fini,
	.suspend = dce_v8_0_suspend,
	.resume = dce_v8_0_resume,
	.is_idle = dce_v8_0_is_idle,
	.wait_for_idle = dce_v8_0_wait_for_idle,
	.soft_reset = dce_v8_0_soft_reset,
	.set_clockgating_state = dce_v8_0_set_clockgating_state,
	.set_powergating_state = dce_v8_0_set_powergating_state,
};
3236
/**
 * dce_v8_0_encoder_mode_set - per-encoder mode-set hook
 *
 * Records the pixel clock, forces the encoder off (DPMS), restores the
 * interleave setting and, for HDMI, enables and programs the AFMT
 * (audio/infoframe) block for the adjusted mode.
 */
static void
dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
			  struct drm_display_mode *mode,
			  struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->pixel_clock = adjusted_mode->clock;

	/* need to call this here rather than in prepare() since we need some crtc info */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	/* set scaler clears this on some chips */
	dce_v8_0_set_interleave(encoder->crtc, mode);

	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
		dce_v8_0_afmt_enable(encoder, true);
		dce_v8_0_afmt_setmode(encoder, adjusted_mode);
	}
}
3257
/**
 * dce_v8_0_encoder_prepare - pre-mode-set encoder setup
 *
 * Picks a DIG encoder (and AFMT block for DFP) for digital outputs,
 * locks the atombios scratch registers, selects the i2c router port,
 * powers on eDP panels, programs the CRTC source and the FMT block.
 * The scratch-register lock is released later in encoder_commit().
 */
static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);

	if ((amdgpu_encoder->active_device &
	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
	     ENCODER_OBJECT_ID_NONE)) {
		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
		if (dig) {
			dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder);
			/* only DFP outputs get an audio/infoframe block */
			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
		}
	}

	amdgpu_atombios_scratch_regs_lock(adev, true);

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		/* select the clock/data port if it uses a router */
		if (amdgpu_connector->router.cd_valid)
			amdgpu_i2c_router_select_cd_port(amdgpu_connector);

		/* turn eDP panel on for mode set */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			amdgpu_atombios_encoder_set_edp_panel_power(connector,
							     ATOM_TRANSMITTER_ACTION_POWER_ON);
	}

	/* this is needed for the pll/ss setup to work correctly in some cases */
	amdgpu_atombios_encoder_set_crtc_source(encoder);
	/* set up the FMT blocks */
	dce_v8_0_program_fmt(encoder);
}
3296
3297static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
3298{
3299        struct drm_device *dev = encoder->dev;
3300        struct amdgpu_device *adev = dev->dev_private;
3301
3302        /* need to call this here as we need the crtc set up */
3303        amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3304        amdgpu_atombios_scratch_regs_lock(adev, false);
3305}
3306
3307static void dce_v8_0_encoder_disable(struct drm_encoder *encoder)
3308{
3309        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3310        struct amdgpu_encoder_atom_dig *dig;
3311
3312        amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3313
3314        if (amdgpu_atombios_encoder_is_digital(encoder)) {
3315                if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3316                        dce_v8_0_afmt_enable(encoder, false);
3317                dig = amdgpu_encoder->enc_priv;
3318                dig->dig_encoder = -1;
3319        }
3320        amdgpu_encoder->active_device = 0;
3321}
3322
3323/* these are handled by the primary encoders */
static void dce_v8_0_ext_prepare(struct drm_encoder *encoder)
{
	/* intentionally empty: handled by the primary encoder */
}
3328
static void dce_v8_0_ext_commit(struct drm_encoder *encoder)
{
	/* intentionally empty: handled by the primary encoder */
}
3333
static void
dce_v8_0_ext_mode_set(struct drm_encoder *encoder,
		      struct drm_display_mode *mode,
		      struct drm_display_mode *adjusted_mode)
{
	/* intentionally empty: handled by the primary encoder */
}
3341
static void dce_v8_0_ext_disable(struct drm_encoder *encoder)
{
	/* intentionally empty: handled by the primary encoder */
}
3346
static void
dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
{
	/* intentionally empty: handled by the primary encoder */
}
3352
/* Helper callbacks for external (bridge) encoders -- all no-ops here. */
static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
	.dpms = dce_v8_0_ext_dpms,
	.prepare = dce_v8_0_ext_prepare,
	.mode_set = dce_v8_0_ext_mode_set,
	.commit = dce_v8_0_ext_commit,
	.disable = dce_v8_0_ext_disable,
	/* no detect for TMDS/LVDS yet */
};
3361
/* Helper callbacks for digital (DIG/UNIPHY) encoders. */
static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v8_0_encoder_prepare,
	.mode_set = dce_v8_0_encoder_mode_set,
	.commit = dce_v8_0_encoder_commit,
	.disable = dce_v8_0_encoder_disable,
	.detect = amdgpu_atombios_encoder_dig_detect,
};
3371
/* Helper callbacks for analog (DAC) encoders; no .disable hook. */
static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v8_0_encoder_prepare,
	.mode_set = dce_v8_0_encoder_mode_set,
	.commit = dce_v8_0_encoder_commit,
	.detect = amdgpu_atombios_encoder_dac_detect,
};
3380
3381static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder)
3382{
3383        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3384        if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3385                amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3386        kfree(amdgpu_encoder->enc_priv);
3387        drm_encoder_cleanup(encoder);
3388        kfree(amdgpu_encoder);
3389}
3390
/* Core encoder callbacks shared by all DCE v8 encoder types. */
static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = {
	.destroy = dce_v8_0_encoder_destroy,
};
3394
/**
 * dce_v8_0_encoder_add - register an encoder described by the BIOS
 *
 * @adev: amdgpu device pointer
 * @encoder_enum: atombios encoder object enum (id + enum bits)
 * @supported_device: ATOM_DEVICE_* bitmask this encoder can drive
 * @caps: encoder capability flags
 *
 * If an encoder with the same enum already exists, only its supported
 * device mask is extended.  Otherwise a new amdgpu_encoder is allocated
 * and initialized with the DRM encoder type and helper funcs matching
 * the atombios encoder object id.
 */
static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
				 uint32_t encoder_enum,
				 uint32_t supported_device,
				 u16 caps)
{
	struct drm_device *dev = adev->ddev;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		if (amdgpu_encoder->encoder_enum == encoder_enum) {
			amdgpu_encoder->devices |= supported_device;
			return;
		}

	}

	/* add a new one */
	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
	if (!amdgpu_encoder)
		return;

	encoder = &amdgpu_encoder->base;
	/* one possible_crtcs bit per CRTC; unknown counts fall back to 2 */
	switch (adev->mode_info.num_crtc) {
	case 1:
		encoder->possible_crtcs = 0x1;
		break;
	case 2:
	default:
		encoder->possible_crtcs = 0x3;
		break;
	case 4:
		encoder->possible_crtcs = 0xf;
		break;
	case 6:
		encoder->possible_crtcs = 0x3f;
		break;
	}

	amdgpu_encoder->enc_priv = NULL;

	amdgpu_encoder->encoder_enum = encoder_enum;
	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	amdgpu_encoder->devices = supported_device;
	amdgpu_encoder->rmx_type = RMX_OFF;
	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
	amdgpu_encoder->is_ext_encoder = false;
	amdgpu_encoder->caps = caps;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
				 DRM_MODE_ENCODER_DAC, NULL);
		drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		/* DRM encoder type depends on which device class it drives */
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			amdgpu_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		} else {
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		}
		drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_SI170B:
	case ENCODER_OBJECT_ID_CH7303:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
	case ENCODER_OBJECT_ID_TITFP513:
	case ENCODER_OBJECT_ID_VT1623:
	case ENCODER_OBJECT_ID_HDMI_SI1930:
	case ENCODER_OBJECT_ID_TRAVIS:
	case ENCODER_OBJECT_ID_NUTMEG:
		/* these are handled by the primary encoders */
		amdgpu_encoder->is_ext_encoder = true;
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
		else
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
		drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
		break;
	}
}
3498
/* Display callbacks the amdgpu core uses to drive DCE v8 hardware. */
static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
	.bandwidth_update = &dce_v8_0_bandwidth_update,
	.vblank_get_counter = &dce_v8_0_vblank_get_counter,
	.vblank_wait = &dce_v8_0_vblank_wait,
	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
	.hpd_sense = &dce_v8_0_hpd_sense,
	.hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
	.page_flip = &dce_v8_0_page_flip,
	.page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
	.add_encoder = &dce_v8_0_encoder_add,
	.add_connector = &amdgpu_connector_add,
};
3513
3514static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
3515{
3516        if (adev->mode_info.funcs == NULL)
3517                adev->mode_info.funcs = &dce_v8_0_display_funcs;
3518}
3519
/* IRQ source callbacks: CRTC vblank/vline interrupts. */
static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
	.set = dce_v8_0_set_crtc_interrupt_state,
	.process = dce_v8_0_crtc_irq,
};
3524
/* IRQ source callbacks: pageflip-completed interrupts. */
static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
	.set = dce_v8_0_set_pageflip_interrupt_state,
	.process = dce_v8_0_pageflip_irq,
};
3529
/* IRQ source callbacks: hotplug-detect interrupts. */
static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
	.set = dce_v8_0_set_hpd_interrupt_state,
	.process = dce_v8_0_hpd_irq,
};
3534
3535static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
3536{
3537        if (adev->mode_info.num_crtc > 0)
3538                adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3539        else
3540                adev->crtc_irq.num_types = 0;
3541        adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;
3542
3543        adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3544        adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;
3545
3546        adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3547        adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
3548}
3549
/* IP block descriptor: DCE v8.0. */
const struct amdgpu_ip_block_version dce_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};
3558
/* IP block descriptor: DCE v8.1 (same callbacks as v8.0). */
const struct amdgpu_ip_block_version dce_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};
3567
/* IP block descriptor: DCE v8.2 (same callbacks as v8.0). */
const struct amdgpu_ip_block_version dce_v8_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 2,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};
3576
/* IP block descriptor: DCE v8.3 (same callbacks as v8.0). */
const struct amdgpu_ip_block_version dce_v8_3_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 3,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};
3585
/* IP block descriptor: DCE v8.5 (same callbacks as v8.0). */
const struct amdgpu_ip_block_version dce_v8_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};
3594