/* linux/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c */
   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23#include <drm/drmP.h>
  24#include "amdgpu.h"
  25#include "amdgpu_pm.h"
  26#include "amdgpu_i2c.h"
  27#include "cikd.h"
  28#include "atom.h"
  29#include "amdgpu_atombios.h"
  30#include "atombios_crtc.h"
  31#include "atombios_encoders.h"
  32#include "amdgpu_pll.h"
  33#include "amdgpu_connectors.h"
  34#include "dce_v8_0.h"
  35
  36#include "dce/dce_8_0_d.h"
  37#include "dce/dce_8_0_sh_mask.h"
  38
  39#include "gca/gfx_7_2_enum.h"
  40
  41#include "gmc/gmc_7_1_d.h"
  42#include "gmc/gmc_7_1_sh_mask.h"
  43
  44#include "oss/oss_2_0_d.h"
  45#include "oss/oss_2_0_sh_mask.h"
  46
  47static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
  48static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);
  49
/* Per-CRTC MMIO register block offsets, indexed by CRTC id (DCE 8.x exposes
 * up to 6 display controllers; see dce_v8_0_get_num_crtc() for per-asic counts).
 */
static const u32 crtc_offsets[6] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET
};
  59
/* Per-pin MMIO register offsets for the six hotplug-detect (HPD) pins,
 * indexed by enum amdgpu_hpd_id.
 */
static const u32 hpd_offsets[] =
{
	HPD0_REGISTER_OFFSET,
	HPD1_REGISTER_OFFSET,
	HPD2_REGISTER_OFFSET,
	HPD3_REGISTER_OFFSET,
	HPD4_REGISTER_OFFSET,
	HPD5_REGISTER_OFFSET
};
  69
/* Register offsets for the digital encoder (DIG) blocks.  The first six
 * reuse the CRTC offsets; the seventh entry is a raw dword offset for the
 * extra DIG block (NOTE(review): presumably DIG7 / the VGA-capable block —
 * confirm against the DCE 8 register spec).
 */
static const uint32_t dig_offsets[] = {
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};
  79
/* For each display controller, the DISP_INTERRUPT_STATUS* register that
 * reports its events plus the vblank / vline / hpd bit masks within that
 * register.  Indexed by CRTC id; consumed by the IRQ handlers.
 */
static const struct {
	uint32_t	reg;	/* interrupt status register for this CRTC */
	uint32_t	vblank;	/* vblank interrupt bit in @reg */
	uint32_t	vline;	/* vline interrupt bit in @reg */
	uint32_t	hpd;	/* hotplug interrupt bit in @reg */

} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
 117
 118static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
 119                                     u32 block_offset, u32 reg)
 120{
 121        unsigned long flags;
 122        u32 r;
 123
 124        spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
 125        WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
 126        r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
 127        spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
 128
 129        return r;
 130}
 131
 132static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
 133                                      u32 block_offset, u32 reg, u32 v)
 134{
 135        unsigned long flags;
 136
 137        spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
 138        WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
 139        WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
 140        spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
 141}
 142
 143static bool dce_v8_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
 144{
 145        if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
 146                        CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
 147                return true;
 148        else
 149                return false;
 150}
 151
 152static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
 153{
 154        u32 pos1, pos2;
 155
 156        pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 157        pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 158
 159        if (pos1 != pos2)
 160                return true;
 161        else
 162                return false;
 163}
 164
/**
 * dce_v8_0_vblank_wait - vblank wait asic callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Busy-wait until the requested crtc enters the vertical blanking
 * period (evergreen+).  Returns immediately if @crtc is out of range
 * or the crtc is not enabled.
 */
static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
	/* starts at 100 so the counter-moving sanity check fires on the
	 * very first loop iteration, then every 100 polls after that */
	unsigned i = 100;

	if (crtc >= adev->mode_info.num_crtc)
		return;

	/* nothing to wait for if the crtc is not scanning out */
	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
		return;

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (dce_v8_0_is_in_vblank(adev, crtc)) {
		if (i++ == 100) {
			i = 0;
			/* bail out if the display is frozen so we don't spin forever */
			if (!dce_v8_0_is_counter_moving(adev, crtc))
				break;
		}
	}

	/* now wait for the next vblank to actually begin */
	while (!dce_v8_0_is_in_vblank(adev, crtc)) {
		if (i++ == 100) {
			i = 0;
			if (!dce_v8_0_is_counter_moving(adev, crtc))
				break;
		}
	}
}
 202
 203static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 204{
 205        if (crtc >= adev->mode_info.num_crtc)
 206                return 0;
 207        else
 208                return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 209}
 210
 211static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
 212{
 213        unsigned i;
 214
 215        /* Enable pflip interrupts */
 216        for (i = 0; i < adev->mode_info.num_crtc; i++)
 217                amdgpu_irq_get(adev, &adev->pageflip_irq, i);
 218}
 219
 220static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
 221{
 222        unsigned i;
 223
 224        /* Disable pflip interrupts */
 225        for (i = 0; i < adev->mode_info.num_crtc; i++)
 226                amdgpu_irq_put(adev, &adev->pageflip_irq, i);
 227}
 228
/**
 * dce_v8_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: flip at hsync (asynchronously) instead of vsync
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address.  The write order below is the flip
 * mechanism itself and must not be rearranged: the low dword
 * write latches the new address.
 */
static void dce_v8_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	/* flip at hsync for async, default is vsync */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
	/* update the primary scanout addresses */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	/* writing to the low address triggers the update */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(crtc_base));
	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}
 256
 257static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
 258                                        u32 *vbl, u32 *position)
 259{
 260        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
 261                return -EINVAL;
 262
 263        *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
 264        *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 265
 266        return 0;
 267}
 268
 269/**
 270 * dce_v8_0_hpd_sense - hpd sense callback.
 271 *
 272 * @adev: amdgpu_device pointer
 273 * @hpd: hpd (hotplug detect) pin
 274 *
 275 * Checks if a digital monitor is connected (evergreen+).
 276 * Returns true if connected, false if not connected.
 277 */
 278static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
 279                               enum amdgpu_hpd_id hpd)
 280{
 281        bool connected = false;
 282
 283        if (hpd >= adev->mode_info.num_hpd)
 284                return connected;
 285
 286        if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
 287            DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
 288                connected = true;
 289
 290        return connected;
 291}
 292
 293/**
 294 * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
 295 *
 296 * @adev: amdgpu_device pointer
 297 * @hpd: hpd (hotplug detect) pin
 298 *
 299 * Set the polarity of the hpd pin (evergreen+).
 300 */
 301static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
 302                                      enum amdgpu_hpd_id hpd)
 303{
 304        u32 tmp;
 305        bool connected = dce_v8_0_hpd_sense(adev, hpd);
 306
 307        if (hpd >= adev->mode_info.num_hpd)
 308                return;
 309
 310        tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
 311        if (connected)
 312                tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
 313        else
 314                tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
 315        WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 316}
 317
 318/**
 319 * dce_v8_0_hpd_init - hpd setup callback.
 320 *
 321 * @adev: amdgpu_device pointer
 322 *
 323 * Setup the hpd pins used by the card (evergreen+).
 324 * Enable the pin, set the polarity, and enable the hpd interrupts.
 325 */
 326static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 327{
 328        struct drm_device *dev = adev->ddev;
 329        struct drm_connector *connector;
 330        u32 tmp;
 331
 332        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 333                struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 334
 335                if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 336                        continue;
 337
 338                tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 339                tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
 340                WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 341
 342                if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
 343                    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
 344                        /* don't try to enable hpd on eDP or LVDS avoid breaking the
 345                         * aux dp channel on imac and help (but not completely fix)
 346                         * https://bugzilla.redhat.com/show_bug.cgi?id=726143
 347                         * also avoid interrupt storms during dpms.
 348                         */
 349                        tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 350                        tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
 351                        WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 352                        continue;
 353                }
 354
 355                dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 356                amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 357        }
 358}
 359
 360/**
 361 * dce_v8_0_hpd_fini - hpd tear down callback.
 362 *
 363 * @adev: amdgpu_device pointer
 364 *
 365 * Tear down the hpd pins used by the card (evergreen+).
 366 * Disable the hpd interrupts.
 367 */
 368static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
 369{
 370        struct drm_device *dev = adev->ddev;
 371        struct drm_connector *connector;
 372        u32 tmp;
 373
 374        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 375                struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 376
 377                if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 378                        continue;
 379
 380                tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 381                tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
 382                WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0);
 383
 384                amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 385        }
 386}
 387
/* Return the register holding the HPD GPIO pin states (used by the
 * generic hotplug helpers to read all pins at once).
 */
static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}
 392
 393static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
 394{
 395        u32 crtc_hung = 0;
 396        u32 crtc_status[6];
 397        u32 i, j, tmp;
 398
 399        for (i = 0; i < adev->mode_info.num_crtc; i++) {
 400                if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
 401                        crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
 402                        crtc_hung |= (1 << i);
 403                }
 404        }
 405
 406        for (j = 0; j < 10; j++) {
 407                for (i = 0; i < adev->mode_info.num_crtc; i++) {
 408                        if (crtc_hung & (1 << i)) {
 409                                tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
 410                                if (tmp != crtc_status[i])
 411                                        crtc_hung &= ~(1 << i);
 412                        }
 413                }
 414                if (crtc_hung == 0)
 415                        return false;
 416                udelay(100);
 417        }
 418
 419        return true;
 420}
 421
/* Quiesce display accesses to the memory controller: save VGA state,
 * disable VGA render, and blank every enabled CRTC so the MC can be
 * reprogrammed safely.  State is saved into @save for
 * dce_v8_0_resume_mc_access() to restore.
 */
static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
				    struct amdgpu_mode_mc_save *save)
{
	u32 crtc_enabled, tmp;
	int i;

	/* save current VGA state so resume can restore it */
	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);

	/* blank the display controllers */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
					     CRTC_CONTROL, CRTC_MASTER_EN);
		if (crtc_enabled) {
#if 1
			/* preferred path: blank the CRTC to black instead of
			 * disabling it outright */
			save->crtc_enabled[i] = true;
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
				/*it is correct only for RGB ; black is 0*/
				WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
				tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
			}
			/* let the blank take effect before MC reprogramming */
			mdelay(20);
#else
			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
			tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			save->crtc_enabled[i] = false;
			/* ***** */
#endif
		} else {
			save->crtc_enabled[i] = false;
		}
	}
}
 466
/* Counterpart of dce_v8_0_stop_mc_access(): repoint all scanout surfaces
 * at the (possibly relocated) start of VRAM, unblank the CRTCs that were
 * blanked, and restore the saved VGA state.  Register order and the
 * delays matter; do not reorder.
 */
static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
				      struct amdgpu_mode_mc_save *save)
{
	u32 tmp;
	int i;

	/* update crtc base addresses */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);

		/* unblank only the CRTCs stop_mc_access blanked */
		if (save->crtc_enabled[i]) {
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
			tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
			WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
		}
		mdelay(20);
	}

	/* VGA aperture follows VRAM as well */
	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
	WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));

	/* Unlock vga access */
	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
}
 496
 497static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
 498                                          bool render)
 499{
 500        u32 tmp;
 501
 502        /* Lockout access through VGA aperture*/
 503        tmp = RREG32(mmVGA_HDP_CONTROL);
 504        if (render)
 505                tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
 506        else
 507                tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
 508        WREG32(mmVGA_HDP_CONTROL, tmp);
 509
 510        /* disable VGA render */
 511        tmp = RREG32(mmVGA_RENDER_CONTROL);
 512        if (render)
 513                tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
 514        else
 515                tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
 516        WREG32(mmVGA_RENDER_CONTROL, tmp);
 517}
 518
 519static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev)
 520{
 521        int num_crtc = 0;
 522
 523        switch (adev->asic_type) {
 524        case CHIP_BONAIRE:
 525        case CHIP_HAWAII:
 526                num_crtc = 6;
 527                break;
 528        case CHIP_KAVERI:
 529                num_crtc = 4;
 530                break;
 531        case CHIP_KABINI:
 532        case CHIP_MULLINS:
 533                num_crtc = 2;
 534                break;
 535        default:
 536                num_crtc = 0;
 537        }
 538        return num_crtc;
 539}
 540
 541void dce_v8_0_disable_dce(struct amdgpu_device *adev)
 542{
 543        /*Disable VGA render and enabled crtc, if has DCE engine*/
 544        if (amdgpu_atombios_has_dce_engine_info(adev)) {
 545                u32 tmp;
 546                int crtc_enabled, i;
 547
 548                dce_v8_0_set_vga_render_state(adev, false);
 549
 550                /*Disable crtc*/
 551                for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) {
 552                        crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
 553                                                                         CRTC_CONTROL, CRTC_MASTER_EN);
 554                        if (crtc_enabled) {
 555                                WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 556                                tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
 557                                tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
 558                                WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
 559                                WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 560                        }
 561                }
 562        }
 563}
 564
/* Program the FMT block (truncation/dithering) for the encoder's CRTC so
 * the pipe output depth matches the monitor's bpc.  LVDS/eDP is handled by
 * atom and analog encoders need no FMT, so both return early.
 */
static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS/eDP FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	/* bpc unknown: leave FMT at defaults */
	if (bpc == 0)
		return;

	/* depth field values: 0 = 6 bpc, 1 = 8 bpc, 2 = 10 bpc */
	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
			(0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
			(1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 10:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
			(2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
 637
 638
/* display watermark setup */
/**
 * dce_v8_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 *
 * Setup up the line buffer allocation for
 * the selected display controller (CIK).
 * Returns the line buffer size in pixels.
 */
static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode)
{
	u32 tmp, buffer_alloc, i;
	/* DMIF buffer control registers are strided 0x8 dwords per pipe */
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
	/*
	 * Line Buffer Setup
	 * There are 6 line buffers, one for each display controllers.
	 * There are 3 partitions per LB. Select the number of partitions
	 * to enable based on the display width.  For display widths larger
	 * than 4096, you need use to use 2 display controllers and combine
	 * them using the stereo blender.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		/* tmp = LB_MEMORY_CONFIG encoding; buffer_alloc = DMIF buffers */
		if (mode->crtc_hdisplay < 1920) {
			tmp = 1;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 2560) {
			tmp = 2;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 4096) {
			tmp = 0;
			/* APUs get fewer DMIF buffers than dGPUs */
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		} else {
			DRM_DEBUG_KMS("Mode too big for LB!\n");
			tmp = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		}
	} else {
		/* disabled controller: minimal config, no DMIF buffers */
		tmp = 1;
		buffer_alloc = 0;
	}

	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
	      (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
	      (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));

	/* request the DMIF buffer allocation and wait for the hardware
	 * to acknowledge completion (bounded by usec_timeout) */
	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		/* translate the LB config back to a size in pixels */
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 1:
			return 1920 * 2;
		case 2:
			return 2560 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}
 714
 715/**
 716 * cik_get_number_of_dram_channels - get the number of dram channels
 717 *
 718 * @adev: amdgpu_device pointer
 719 *
 720 * Look up the number of video ram channels (CIK).
 721 * Used for display watermark bandwidth calculations
 722 * Returns the number of dram channels
 723 */
 724static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
 725{
 726        u32 tmp = RREG32(mmMC_SHARED_CHMAP);
 727
 728        switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
 729        case 0:
 730        default:
 731                return 1;
 732        case 1:
 733                return 2;
 734        case 2:
 735                return 4;
 736        case 3:
 737                return 8;
 738        case 4:
 739                return 3;
 740        case 5:
 741                return 6;
 742        case 6:
 743                return 10;
 744        case 7:
 745                return 12;
 746        case 8:
 747                return 16;
 748        }
 749}
 750
/* Input parameters for the display watermark calculations below.
 * Filled in per-CRTC and passed to the dce_v8_0_*_bandwidth helpers.
 */
struct dce8_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;   /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};
 766
/**
 * dce_v8_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth:
	 * channels * 4 bytes/pin-group * yclk(MHz) * 0.7 efficiency.
	 * All math is 20.12 fixed point; the multiply order matters for
	 * truncation and must not be rearranged.
	 */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);	/* kHz -> MHz */
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a); /* 7/10 = 0.7 */
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}
 795
 796/**
 797 * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
 798 *
 799 * @wm: watermark calculation data
 800 *
 801 * Calculate the dram bandwidth used for display (CIK).
 802 * Used for display watermark bandwidth calculations
 803 * Returns the dram bandwidth for display in MBytes/s
 804 */
 805static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
 806{
 807        /* Calculate DRAM Bandwidth and the part allocated to display. */
 808        fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
 809        fixed20_12 yclk, dram_channels, bandwidth;
 810        fixed20_12 a;
 811
 812        a.full = dfixed_const(1000);
 813        yclk.full = dfixed_const(wm->yclk);
 814        yclk.full = dfixed_div(yclk, a);
 815        dram_channels.full = dfixed_const(wm->dram_channels * 4);
 816        a.full = dfixed_const(10);
 817        disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
 818        disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
 819        bandwidth.full = dfixed_mul(dram_channels, yclk);
 820        bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
 821
 822        return dfixed_trunc(bandwidth);
 823}
 824
 825/**
 826 * dce_v8_0_data_return_bandwidth - get the data return bandwidth
 827 *
 828 * @wm: watermark calculation data
 829 *
 830 * Calculate the data return bandwidth used for display (CIK).
 831 * Used for display watermark bandwidth calculations
 832 * Returns the data return bandwidth in MBytes/s
 833 */
 834static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
 835{
 836        /* Calculate the display Data return Bandwidth */
 837        fixed20_12 return_efficiency; /* 0.8 */
 838        fixed20_12 sclk, bandwidth;
 839        fixed20_12 a;
 840
 841        a.full = dfixed_const(1000);
 842        sclk.full = dfixed_const(wm->sclk);
 843        sclk.full = dfixed_div(sclk, a);
 844        a.full = dfixed_const(10);
 845        return_efficiency.full = dfixed_const(8);
 846        return_efficiency.full = dfixed_div(return_efficiency, a);
 847        a.full = dfixed_const(32);
 848        bandwidth.full = dfixed_mul(a, sclk);
 849        bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
 850
 851        return dfixed_trunc(bandwidth);
 852}
 853
 854/**
 855 * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
 856 *
 857 * @wm: watermark calculation data
 858 *
 859 * Calculate the dmif bandwidth used for display (CIK).
 860 * Used for display watermark bandwidth calculations
 861 * Returns the dmif bandwidth in MBytes/s
 862 */
 863static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
 864{
 865        /* Calculate the DMIF Request Bandwidth */
 866        fixed20_12 disp_clk_request_efficiency; /* 0.8 */
 867        fixed20_12 disp_clk, bandwidth;
 868        fixed20_12 a, b;
 869
 870        a.full = dfixed_const(1000);
 871        disp_clk.full = dfixed_const(wm->disp_clk);
 872        disp_clk.full = dfixed_div(disp_clk, a);
 873        a.full = dfixed_const(32);
 874        b.full = dfixed_mul(a, disp_clk);
 875
 876        a.full = dfixed_const(10);
 877        disp_clk_request_efficiency.full = dfixed_const(8);
 878        disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
 879
 880        bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
 881
 882        return dfixed_trunc(bandwidth);
 883}
 884
 885/**
 886 * dce_v8_0_available_bandwidth - get the min available bandwidth
 887 *
 888 * @wm: watermark calculation data
 889 *
 890 * Calculate the min available bandwidth used for display (CIK).
 891 * Used for display watermark bandwidth calculations
 892 * Returns the min available bandwidth in MBytes/s
 893 */
 894static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
 895{
 896        /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
 897        u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
 898        u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
 899        u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);
 900
 901        return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
 902}
 903
 904/**
 905 * dce_v8_0_average_bandwidth - get the average available bandwidth
 906 *
 907 * @wm: watermark calculation data
 908 *
 909 * Calculate the average available bandwidth used for display (CIK).
 910 * Used for display watermark bandwidth calculations
 911 * Returns the average available bandwidth in MBytes/s
 912 */
 913static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
 914{
 915        /* Calculate the display mode Average Bandwidth
 916         * DisplayMode should contain the source and destination dimensions,
 917         * timing, etc.
 918         */
 919        fixed20_12 bpp;
 920        fixed20_12 line_time;
 921        fixed20_12 src_width;
 922        fixed20_12 bandwidth;
 923        fixed20_12 a;
 924
 925        a.full = dfixed_const(1000);
 926        line_time.full = dfixed_const(wm->active_time + wm->blank_time);
 927        line_time.full = dfixed_div(line_time, a);
 928        bpp.full = dfixed_const(wm->bytes_per_pixel);
 929        src_width.full = dfixed_const(wm->src_width);
 930        bandwidth.full = dfixed_mul(src_width, bpp);
 931        bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
 932        bandwidth.full = dfixed_div(bandwidth, line_time);
 933
 934        return dfixed_trunc(bandwidth);
 935}
 936
 937/**
 938 * dce_v8_0_latency_watermark - get the latency watermark
 939 *
 940 * @wm: watermark calculation data
 941 *
 942 * Calculate the latency watermark (CIK).
 943 * Used for display watermark bandwidth calculations
 944 * Returns the latency watermark in ns
 945 */
static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
	/* time to return one 512-byte, 8-beat chunk at the available bandwidth */
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	/* time to return one cursor line pair (128 bytes * 4) */
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	/* worst-case wait while the other heads' requests are serviced */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	/* no active heads: nothing to hide, avoid dividing by num_heads below */
	if (wm->num_heads == 0)
		return 0;

	/* heavy downscaling, many scaler taps, or interlaced scaling
	 * requires fetching more source lines per destination line.
	 */
	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	/* line buffer fill rate: bounded by this head's share of the
	 * available bandwidth, the dmif transfer limit, and the display
	 * clock consumption rate.
	 */
	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);
	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
	tmp = min(dfixed_trunc(a), tmp);

	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);

	/* time to fill the source lines needed for one destination line */
	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	/* if the line fill exceeds the active time, the overrun adds to
	 * the latency that must be hidden.
	 */
	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}
 995
 996/**
 997 * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 998 * average and available dram bandwidth
 999 *
1000 * @wm: watermark calculation data
1001 *
1002 * Check if the display average bandwidth fits in the display
1003 * dram bandwidth (CIK).
1004 * Used for display watermark bandwidth calculations
1005 * Returns true if the display fits, false if not.
1006 */
1007static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
1008{
1009        if (dce_v8_0_average_bandwidth(wm) <=
1010            (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
1011                return true;
1012        else
1013                return false;
1014}
1015
1016/**
1017 * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
1018 * average and available bandwidth
1019 *
1020 * @wm: watermark calculation data
1021 *
1022 * Check if the display average bandwidth fits in the display
1023 * available bandwidth (CIK).
1024 * Used for display watermark bandwidth calculations
1025 * Returns true if the display fits, false if not.
1026 */
1027static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
1028{
1029        if (dce_v8_0_average_bandwidth(wm) <=
1030            (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
1031                return true;
1032        else
1033                return false;
1034}
1035
1036/**
1037 * dce_v8_0_check_latency_hiding - check latency hiding
1038 *
1039 * @wm: watermark calculation data
1040 *
1041 * Check latency hiding (CIK).
1042 * Used for display watermark bandwidth calculations
1043 * Returns true if the display fits, false if not.
1044 */
1045static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
1046{
1047        u32 lb_partitions = wm->lb_size / wm->src_width;
1048        u32 line_time = wm->active_time + wm->blank_time;
1049        u32 latency_tolerant_lines;
1050        u32 latency_hiding;
1051        fixed20_12 a;
1052
1053        a.full = dfixed_const(1);
1054        if (wm->vsc.full > a.full)
1055                latency_tolerant_lines = 1;
1056        else {
1057                if (lb_partitions <= (wm->vtaps + 1))
1058                        latency_tolerant_lines = 1;
1059                else
1060                        latency_tolerant_lines = 2;
1061        }
1062
1063        latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
1064
1065        if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
1066                return true;
1067        else
1068                return false;
1069}
1070
1071/**
1072 * dce_v8_0_program_watermarks - program display watermarks
1073 *
1074 * @adev: amdgpu_device pointer
1075 * @amdgpu_crtc: the selected display controller
1076 * @lb_size: line buffer size
1077 * @num_heads: number of display controllers in use
1078 *
1079 * Calculate and program the display watermarks for the
1080 * selected display controller (CIK).
1081 */
static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce8_wm_params wm_low, wm_high;
	u32 active_time;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		/* active/total line times from the mode (clock is in kHz);
		 * line_time is clamped to the 16-bit register field.
		 */
		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
					    (u32)mode->clock);
		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
					  (u32)mode->clock);
		line_time = min(line_time, (u32)65535);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			/* false = query the highest dpm level */
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = active_time;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_high.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v8_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			/* true = query the lowest dpm level */
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = active_time;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_low.num_heads = num_heads;

		/* set for low clocks */
		latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v8_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}
		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* program watermark set A (high clocks): select it in the mask
	 * register, write the urgency watermark, then do the same for set B.
	 */
	/* select wm A */
	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = wm_mask;
	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* select wm B */
	tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* restore original selection */
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
	amdgpu_crtc->wm_low = latency_watermark_b;
	/* Save number of lines the linebuffer leads before the scanout */
	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
1207
1208/**
1209 * dce_v8_0_bandwidth_update - program display watermarks
1210 *
1211 * @adev: amdgpu_device pointer
1212 *
1213 * Calculate and program the display watermarks and line
1214 * buffer allocation (CIK).
1215 */
1216static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
1217{
1218        struct drm_display_mode *mode = NULL;
1219        u32 num_heads = 0, lb_size;
1220        int i;
1221
1222        amdgpu_update_display_priority(adev);
1223
1224        for (i = 0; i < adev->mode_info.num_crtc; i++) {
1225                if (adev->mode_info.crtcs[i]->base.enabled)
1226                        num_heads++;
1227        }
1228        for (i = 0; i < adev->mode_info.num_crtc; i++) {
1229                mode = &adev->mode_info.crtcs[i]->base.mode;
1230                lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
1231                dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
1232                                            lb_size, num_heads);
1233        }
1234}
1235
1236static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
1237{
1238        int i;
1239        u32 offset, tmp;
1240
1241        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1242                offset = adev->mode_info.audio.pin[i].offset;
1243                tmp = RREG32_AUDIO_ENDPT(offset,
1244                                         ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1245                if (((tmp &
1246                AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
1247                AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
1248                        adev->mode_info.audio.pin[i].connected = false;
1249                else
1250                        adev->mode_info.audio.pin[i].connected = true;
1251        }
1252}
1253
1254static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
1255{
1256        int i;
1257
1258        dce_v8_0_audio_get_connected_pins(adev);
1259
1260        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1261                if (adev->mode_info.audio.pin[i].connected)
1262                        return &adev->mode_info.audio.pin[i];
1263        }
1264        DRM_ERROR("No connected audio pins found!\n");
1265        return NULL;
1266}
1267
1268static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1269{
1270        struct amdgpu_device *adev = encoder->dev->dev_private;
1271        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1272        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1273        u32 offset;
1274
1275        if (!dig || !dig->afmt || !dig->afmt->pin)
1276                return;
1277
1278        offset = dig->afmt->offset;
1279
1280        WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
1281               (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
1282}
1283
1284static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
1285                                                struct drm_display_mode *mode)
1286{
1287        struct amdgpu_device *adev = encoder->dev->dev_private;
1288        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1289        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1290        struct drm_connector *connector;
1291        struct amdgpu_connector *amdgpu_connector = NULL;
1292        u32 tmp = 0, offset;
1293
1294        if (!dig || !dig->afmt || !dig->afmt->pin)
1295                return;
1296
1297        offset = dig->afmt->pin->offset;
1298
1299        list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1300                if (connector->encoder == encoder) {
1301                        amdgpu_connector = to_amdgpu_connector(connector);
1302                        break;
1303                }
1304        }
1305
1306        if (!amdgpu_connector) {
1307                DRM_ERROR("Couldn't find encoder's connector\n");
1308                return;
1309        }
1310
1311        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
1312                if (connector->latency_present[1])
1313                        tmp =
1314                        (connector->video_latency[1] <<
1315                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1316                        (connector->audio_latency[1] <<
1317                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1318                else
1319                        tmp =
1320                        (0 <<
1321                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1322                        (0 <<
1323                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1324        } else {
1325                if (connector->latency_present[0])
1326                        tmp =
1327                        (connector->video_latency[0] <<
1328                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1329                        (connector->audio_latency[0] <<
1330                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1331                else
1332                        tmp =
1333                        (0 <<
1334                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1335                        (0 <<
1336                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1337
1338        }
1339        WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1340}
1341
1342static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1343{
1344        struct amdgpu_device *adev = encoder->dev->dev_private;
1345        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1346        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1347        struct drm_connector *connector;
1348        struct amdgpu_connector *amdgpu_connector = NULL;
1349        u32 offset, tmp;
1350        u8 *sadb = NULL;
1351        int sad_count;
1352
1353        if (!dig || !dig->afmt || !dig->afmt->pin)
1354                return;
1355
1356        offset = dig->afmt->pin->offset;
1357
1358        list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1359                if (connector->encoder == encoder) {
1360                        amdgpu_connector = to_amdgpu_connector(connector);
1361                        break;
1362                }
1363        }
1364
1365        if (!amdgpu_connector) {
1366                DRM_ERROR("Couldn't find encoder's connector\n");
1367                return;
1368        }
1369
1370        sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
1371        if (sad_count < 0) {
1372                DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1373                sad_count = 0;
1374        }
1375
1376        /* program the speaker allocation */
1377        tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1378        tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
1379                AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
1380        /* set HDMI mode */
1381        tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
1382        if (sad_count)
1383                tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
1384        else
1385                tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo */
1386        WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1387
1388        kfree(sadb);
1389}
1390
/* Translate the connector's EDID Short Audio Descriptors into the
 * azalia pin's per-format descriptor registers.
 */
static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	/* one descriptor register per CEA audio coding type */
	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count <= 0) {
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
		return;
	}
	BUG_ON(!sads);

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 value = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		/* pick the SAD with the most channels for this coding type */
		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					value = (sad->channels <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
						(sad->byte2 <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
						(sad->freq <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
					max_channels = sad->channels;
				}

				/* PCM: merge all matching SADs' frequencies;
				 * other formats: take the first match only.
				 */
				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		value |= (stereo_freqs <<
			AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);

		WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
	}

	/* drm_edid_to_sad() allocated the SAD array; we own it here */
	kfree(sads);
}
1476
1477static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
1478                                  struct amdgpu_audio_pin *pin,
1479                                  bool enable)
1480{
1481        if (!pin)
1482                return;
1483
1484        WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1485                enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1486}
1487
/* azalia endpoint register offsets, relative to the first pin (0x1780) */
static const u32 pin_offsets[7] =
{
	(0x1780 - 0x1780),
	(0x1786 - 0x1780),
	(0x178c - 0x1780),
	(0x1792 - 0x1780),
	(0x1798 - 0x1780),
	(0x179d - 0x1780),
	(0x17a4 - 0x1780),
};
1498
1499static int dce_v8_0_audio_init(struct amdgpu_device *adev)
1500{
1501        int i;
1502
1503        if (!amdgpu_audio)
1504                return 0;
1505
1506        adev->mode_info.audio.enabled = true;
1507
1508        if (adev->asic_type == CHIP_KAVERI) /* KV: 4 streams, 7 endpoints */
1509                adev->mode_info.audio.num_pins = 7;
1510        else if ((adev->asic_type == CHIP_KABINI) ||
1511                 (adev->asic_type == CHIP_MULLINS)) /* KB/ML: 2 streams, 3 endpoints */
1512                adev->mode_info.audio.num_pins = 3;
1513        else if ((adev->asic_type == CHIP_BONAIRE) ||
1514                 (adev->asic_type == CHIP_HAWAII))/* BN/HW: 6 streams, 7 endpoints */
1515                adev->mode_info.audio.num_pins = 7;
1516        else
1517                adev->mode_info.audio.num_pins = 3;
1518
1519        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1520                adev->mode_info.audio.pin[i].channels = -1;
1521                adev->mode_info.audio.pin[i].rate = -1;
1522                adev->mode_info.audio.pin[i].bits_per_sample = -1;
1523                adev->mode_info.audio.pin[i].status_bits = 0;
1524                adev->mode_info.audio.pin[i].category_code = 0;
1525                adev->mode_info.audio.pin[i].connected = false;
1526                adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1527                adev->mode_info.audio.pin[i].id = i;
1528                /* disable audio.  it will be set up later */
1529                /* XXX remove once we switch to ip funcs */
1530                dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1531        }
1532
1533        return 0;
1534}
1535
1536static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
1537{
1538        int i;
1539
1540        if (!amdgpu_audio)
1541                return;
1542
1543        if (!adev->mode_info.audio.enabled)
1544                return;
1545
1546        for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1547                dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1548
1549        adev->mode_info.audio.enabled = false;
1550}
1551
1552/*
1553 * update the N and CTS parameters for a given pixel clock rate
1554 */
1555static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
1556{
1557        struct drm_device *dev = encoder->dev;
1558        struct amdgpu_device *adev = dev->dev_private;
1559        struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1560        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1561        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1562        uint32_t offset = dig->afmt->offset;
1563
1564        WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
1565        WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);
1566
1567        WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
1568        WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);
1569
1570        WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
1571        WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
1572}
1573
1574/*
1575 * build a HDMI Video Info Frame
1576 */
1577static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1578                                               void *buffer, size_t size)
1579{
1580        struct drm_device *dev = encoder->dev;
1581        struct amdgpu_device *adev = dev->dev_private;
1582        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1583        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1584        uint32_t offset = dig->afmt->offset;
1585        uint8_t *frame = buffer + 3;
1586        uint8_t *header = buffer;
1587
1588        WREG32(mmAFMT_AVI_INFO0 + offset,
1589                frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1590        WREG32(mmAFMT_AVI_INFO1 + offset,
1591                frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
1592        WREG32(mmAFMT_AVI_INFO2 + offset,
1593                frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
1594        WREG32(mmAFMT_AVI_INFO3 + offset,
1595                frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
1596}
1597
1598static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1599{
1600        struct drm_device *dev = encoder->dev;
1601        struct amdgpu_device *adev = dev->dev_private;
1602        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1603        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1604        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1605        u32 dto_phase = 24 * 1000;
1606        u32 dto_modulo = clock;
1607
1608        if (!dig || !dig->afmt)
1609                return;
1610
1611        /* XXX two dtos; generally use dto0 for hdmi */
1612        /* Express [24MHz / target pixel clock] as an exact rational
1613         * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
1614         * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
1615         */
1616        WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT));
1617        WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
1618        WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
1619}
1620
1621/*
1622 * update the info frames with the data from the current display mode
1623 */
1624static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
1625                                  struct drm_display_mode *mode)
1626{
1627        struct drm_device *dev = encoder->dev;
1628        struct amdgpu_device *adev = dev->dev_private;
1629        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1630        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1631        struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1632        u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1633        struct hdmi_avi_infoframe frame;
1634        uint32_t offset, val;
1635        ssize_t err;
1636        int bpc = 8;
1637
1638        if (!dig || !dig->afmt)
1639                return;
1640
1641        /* Silent, r600_hdmi_enable will raise WARN for us */
1642        if (!dig->afmt->enabled)
1643                return;
1644
1645        offset = dig->afmt->offset;
1646
1647        /* hdmi deep color mode general control packets setup, if bpc > 8 */
1648        if (encoder->crtc) {
1649                struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1650                bpc = amdgpu_crtc->bpc;
1651        }
1652
1653        /* disable audio prior to setting up hw */
1654        dig->afmt->pin = dce_v8_0_audio_get_pin(adev);
1655        dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1656
1657        dce_v8_0_audio_set_dto(encoder, mode->clock);
1658
1659        WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1660               HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */
1661
1662        WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
1663
1664        val = RREG32(mmHDMI_CONTROL + offset);
1665        val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1666        val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK;
1667
1668        switch (bpc) {
1669        case 0:
1670        case 6:
1671        case 8:
1672        case 16:
1673        default:
1674                DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
1675                          connector->name, bpc);
1676                break;
1677        case 10:
1678                val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1679                val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1680                DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
1681                          connector->name);
1682                break;
1683        case 12:
1684                val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1685                val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1686                DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
1687                          connector->name);
1688                break;
1689        }
1690
1691        WREG32(mmHDMI_CONTROL + offset, val);
1692
1693        WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1694               HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */
1695               HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */
1696               HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */
1697
1698        WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
1699               HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */
1700               HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */
1701
1702        WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
1703               AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */
1704
1705        WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
1706               (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */
1707
1708        WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
1709
1710        WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
1711               (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the default audio delay */
1712               (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be suffient for all audio modes and small enough for all hblanks */
1713
1714        WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1715               AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */
1716
1717        /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
1718
1719        if (bpc > 8)
1720                WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1721                       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to sent ACR packets when required */
1722        else
1723                WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1724                       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
1725                       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to sent ACR packets when required */
1726
1727        dce_v8_0_afmt_update_ACR(encoder, mode->clock);
1728
1729        WREG32(mmAFMT_60958_0 + offset,
1730               (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));
1731
1732        WREG32(mmAFMT_60958_1 + offset,
1733               (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));
1734
1735        WREG32(mmAFMT_60958_2 + offset,
1736               (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
1737               (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
1738               (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
1739               (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
1740               (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
1741               (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));
1742
1743        dce_v8_0_audio_write_speaker_allocation(encoder);
1744
1745
1746        WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
1747               (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
1748
1749        dce_v8_0_afmt_audio_select_pin(encoder);
1750        dce_v8_0_audio_write_sad_regs(encoder);
1751        dce_v8_0_audio_write_latency_fields(encoder, mode);
1752
1753        err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
1754        if (err < 0) {
1755                DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1756                return;
1757        }
1758
1759        err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1760        if (err < 0) {
1761                DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1762                return;
1763        }
1764
1765        dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
1766
1767        WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
1768                  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
1769                  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for audio info values to be updated */
1770
1771        WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
1772                 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
1773                 ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);
1774
1775        WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1776                  AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */
1777
1778        WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
1779        WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
1780        WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
1781        WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);
1782
1783        /* enable audio after setting up hw */
1784        dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
1785}
1786
1787static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1788{
1789        struct drm_device *dev = encoder->dev;
1790        struct amdgpu_device *adev = dev->dev_private;
1791        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1792        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1793
1794        if (!dig || !dig->afmt)
1795                return;
1796
1797        /* Silent, r600_hdmi_enable will raise WARN for us */
1798        if (enable && dig->afmt->enabled)
1799                return;
1800        if (!enable && !dig->afmt->enabled)
1801                return;
1802
1803        if (!enable && dig->afmt->pin) {
1804                dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1805                dig->afmt->pin = NULL;
1806        }
1807
1808        dig->afmt->enabled = enable;
1809
1810        DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1811                  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1812}
1813
1814static int dce_v8_0_afmt_init(struct amdgpu_device *adev)
1815{
1816        int i;
1817
1818        for (i = 0; i < adev->mode_info.num_dig; i++)
1819                adev->mode_info.afmt[i] = NULL;
1820
1821        /* DCE8 has audio blocks tied to DIG encoders */
1822        for (i = 0; i < adev->mode_info.num_dig; i++) {
1823                adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1824                if (adev->mode_info.afmt[i]) {
1825                        adev->mode_info.afmt[i]->offset = dig_offsets[i];
1826                        adev->mode_info.afmt[i]->id = i;
1827                } else {
1828                        int j;
1829                        for (j = 0; j < i; j++) {
1830                                kfree(adev->mode_info.afmt[j]);
1831                                adev->mode_info.afmt[j] = NULL;
1832                        }
1833                        return -ENOMEM;
1834                }
1835        }
1836        return 0;
1837}
1838
1839static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
1840{
1841        int i;
1842
1843        for (i = 0; i < adev->mode_info.num_dig; i++) {
1844                kfree(adev->mode_info.afmt[i]);
1845                adev->mode_info.afmt[i] = NULL;
1846        }
1847}
1848
/* Per-CRTC VGA control registers, indexed by crtc_id (D1..D6).
 * Bit 0 of each register gates VGA mode for that display controller.
 */
static const u32 vga_control_regs[6] =
{
	mmD1VGA_CONTROL,
	mmD2VGA_CONTROL,
	mmD3VGA_CONTROL,
	mmD4VGA_CONTROL,
	mmD5VGA_CONTROL,
	mmD6VGA_CONTROL,
};
1858
1859static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
1860{
1861        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1862        struct drm_device *dev = crtc->dev;
1863        struct amdgpu_device *adev = dev->dev_private;
1864        u32 vga_control;
1865
1866        vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1867        if (enable)
1868                WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1869        else
1870                WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1871}
1872
1873static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
1874{
1875        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1876        struct drm_device *dev = crtc->dev;
1877        struct amdgpu_device *adev = dev->dev_private;
1878
1879        if (enable)
1880                WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1881        else
1882                WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1883}
1884
1885static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
1886                                     struct drm_framebuffer *fb,
1887                                     int x, int y, int atomic)
1888{
1889        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1890        struct drm_device *dev = crtc->dev;
1891        struct amdgpu_device *adev = dev->dev_private;
1892        struct amdgpu_framebuffer *amdgpu_fb;
1893        struct drm_framebuffer *target_fb;
1894        struct drm_gem_object *obj;
1895        struct amdgpu_bo *abo;
1896        uint64_t fb_location, tiling_flags;
1897        uint32_t fb_format, fb_pitch_pixels;
1898        u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1899        u32 pipe_config;
1900        u32 viewport_w, viewport_h;
1901        int r;
1902        bool bypass_lut = false;
1903        struct drm_format_name_buf format_name;
1904
1905        /* no fb bound */
1906        if (!atomic && !crtc->primary->fb) {
1907                DRM_DEBUG_KMS("No FB bound\n");
1908                return 0;
1909        }
1910
1911        if (atomic) {
1912                amdgpu_fb = to_amdgpu_framebuffer(fb);
1913                target_fb = fb;
1914        } else {
1915                amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
1916                target_fb = crtc->primary->fb;
1917        }
1918
1919        /* If atomic, assume fb object is pinned & idle & fenced and
1920         * just update base pointers
1921         */
1922        obj = amdgpu_fb->obj;
1923        abo = gem_to_amdgpu_bo(obj);
1924        r = amdgpu_bo_reserve(abo, false);
1925        if (unlikely(r != 0))
1926                return r;
1927
1928        if (atomic) {
1929                fb_location = amdgpu_bo_gpu_offset(abo);
1930        } else {
1931                r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
1932                if (unlikely(r != 0)) {
1933                        amdgpu_bo_unreserve(abo);
1934                        return -EINVAL;
1935                }
1936        }
1937
1938        amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1939        amdgpu_bo_unreserve(abo);
1940
1941        pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1942
1943        switch (target_fb->format->format) {
1944        case DRM_FORMAT_C8:
1945                fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1946                             (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1947                break;
1948        case DRM_FORMAT_XRGB4444:
1949        case DRM_FORMAT_ARGB4444:
1950                fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1951                             (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1952#ifdef __BIG_ENDIAN
1953                fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1954#endif
1955                break;
1956        case DRM_FORMAT_XRGB1555:
1957        case DRM_FORMAT_ARGB1555:
1958                fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1959                             (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1960#ifdef __BIG_ENDIAN
1961                fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1962#endif
1963                break;
1964        case DRM_FORMAT_BGRX5551:
1965        case DRM_FORMAT_BGRA5551:
1966                fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1967                             (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1968#ifdef __BIG_ENDIAN
1969                fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1970#endif
1971                break;
1972        case DRM_FORMAT_RGB565:
1973                fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1974                             (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1975#ifdef __BIG_ENDIAN
1976                fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1977#endif
1978                break;
1979        case DRM_FORMAT_XRGB8888:
1980        case DRM_FORMAT_ARGB8888:
1981                fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1982                             (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1983#ifdef __BIG_ENDIAN
1984                fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1985#endif
1986                break;
1987        case DRM_FORMAT_XRGB2101010:
1988        case DRM_FORMAT_ARGB2101010:
1989                fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1990                             (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1991#ifdef __BIG_ENDIAN
1992                fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1993#endif
1994                /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1995                bypass_lut = true;
1996                break;
1997        case DRM_FORMAT_BGRX1010102:
1998        case DRM_FORMAT_BGRA1010102:
1999                fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2000                             (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2001#ifdef __BIG_ENDIAN
2002                fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2003#endif
2004                /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
2005                bypass_lut = true;
2006                break;
2007        default:
2008                DRM_ERROR("Unsupported screen format %s\n",
2009                          drm_get_format_name(target_fb->format->format, &format_name));
2010                return -EINVAL;
2011        }
2012
2013        if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
2014                unsigned bankw, bankh, mtaspect, tile_split, num_banks;
2015
2016                bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2017                bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2018                mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2019                tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2020                num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2021
2022                fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
2023                fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
2024                fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
2025                fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
2026                fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
2027                fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
2028                fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT);
2029        } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
2030                fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
2031        }
2032
2033        fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
2034
2035        dce_v8_0_vga_enable(crtc, false);
2036
2037        /* Make sure surface address is updated at vertical blank rather than
2038         * horizontal blank
2039         */
2040        WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
2041
2042        WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2043               upper_32_bits(fb_location));
2044        WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2045               upper_32_bits(fb_location));
2046        WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2047               (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
2048        WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2049               (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
2050        WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2051        WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
2052
2053        /*
2054         * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
2055         * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
2056         * retain the full precision throughout the pipeline.
2057         */
2058        WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
2059                 (bypass_lut ? LUT_10BIT_BYPASS_EN : 0),
2060                 ~LUT_10BIT_BYPASS_EN);
2061
2062        if (bypass_lut)
2063                DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
2064
2065        WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
2066        WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
2067        WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
2068        WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
2069        WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
2070        WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
2071
2072        fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
2073        WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
2074
2075        dce_v8_0_grph_enable(crtc, true);
2076
2077        WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2078               target_fb->height);
2079
2080        x &= ~3;
2081        y &= ~1;
2082        WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2083               (x << 16) | y);
2084        viewport_w = crtc->mode.hdisplay;
2085        viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2086        WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2087               (viewport_w << 16) | viewport_h);
2088
2089        /* set pageflip to happen anywhere in vblank interval */
2090        WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
2091
2092        if (!atomic && fb && fb != crtc->primary->fb) {
2093                amdgpu_fb = to_amdgpu_framebuffer(fb);
2094                abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2095                r = amdgpu_bo_reserve(abo, true);
2096                if (unlikely(r != 0))
2097                        return r;
2098                amdgpu_bo_unpin(abo);
2099                amdgpu_bo_unreserve(abo);
2100        }
2101
2102        /* Bytes per pixel may have changed */
2103        dce_v8_0_bandwidth_update(adev);
2104
2105        return 0;
2106}
2107
2108static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
2109                                    struct drm_display_mode *mode)
2110{
2111        struct drm_device *dev = crtc->dev;
2112        struct amdgpu_device *adev = dev->dev_private;
2113        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2114
2115        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2116                WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset,
2117                       LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT);
2118        else
2119                WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2120}
2121
/*
 * Program the per-CRTC gamma/LUT pipeline: bypass input CSC, prescale,
 * degamma, gamut remap, regamma and output CSC, then upload the 256-entry
 * 10-bit-per-component gamma table from the crtc's lut_r/g/b arrays.
 * The write-enable/RW-mode/index registers must be set before the color
 * writes, so the register order below matters.
 */
static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);

	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
	       ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
		(INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
		(INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));

	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);

	/* black/white level offsets: full range */
	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);

	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);

	/* enable writes to all three color channels, start at index 0 */
	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);

	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
	for (i = 0; i < 256; i++) {
		/* 10 bits per component, packed R[29:20] G[19:10] B[9:0] */
		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
		       (amdgpu_crtc->lut_r[i] << 20) |
		       (amdgpu_crtc->lut_g[i] << 10) |
		       (amdgpu_crtc->lut_b[i] << 0));
	}

	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
	       ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
		(GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
		(REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
	       ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
		(OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
	/* XXX this only needs to be programmed once per crtc at startup,
	 * not sure where the best place for it is
	 */
	WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset,
	       ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK);
}
2184
2185static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder)
2186{
2187        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2188        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2189
2190        switch (amdgpu_encoder->encoder_id) {
2191        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2192                if (dig->linkb)
2193                        return 1;
2194                else
2195                        return 0;
2196                break;
2197        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2198                if (dig->linkb)
2199                        return 3;
2200                else
2201                        return 2;
2202                break;
2203        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2204                if (dig->linkb)
2205                        return 5;
2206                else
2207                        return 4;
2208                break;
2209        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2210                return 6;
2211                break;
2212        default:
2213                DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2214                return 0;
2215        }
2216}
2217
2218/**
2219 * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc.
2220 *
2221 * @crtc: drm crtc
2222 *
2223 * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2224 * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2225 * monitors a dedicated PPLL must be used.  If a particular board has
2226 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2227 * as there is no need to program the PLL itself.  If we are not able to
2228 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2229 * avoid messing up an existing monitor.
2230 *
2231 * Asic specific PLL information
2232 *
2233 * DCE 8.x
2234 * KB/KV
2235 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2236 * CI
2237 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2238 *
2239 */
2240static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
2241{
2242        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2243        struct drm_device *dev = crtc->dev;
2244        struct amdgpu_device *adev = dev->dev_private;
2245        u32 pll_in_use;
2246        int pll;
2247
2248        if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2249                if (adev->clock.dp_extclk)
2250                        /* skip PPLL programming if using ext clock */
2251                        return ATOM_PPLL_INVALID;
2252                else {
2253                        /* use the same PPLL for all DP monitors */
2254                        pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2255                        if (pll != ATOM_PPLL_INVALID)
2256                                return pll;
2257                }
2258        } else {
2259                /* use the same PPLL for all monitors with the same clock */
2260                pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2261                if (pll != ATOM_PPLL_INVALID)
2262                        return pll;
2263        }
2264        /* otherwise, pick one of the plls */
2265        if ((adev->asic_type == CHIP_KABINI) ||
2266            (adev->asic_type == CHIP_MULLINS)) {
2267                /* KB/ML has PPLL1 and PPLL2 */
2268                pll_in_use = amdgpu_pll_get_use_mask(crtc);
2269                if (!(pll_in_use & (1 << ATOM_PPLL2)))
2270                        return ATOM_PPLL2;
2271                if (!(pll_in_use & (1 << ATOM_PPLL1)))
2272                        return ATOM_PPLL1;
2273                DRM_ERROR("unable to allocate a PPLL\n");
2274                return ATOM_PPLL_INVALID;
2275        } else {
2276                /* CI/KV has PPLL0, PPLL1, and PPLL2 */
2277                pll_in_use = amdgpu_pll_get_use_mask(crtc);
2278                if (!(pll_in_use & (1 << ATOM_PPLL2)))
2279                        return ATOM_PPLL2;
2280                if (!(pll_in_use & (1 << ATOM_PPLL1)))
2281                        return ATOM_PPLL1;
2282                if (!(pll_in_use & (1 << ATOM_PPLL0)))
2283                        return ATOM_PPLL0;
2284                DRM_ERROR("unable to allocate a PPLL\n");
2285                return ATOM_PPLL_INVALID;
2286        }
2287        return ATOM_PPLL_INVALID;
2288}
2289
2290static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2291{
2292        struct amdgpu_device *adev = crtc->dev->dev_private;
2293        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2294        uint32_t cur_lock;
2295
2296        cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2297        if (lock)
2298                cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2299        else
2300                cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2301        WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2302}
2303
2304static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
2305{
2306        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2307        struct amdgpu_device *adev = crtc->dev->dev_private;
2308
2309        WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2310                   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2311                   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2312}
2313
2314static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
2315{
2316        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2317        struct amdgpu_device *adev = crtc->dev->dev_private;
2318
2319        WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2320               upper_32_bits(amdgpu_crtc->cursor_addr));
2321        WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2322               lower_32_bits(amdgpu_crtc->cursor_addr));
2323
2324        WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2325                   CUR_CONTROL__CURSOR_EN_MASK |
2326                   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2327                   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2328}
2329
2330static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
2331                                       int x, int y)
2332{
2333        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2334        struct amdgpu_device *adev = crtc->dev->dev_private;
2335        int xorigin = 0, yorigin = 0;
2336
2337        amdgpu_crtc->cursor_x = x;
2338        amdgpu_crtc->cursor_y = y;
2339
2340        /* avivo cursor are offset into the total surface */
2341        x += crtc->x;
2342        y += crtc->y;
2343        DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2344
2345        if (x < 0) {
2346                xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2347                x = 0;
2348        }
2349        if (y < 0) {
2350                yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2351                y = 0;
2352        }
2353
2354        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2355        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2356        WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2357               ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2358
2359        return 0;
2360}
2361
2362static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
2363                                     int x, int y)
2364{
2365        int ret;
2366
2367        dce_v8_0_lock_cursor(crtc, true);
2368        ret = dce_v8_0_cursor_move_locked(crtc, x, y);
2369        dce_v8_0_lock_cursor(crtc, false);
2370
2371        return ret;
2372}
2373
/* drm_crtc_funcs.cursor_set2 callback.
 *
 * handle == 0 turns the cursor off; otherwise look up the GEM object,
 * pin it in VRAM, program position/size/hotspot and show it.  The
 * previously pinned cursor BO (if any) is unpinned and released at the
 * end, after the new one is active, so the scanout never points at an
 * unpinned buffer.
 */
static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
				     struct drm_file *file_priv,
				     uint32_t handle,
				     uint32_t width,
				     uint32_t height,
				     int32_t hot_x,
				     int32_t hot_y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_bo *aobj;
	int ret;

	if (!handle) {
		/* turn off cursor */
		dce_v8_0_hide_cursor(crtc);
		obj = NULL;
		goto unpin;
	}

	/* reject cursors larger than the hw limits set in crtc_init */
	if ((width > amdgpu_crtc->max_cursor_width) ||
	    (height > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
		return -ENOENT;
	}

	/* pin the new cursor BO in VRAM; cursor_addr receives its GPU address */
	aobj = gem_to_amdgpu_bo(obj);
	ret = amdgpu_bo_reserve(aobj, false);
	if (ret != 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
	amdgpu_bo_unreserve(aobj);
	if (ret) {
		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	dce_v8_0_lock_cursor(crtc, true);

	if (width != amdgpu_crtc->cursor_width ||
	    height != amdgpu_crtc->cursor_height ||
	    hot_x != amdgpu_crtc->cursor_hot_x ||
	    hot_y != amdgpu_crtc->cursor_hot_y) {
		int x, y;

		/* keep the on-screen tip position stable when the hotspot changes */
		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;

		dce_v8_0_cursor_move_locked(crtc, x, y);

		amdgpu_crtc->cursor_width = width;
		amdgpu_crtc->cursor_height = height;
		amdgpu_crtc->cursor_hot_x = hot_x;
		amdgpu_crtc->cursor_hot_y = hot_y;
	}

	dce_v8_0_show_cursor(crtc);
	dce_v8_0_lock_cursor(crtc, false);

unpin:
	/* release the previous cursor BO only after the new one is live */
	if (amdgpu_crtc->cursor_bo) {
		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
		ret = amdgpu_bo_reserve(aobj, true);
		if (likely(ret == 0)) {
			amdgpu_bo_unpin(aobj);
			amdgpu_bo_unreserve(aobj);
		}
		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
	}

	amdgpu_crtc->cursor_bo = obj;
	return 0;
}
2457
2458static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2459{
2460        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2461
2462        if (amdgpu_crtc->cursor_bo) {
2463                dce_v8_0_lock_cursor(crtc, true);
2464
2465                dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2466                                            amdgpu_crtc->cursor_y);
2467
2468                dce_v8_0_show_cursor(crtc);
2469
2470                dce_v8_0_lock_cursor(crtc, false);
2471        }
2472}
2473
2474static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2475                                   u16 *blue, uint32_t size,
2476                                   struct drm_modeset_acquire_ctx *ctx)
2477{
2478        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2479        int i;
2480
2481        /* userspace palettes are always correct as is */
2482        for (i = 0; i < size; i++) {
2483                amdgpu_crtc->lut_r[i] = red[i] >> 6;
2484                amdgpu_crtc->lut_g[i] = green[i] >> 6;
2485                amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2486        }
2487        dce_v8_0_crtc_load_lut(crtc);
2488
2489        return 0;
2490}
2491
/* drm_crtc_funcs.destroy callback: unregister the crtc from drm and
 * free the containing amdgpu_crtc (allocated in dce_v8_0_crtc_init).
 */
static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(amdgpu_crtc);
}
2499
/* Legacy (non-atomic) CRTC ops: cursor, gamma and destroy are
 * DCE8-specific; set_config and page flips use the shared amdgpu
 * helpers.
 */
static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
	.cursor_set2 = dce_v8_0_crtc_cursor_set2,
	.cursor_move = dce_v8_0_crtc_cursor_move,
	.gamma_set = dce_v8_0_crtc_gamma_set,
	.set_config = amdgpu_crtc_set_config,
	.destroy = dce_v8_0_crtc_destroy,
	.page_flip_target = amdgpu_crtc_page_flip_target,
};
2508
/* drm_crtc_helper_funcs.dpms callback.
 *
 * Powers the crtc up or down via atombios.  The vga_enable(true/false)
 * bracketing around the blank calls works around required register
 * access ordering; the sequence should not be reordered.
 */
static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	unsigned type;

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		amdgpu_crtc->enabled = true;
		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
		dce_v8_0_vga_enable(crtc, true);
		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
		dce_v8_0_vga_enable(crtc, false);
		/* Make sure VBLANK and PFLIP interrupts are still enabled */
		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
		amdgpu_irq_update(adev, &adev->crtc_irq, type);
		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
		drm_crtc_vblank_on(crtc);
		dce_v8_0_crtc_load_lut(crtc);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		drm_crtc_vblank_off(crtc);
		if (amdgpu_crtc->enabled) {
			dce_v8_0_vga_enable(crtc, true);
			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
			dce_v8_0_vga_enable(crtc, false);
		}
		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
		amdgpu_crtc->enabled = false;
		break;
	}
	/* adjust pm to dpms */
	amdgpu_pm_compute_clocks(adev);
}
2546
/* drm_crtc_helper_funcs.prepare callback: quiesce the crtc before a
 * modeset.  Power gating must be released and the crtc locked before
 * it is turned off.
 */
static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
{
	/* disable crtc pair power gating before programming */
	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
2554
/* drm_crtc_helper_funcs.commit callback: re-enable the crtc after a
 * modeset and release the lock taken in dce_v8_0_crtc_prepare().
 */
static void dce_v8_0_crtc_commit(struct drm_crtc *crtc)
{
	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
}
2560
/* drm_crtc_helper_funcs.disable callback.
 *
 * Fully shuts the crtc down: blanks it, unpins the scanout buffer,
 * disables the graphics pipe, re-enables power gating and finally
 * tears down the PPLL — unless another enabled crtc still shares it.
 */
static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_atom_ss ss;
	int i;

	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
	if (crtc->primary->fb) {
		int r;
		struct amdgpu_framebuffer *amdgpu_fb;
		struct amdgpu_bo *abo;

		/* unpin the front buffer this crtc was scanning out */
		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
		r = amdgpu_bo_reserve(abo, true);
		if (unlikely(r))
			DRM_ERROR("failed to reserve abo before unpin\n");
		else {
			amdgpu_bo_unpin(abo);
			amdgpu_bo_unreserve(abo);
		}
	}
	/* disable the GRPH */
	dce_v8_0_grph_enable(crtc, false);

	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);

	/* keep the PPLL running if any other enabled crtc still uses it */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i] &&
		    adev->mode_info.crtcs[i]->enabled &&
		    i != amdgpu_crtc->crtc_id &&
		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
			/* one other crtc is using this pll don't turn
			 * off the pll
			 */
			goto done;
		}
	}

	switch (amdgpu_crtc->pll_id) {
	case ATOM_PPLL1:
	case ATOM_PPLL2:
		/* disable the ppll */
		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	case ATOM_PPLL0:
		/* disable the ppll; only CI/KV parts have PPLL0 */
		if ((adev->asic_type == CHIP_KAVERI) ||
		    (adev->asic_type == CHIP_BONAIRE) ||
		    (adev->asic_type == CHIP_HAWAII))
			amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
						  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	default:
		break;
	}
done:
	/* forget the modeset state so the next mode_fixup re-picks the pll */
	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
}
2626
/* drm_crtc_helper_funcs.mode_set callback.
 *
 * Programs the pll, timing, scanout base, overscan and scaler for the
 * new mode.  Requires that mode_fixup already ran (adjusted_clock set);
 * returns -EINVAL otherwise.
 */
static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y, struct drm_framebuffer *old_fb)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (!amdgpu_crtc->adjusted_clock)
		return -EINVAL;

	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
	dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
	amdgpu_atombios_crtc_scaler_setup(crtc);
	dce_v8_0_cursor_reset(crtc);
	/* update the hw mode for dpm */
	amdgpu_crtc->hw_mode = *adjusted_mode;

	return 0;
}
2648
/* drm_crtc_helper_funcs.mode_fixup callback.
 *
 * Binds the encoder/connector to the crtc, applies scaling fixups,
 * pre-computes the pll parameters and allocates a PPLL.  Returns false
 * to reject the mode when any of these steps fails.
 */
static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
				     const struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc == crtc) {
			amdgpu_crtc->encoder = encoder;
			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
			break;
		}
	}
	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
		amdgpu_crtc->encoder = NULL;
		amdgpu_crtc->connector = NULL;
		return false;
	}
	if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
		return false;
	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
		return false;
	/* pick pll */
	amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc);
	/* if we can't get a PPLL for a non-DP encoder, fail
	 * (DP with an external clock can run with ATOM_PPLL_INVALID)
	 */
	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
		return false;

	return true;
}
2683
/* drm_crtc_helper_funcs.mode_set_base callback: reprogram the scanout
 * base for a pan/flip without a full modeset (non-atomic path).
 */
static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
				  struct drm_framebuffer *old_fb)
{
	return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
}
2689
2690static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2691                                         struct drm_framebuffer *fb,
2692                                         int x, int y, enum mode_set_atomic state)
2693{
2694       return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1);
2695}
2696
/* Legacy modeset helper ops wired into drm_crtc_helper_add() in
 * dce_v8_0_crtc_init().
 */
static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
	.dpms = dce_v8_0_crtc_dpms,
	.mode_fixup = dce_v8_0_crtc_mode_fixup,
	.mode_set = dce_v8_0_crtc_mode_set,
	.mode_set_base = dce_v8_0_crtc_set_base,
	.mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic,
	.prepare = dce_v8_0_crtc_prepare,
	.commit = dce_v8_0_crtc_commit,
	.load_lut = dce_v8_0_crtc_load_lut,
	.disable = dce_v8_0_crtc_disable,
};
2708
2709static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
2710{
2711        struct amdgpu_crtc *amdgpu_crtc;
2712        int i;
2713
2714        amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2715                              (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2716        if (amdgpu_crtc == NULL)
2717                return -ENOMEM;
2718
2719        drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
2720
2721        drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2722        amdgpu_crtc->crtc_id = index;
2723        adev->mode_info.crtcs[index] = amdgpu_crtc;
2724
2725        amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
2726        amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
2727        adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2728        adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2729
2730        for (i = 0; i < 256; i++) {
2731                amdgpu_crtc->lut_r[i] = i << 2;
2732                amdgpu_crtc->lut_g[i] = i << 2;
2733                amdgpu_crtc->lut_b[i] = i << 2;
2734        }
2735
2736        amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2737
2738        amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2739        amdgpu_crtc->adjusted_clock = 0;
2740        amdgpu_crtc->encoder = NULL;
2741        amdgpu_crtc->connector = NULL;
2742        drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);
2743
2744        return 0;
2745}
2746
2747static int dce_v8_0_early_init(void *handle)
2748{
2749        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2750
2751        adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg;
2752        adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;
2753
2754        dce_v8_0_set_display_funcs(adev);
2755        dce_v8_0_set_irq_funcs(adev);
2756
2757        adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev);
2758
2759        switch (adev->asic_type) {
2760        case CHIP_BONAIRE:
2761        case CHIP_HAWAII:
2762                adev->mode_info.num_hpd = 6;
2763                adev->mode_info.num_dig = 6;
2764                break;
2765        case CHIP_KAVERI:
2766                adev->mode_info.num_hpd = 6;
2767                adev->mode_info.num_dig = 7;
2768                break;
2769        case CHIP_KABINI:
2770        case CHIP_MULLINS:
2771                adev->mode_info.num_hpd = 6;
2772                adev->mode_info.num_dig = 6; /* ? */
2773                break;
2774        default:
2775                /* FIXME: not supported yet */
2776                return -EINVAL;
2777        }
2778
2779        return 0;
2780}
2781
2782static int dce_v8_0_sw_init(void *handle)
2783{
2784        int r, i;
2785        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2786
2787        for (i = 0; i < adev->mode_info.num_crtc; i++) {
2788                r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2789                if (r)
2790                        return r;
2791        }
2792
2793        for (i = 8; i < 20; i += 2) {
2794                r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2795                if (r)
2796                        return r;
2797        }
2798
2799        /* HPD hotplug */
2800        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2801        if (r)
2802                return r;
2803
2804        adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2805
2806        adev->ddev->mode_config.async_page_flip = true;
2807
2808        adev->ddev->mode_config.max_width = 16384;
2809        adev->ddev->mode_config.max_height = 16384;
2810
2811        adev->ddev->mode_config.preferred_depth = 24;
2812        adev->ddev->mode_config.prefer_shadow = 1;
2813
2814        adev->ddev->mode_config.fb_base = adev->mc.aper_base;
2815
2816        r = amdgpu_modeset_create_props(adev);
2817        if (r)
2818                return r;
2819
2820        adev->ddev->mode_config.max_width = 16384;
2821        adev->ddev->mode_config.max_height = 16384;
2822
2823        /* allocate crtcs */
2824        for (i = 0; i < adev->mode_info.num_crtc; i++) {
2825                r = dce_v8_0_crtc_init(adev, i);
2826                if (r)
2827                        return r;
2828        }
2829
2830        if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2831                amdgpu_print_display_setup(adev->ddev);
2832        else
2833                return -EINVAL;
2834
2835        /* setup afmt */
2836        r = dce_v8_0_afmt_init(adev);
2837        if (r)
2838                return r;
2839
2840        r = dce_v8_0_audio_init(adev);
2841        if (r)
2842                return r;
2843
2844        drm_kms_helper_poll_init(adev->ddev);
2845
2846        adev->mode_info.mode_config_initialized = true;
2847        return 0;
2848}
2849
/* IP sw_fini hook: tear down in reverse order of sw_init — stop
 * hotplug polling, shut down audio/afmt, then destroy the mode config
 * (which destroys the crtcs allocated in sw_init).
 */
static int dce_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->mode_info.bios_hardcoded_edid);

	drm_kms_helper_poll_fini(adev->ddev);

	dce_v8_0_audio_fini(adev);

	dce_v8_0_afmt_fini(adev);

	drm_mode_config_cleanup(adev->ddev);
	adev->mode_info.mode_config_initialized = false;

	return 0;
}
2867
2868static int dce_v8_0_hw_init(void *handle)
2869{
2870        int i;
2871        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2872
2873        /* init dig PHYs, disp eng pll */
2874        amdgpu_atombios_encoder_init_dig(adev);
2875        amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2876
2877        /* initialize hpd */
2878        dce_v8_0_hpd_init(adev);
2879
2880        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2881                dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2882        }
2883
2884        dce_v8_0_pageflip_interrupt_init(adev);
2885
2886        return 0;
2887}
2888
2889static int dce_v8_0_hw_fini(void *handle)
2890{
2891        int i;
2892        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2893
2894        dce_v8_0_hpd_fini(adev);
2895
2896        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2897                dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2898        }
2899
2900        dce_v8_0_pageflip_interrupt_fini(adev);
2901
2902        return 0;
2903}
2904
/* IP suspend hook: identical to hw_fini for the display block. */
static int dce_v8_0_suspend(void *handle)
{
	return dce_v8_0_hw_fini(handle);
}
2909
/* IP resume hook: re-run hw_init, then restore the panel backlight
 * level if an encoder with backlight control was registered.
 */
static int dce_v8_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = dce_v8_0_hw_init(handle);

	/* turn on the BL */
	if (adev->mode_info.bl_encoder) {
		u8 bl_level = amdgpu_display_backlight_get_level(adev,
								  adev->mode_info.bl_encoder);
		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
						    bl_level);
	}

	return ret;
}
2927
/* IP is_idle hook: the display block exposes no busy status to poll,
 * so it always reports idle.
 */
static bool dce_v8_0_is_idle(void *handle)
{
	return true;
}
2932
/* IP wait_for_idle hook: nothing to wait on (see dce_v8_0_is_idle). */
static int dce_v8_0_wait_for_idle(void *handle)
{
	return 0;
}
2937
/* IP soft_reset hook: if the display controller looks hung, pulse its
 * SRBM soft-reset bit.  The read-backs after each write flush the
 * posted register writes; the delays let the reset settle.
 */
static int dce_v8_0_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0, tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (dce_v8_0_is_display_hung(adev))
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}
	return 0;
}
2964
2965static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2966                                                     int crtc,
2967                                                     enum amdgpu_interrupt_state state)
2968{
2969        u32 reg_block, lb_interrupt_mask;
2970
2971        if (crtc >= adev->mode_info.num_crtc) {
2972                DRM_DEBUG("invalid crtc %d\n", crtc);
2973                return;
2974        }
2975
2976        switch (crtc) {
2977        case 0:
2978                reg_block = CRTC0_REGISTER_OFFSET;
2979                break;
2980        case 1:
2981                reg_block = CRTC1_REGISTER_OFFSET;
2982                break;
2983        case 2:
2984                reg_block = CRTC2_REGISTER_OFFSET;
2985                break;
2986        case 3:
2987                reg_block = CRTC3_REGISTER_OFFSET;
2988                break;
2989        case 4:
2990                reg_block = CRTC4_REGISTER_OFFSET;
2991                break;
2992        case 5:
2993                reg_block = CRTC5_REGISTER_OFFSET;
2994                break;
2995        default:
2996                DRM_DEBUG("invalid crtc %d\n", crtc);
2997                return;
2998        }
2999
3000        switch (state) {
3001        case AMDGPU_IRQ_STATE_DISABLE:
3002                lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3003                lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
3004                WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3005                break;
3006        case AMDGPU_IRQ_STATE_ENABLE:
3007                lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3008                lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
3009                WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3010                break;
3011        default:
3012                break;
3013        }
3014}
3015
3016static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
3017                                                    int crtc,
3018                                                    enum amdgpu_interrupt_state state)
3019{
3020        u32 reg_block, lb_interrupt_mask;
3021
3022        if (crtc >= adev->mode_info.num_crtc) {
3023                DRM_DEBUG("invalid crtc %d\n", crtc);
3024                return;
3025        }
3026
3027        switch (crtc) {
3028        case 0:
3029                reg_block = CRTC0_REGISTER_OFFSET;
3030                break;
3031        case 1:
3032                reg_block = CRTC1_REGISTER_OFFSET;
3033                break;
3034        case 2:
3035                reg_block = CRTC2_REGISTER_OFFSET;
3036                break;
3037        case 3:
3038                reg_block = CRTC3_REGISTER_OFFSET;
3039                break;
3040        case 4:
3041                reg_block = CRTC4_REGISTER_OFFSET;
3042                break;
3043        case 5:
3044                reg_block = CRTC5_REGISTER_OFFSET;
3045                break;
3046        default:
3047                DRM_DEBUG("invalid crtc %d\n", crtc);
3048                return;
3049        }
3050
3051        switch (state) {
3052        case AMDGPU_IRQ_STATE_DISABLE:
3053                lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3054                lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
3055                WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3056                break;
3057        case AMDGPU_IRQ_STATE_ENABLE:
3058                lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3059                lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
3060                WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3061                break;
3062        default:
3063                break;
3064        }
3065}
3066
3067static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
3068                                            struct amdgpu_irq_src *src,
3069                                            unsigned type,
3070                                            enum amdgpu_interrupt_state state)
3071{
3072        u32 dc_hpd_int_cntl;
3073
3074        if (type >= adev->mode_info.num_hpd) {
3075                DRM_DEBUG("invalid hdp %d\n", type);
3076                return 0;
3077        }
3078
3079        switch (state) {
3080        case AMDGPU_IRQ_STATE_DISABLE:
3081                dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
3082                dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3083                WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
3084                break;
3085        case AMDGPU_IRQ_STATE_ENABLE:
3086                dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
3087                dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3088                WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
3089                break;
3090        default:
3091                break;
3092        }
3093
3094        return 0;
3095}
3096
3097static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
3098                                             struct amdgpu_irq_src *src,
3099                                             unsigned type,
3100                                             enum amdgpu_interrupt_state state)
3101{
3102        switch (type) {
3103        case AMDGPU_CRTC_IRQ_VBLANK1:
3104                dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3105                break;
3106        case AMDGPU_CRTC_IRQ_VBLANK2:
3107                dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3108                break;
3109        case AMDGPU_CRTC_IRQ_VBLANK3:
3110                dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3111                break;
3112        case AMDGPU_CRTC_IRQ_VBLANK4:
3113                dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3114                break;
3115        case AMDGPU_CRTC_IRQ_VBLANK5:
3116                dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3117                break;
3118        case AMDGPU_CRTC_IRQ_VBLANK6:
3119                dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3120                break;
3121        case AMDGPU_CRTC_IRQ_VLINE1:
3122                dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state);
3123                break;
3124        case AMDGPU_CRTC_IRQ_VLINE2:
3125                dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state);
3126                break;
3127        case AMDGPU_CRTC_IRQ_VLINE3:
3128                dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state);
3129                break;
3130        case AMDGPU_CRTC_IRQ_VLINE4:
3131                dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state);
3132                break;
3133        case AMDGPU_CRTC_IRQ_VLINE5:
3134                dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state);
3135                break;
3136        case AMDGPU_CRTC_IRQ_VLINE6:
3137                dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state);
3138                break;
3139        default:
3140                break;
3141        }
3142        return 0;
3143}
3144
3145static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
3146                             struct amdgpu_irq_src *source,
3147                             struct amdgpu_iv_entry *entry)
3148{
3149        unsigned crtc = entry->src_id - 1;
3150        uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3151        unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
3152
3153        switch (entry->src_data[0]) {
3154        case 0: /* vblank */
3155                if (disp_int & interrupt_status_offsets[crtc].vblank)
3156                        WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
3157                else
3158                        DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3159
3160                if (amdgpu_irq_enabled(adev, source, irq_type)) {
3161                        drm_handle_vblank(adev->ddev, crtc);
3162                }
3163                DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3164                break;
3165        case 1: /* vline */
3166                if (disp_int & interrupt_status_offsets[crtc].vline)
3167                        WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
3168                else
3169                        DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3170
3171                DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3172                break;
3173        default:
3174                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3175                break;
3176        }
3177
3178        return 0;
3179}
3180
3181static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
3182                                                 struct amdgpu_irq_src *src,
3183                                                 unsigned type,
3184                                                 enum amdgpu_interrupt_state state)
3185{
3186        u32 reg;
3187
3188        if (type >= adev->mode_info.num_crtc) {
3189                DRM_ERROR("invalid pageflip crtc %d\n", type);
3190                return -EINVAL;
3191        }
3192
3193        reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3194        if (state == AMDGPU_IRQ_STATE_DISABLE)
3195                WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3196                       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3197        else
3198                WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3199                       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3200
3201        return 0;
3202}
3203
3204static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
3205                                struct amdgpu_irq_src *source,
3206                                struct amdgpu_iv_entry *entry)
3207{
3208        unsigned long flags;
3209        unsigned crtc_id;
3210        struct amdgpu_crtc *amdgpu_crtc;
3211        struct amdgpu_flip_work *works;
3212
3213        crtc_id = (entry->src_id - 8) >> 1;
3214        amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3215
3216        if (crtc_id >= adev->mode_info.num_crtc) {
3217                DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3218                return -EINVAL;
3219        }
3220
3221        if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3222            GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3223                WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3224                       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3225
3226        /* IRQ could occur when in initial stage */
3227        if (amdgpu_crtc == NULL)
3228                return 0;
3229
3230        spin_lock_irqsave(&adev->ddev->event_lock, flags);
3231        works = amdgpu_crtc->pflip_works;
3232        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
3233                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3234                                                "AMDGPU_FLIP_SUBMITTED(%d)\n",
3235                                                amdgpu_crtc->pflip_status,
3236                                                AMDGPU_FLIP_SUBMITTED);
3237                spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3238                return 0;
3239        }
3240
3241        /* page flip completed. clean up */
3242        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3243        amdgpu_crtc->pflip_works = NULL;
3244
3245        /* wakeup usersapce */
3246        if (works->event)
3247                drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3248
3249        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3250
3251        drm_crtc_vblank_put(&amdgpu_crtc->base);
3252        schedule_work(&works->unpin_work);
3253
3254        return 0;
3255}
3256
3257static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
3258                            struct amdgpu_irq_src *source,
3259                            struct amdgpu_iv_entry *entry)
3260{
3261        uint32_t disp_int, mask, tmp;
3262        unsigned hpd;
3263
3264        if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3265                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3266                return 0;
3267        }
3268
3269        hpd = entry->src_data[0];
3270        disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3271        mask = interrupt_status_offsets[hpd].hpd;
3272
3273        if (disp_int & mask) {
3274                tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
3275                tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3276                WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
3277                schedule_work(&adev->hotplug_work);
3278                DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3279        }
3280
3281        return 0;
3282
3283}
3284
/* No clockgating control is implemented for this display block; the
 * hook exists only to satisfy the amd_ip_funcs interface.
 */
static int dce_v8_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        return 0;
}
3290
/* No powergating control is implemented for this display block; the
 * hook exists only to satisfy the amd_ip_funcs interface.
 */
static int dce_v8_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        return 0;
}
3296
/* IP-level lifecycle hooks for the DCE 8.x display block; referenced by
 * the amdgpu_ip_block_version descriptors at the bottom of this file.
 */
static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
        .name = "dce_v8_0",
        .early_init = dce_v8_0_early_init,
        .late_init = NULL,
        .sw_init = dce_v8_0_sw_init,
        .sw_fini = dce_v8_0_sw_fini,
        .hw_init = dce_v8_0_hw_init,
        .hw_fini = dce_v8_0_hw_fini,
        .suspend = dce_v8_0_suspend,
        .resume = dce_v8_0_resume,
        .is_idle = dce_v8_0_is_idle,
        .wait_for_idle = dce_v8_0_wait_for_idle,
        .soft_reset = dce_v8_0_soft_reset,
        .set_clockgating_state = dce_v8_0_set_clockgating_state,
        .set_powergating_state = dce_v8_0_set_powergating_state,
};
3313
3314static void
3315dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
3316                          struct drm_display_mode *mode,
3317                          struct drm_display_mode *adjusted_mode)
3318{
3319        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3320
3321        amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3322
3323        /* need to call this here rather than in prepare() since we need some crtc info */
3324        amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3325
3326        /* set scaler clears this on some chips */
3327        dce_v8_0_set_interleave(encoder->crtc, mode);
3328
3329        if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3330                dce_v8_0_afmt_enable(encoder, true);
3331                dce_v8_0_afmt_setmode(encoder, adjusted_mode);
3332        }
3333}
3334
/* Prepare an encoder for a mode set: pick a DIG encoder/AFMT block for
 * digital outputs, lock the atombios scratch registers, route the i2c
 * port, power eDP panels on, and program the crtc source and FMT blocks.
 * The scratch-register lock taken here is released in encoder_commit().
 */
static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
{
        struct amdgpu_device *adev = encoder->dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);

        /* digital outputs (or DP bridges) need a DIG encoder assignment */
        if ((amdgpu_encoder->active_device &
             (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
            (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
             ENCODER_OBJECT_ID_NONE)) {
                struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
                if (dig) {
                        dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder);
                        if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
                                dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
                }
        }

        amdgpu_atombios_scratch_regs_lock(adev, true);

        if (connector) {
                struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

                /* select the clock/data port if it uses a router */
                if (amdgpu_connector->router.cd_valid)
                        amdgpu_i2c_router_select_cd_port(amdgpu_connector);

                /* turn eDP panel on for mode set */
                if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
                        amdgpu_atombios_encoder_set_edp_panel_power(connector,
                                                             ATOM_TRANSMITTER_ACTION_POWER_ON);
        }

        /* this is needed for the pll/ss setup to work correctly in some cases */
        amdgpu_atombios_encoder_set_crtc_source(encoder);
        /* set up the FMT blocks */
        dce_v8_0_program_fmt(encoder);
}
3373
3374static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
3375{
3376        struct drm_device *dev = encoder->dev;
3377        struct amdgpu_device *adev = dev->dev_private;
3378
3379        /* need to call this here as we need the crtc set up */
3380        amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3381        amdgpu_atombios_scratch_regs_lock(adev, false);
3382}
3383
3384static void dce_v8_0_encoder_disable(struct drm_encoder *encoder)
3385{
3386        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3387        struct amdgpu_encoder_atom_dig *dig;
3388
3389        amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3390
3391        if (amdgpu_atombios_encoder_is_digital(encoder)) {
3392                if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3393                        dce_v8_0_afmt_enable(encoder, false);
3394                dig = amdgpu_encoder->enc_priv;
3395                dig->dig_encoder = -1;
3396        }
3397        amdgpu_encoder->active_device = 0;
3398}
3399
/* these are handled by the primary encoders */
static void dce_v8_0_ext_prepare(struct drm_encoder *encoder)
{
	/* intentionally empty */
}
3405
/* no-op: commit is handled by the primary encoder */
static void dce_v8_0_ext_commit(struct drm_encoder *encoder)
{
	/* intentionally empty */
}
3410
/* no-op: mode programming is handled by the primary encoder */
static void
dce_v8_0_ext_mode_set(struct drm_encoder *encoder,
                      struct drm_display_mode *mode,
                      struct drm_display_mode *adjusted_mode)
{
	/* intentionally empty */
}
3418
/* no-op: disable is handled by the primary encoder */
static void dce_v8_0_ext_disable(struct drm_encoder *encoder)
{
	/* intentionally empty */
}
3423
/* no-op: dpms is handled by the primary encoder */
static void
dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
{
	/* intentionally empty */
}
3429
/* helper vtable for external (bridge) encoders: all hooks are stubs
 * because the attached primary encoder does the real work */
static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
        .dpms = dce_v8_0_ext_dpms,
        .prepare = dce_v8_0_ext_prepare,
        .mode_set = dce_v8_0_ext_mode_set,
        .commit = dce_v8_0_ext_commit,
        .disable = dce_v8_0_ext_disable,
        /* no detect for TMDS/LVDS yet */
};
3438
/* helper vtable for digital (DIG/UNIPHY) encoders */
static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = {
        .dpms = amdgpu_atombios_encoder_dpms,
        .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
        .prepare = dce_v8_0_encoder_prepare,
        .mode_set = dce_v8_0_encoder_mode_set,
        .commit = dce_v8_0_encoder_commit,
        .disable = dce_v8_0_encoder_disable,
        .detect = amdgpu_atombios_encoder_dig_detect,
};
3448
/* helper vtable for analog (DAC) encoders; note there is no .disable */
static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = {
        .dpms = amdgpu_atombios_encoder_dpms,
        .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
        .prepare = dce_v8_0_encoder_prepare,
        .mode_set = dce_v8_0_encoder_mode_set,
        .commit = dce_v8_0_encoder_commit,
        .detect = amdgpu_atombios_encoder_dac_detect,
};
3457
/* Free an encoder: shut down backlight control for LCD devices, then
 * release the private data, the DRM bookkeeping and the wrapper itself
 * (order matters: enc_priv must go before the containing encoder). */
static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder)
{
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
                amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
        kfree(amdgpu_encoder->enc_priv);
        drm_encoder_cleanup(encoder);
        kfree(amdgpu_encoder);
}
3467
/* base encoder vtable; only destruction is driver-specific */
static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = {
        .destroy = dce_v8_0_encoder_destroy,
};
3471
/* Register one display encoder with DRM.
 *
 * @adev: amdgpu device pointer
 * @encoder_enum: atombios encoder enum (object id + enum bits)
 * @supported_device: ATOM_DEVICE_* bitmask this encoder can drive
 * @caps: encoder capability flags stored on the new encoder
 *
 * If an encoder with the same enum already exists, its device mask is
 * extended instead of allocating a duplicate.  Otherwise a new
 * amdgpu_encoder is allocated and initialized with the DRM encoder
 * type and helper vtable matching its atombios object id.
 */
static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
                                 uint32_t encoder_enum,
                                 uint32_t supported_device,
                                 u16 caps)
{
        struct drm_device *dev = adev->ddev;
        struct drm_encoder *encoder;
        struct amdgpu_encoder *amdgpu_encoder;

        /* see if we already added it */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                amdgpu_encoder = to_amdgpu_encoder(encoder);
                if (amdgpu_encoder->encoder_enum == encoder_enum) {
                        amdgpu_encoder->devices |= supported_device;
                        return;
                }

        }

        /* add a new one */
        amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
        if (!amdgpu_encoder)
                return;

        encoder = &amdgpu_encoder->base;
        /* an encoder may feed any of the crtcs present on this asic */
        switch (adev->mode_info.num_crtc) {
        case 1:
                encoder->possible_crtcs = 0x1;
                break;
        case 2:
        default:
                encoder->possible_crtcs = 0x3;
                break;
        case 4:
                encoder->possible_crtcs = 0xf;
                break;
        case 6:
                encoder->possible_crtcs = 0x3f;
                break;
        }

        amdgpu_encoder->enc_priv = NULL;

        amdgpu_encoder->encoder_enum = encoder_enum;
        amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
        amdgpu_encoder->devices = supported_device;
        amdgpu_encoder->rmx_type = RMX_OFF;
        amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
        amdgpu_encoder->is_ext_encoder = false;
        amdgpu_encoder->caps = caps;

        /* pick DRM encoder type, helper vtable and private data by object id */
        switch (amdgpu_encoder->encoder_id) {
        case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
        case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
                drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
                                 DRM_MODE_ENCODER_DAC, NULL);
                drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
                break;
        case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
                /* digital encoders: classify by the devices they can drive */
                if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
                        amdgpu_encoder->rmx_type = RMX_FULL;
                        drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
                                         DRM_MODE_ENCODER_LVDS, NULL);
                        amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
                } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
                        drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
                                         DRM_MODE_ENCODER_DAC, NULL);
                        amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
                } else {
                        drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
                                         DRM_MODE_ENCODER_TMDS, NULL);
                        amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
                }
                drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
                break;
        case ENCODER_OBJECT_ID_SI170B:
        case ENCODER_OBJECT_ID_CH7303:
        case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
        case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
        case ENCODER_OBJECT_ID_TITFP513:
        case ENCODER_OBJECT_ID_VT1623:
        case ENCODER_OBJECT_ID_HDMI_SI1930:
        case ENCODER_OBJECT_ID_TRAVIS:
        case ENCODER_OBJECT_ID_NUTMEG:
                /* these are handled by the primary encoders */
                amdgpu_encoder->is_ext_encoder = true;
                if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
                        drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
                                         DRM_MODE_ENCODER_LVDS, NULL);
                else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
                        drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
                                         DRM_MODE_ENCODER_DAC, NULL);
                else
                        drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
                                         DRM_MODE_ENCODER_TMDS, NULL);
                drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
                break;
        }
}
3575
/* display callback table exposed through adev->mode_info.funcs */
static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
        .set_vga_render_state = &dce_v8_0_set_vga_render_state,
        .bandwidth_update = &dce_v8_0_bandwidth_update,
        .vblank_get_counter = &dce_v8_0_vblank_get_counter,
        .vblank_wait = &dce_v8_0_vblank_wait,
        .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
        .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
        .hpd_sense = &dce_v8_0_hpd_sense,
        .hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
        .hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
        .page_flip = &dce_v8_0_page_flip,
        .page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
        .add_encoder = &dce_v8_0_encoder_add,
        .add_connector = &amdgpu_connector_add,
        .stop_mc_access = &dce_v8_0_stop_mc_access,
        .resume_mc_access = &dce_v8_0_resume_mc_access,
};
3593
3594static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
3595{
3596        if (adev->mode_info.funcs == NULL)
3597                adev->mode_info.funcs = &dce_v8_0_display_funcs;
3598}
3599
/* vblank/vline interrupt source: enable hook + handler */
static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
        .set = dce_v8_0_set_crtc_interrupt_state,
        .process = dce_v8_0_crtc_irq,
};
3604
/* page-flip completion interrupt source: enable hook + handler */
static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
        .set = dce_v8_0_set_pageflip_interrupt_state,
        .process = dce_v8_0_pageflip_irq,
};
3609
/* hotplug-detect interrupt source: enable hook + handler */
static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
        .set = dce_v8_0_set_hpd_interrupt_state,
        .process = dce_v8_0_hpd_irq,
};
3614
3615static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
3616{
3617        adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
3618        adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;
3619
3620        adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
3621        adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;
3622
3623        adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
3624        adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
3625}
3626
/* IP block descriptor for DCE 8.0 */
const struct amdgpu_ip_block_version dce_v8_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_DCE,
        .major = 8,
        .minor = 0,
        .rev = 0,
        .funcs = &dce_v8_0_ip_funcs,
};
3635
/* IP block descriptor for DCE 8.1 (shares the 8.0 hooks) */
const struct amdgpu_ip_block_version dce_v8_1_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_DCE,
        .major = 8,
        .minor = 1,
        .rev = 0,
        .funcs = &dce_v8_0_ip_funcs,
};
3644
/* IP block descriptor for DCE 8.2 (shares the 8.0 hooks) */
const struct amdgpu_ip_block_version dce_v8_2_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_DCE,
        .major = 8,
        .minor = 2,
        .rev = 0,
        .funcs = &dce_v8_0_ip_funcs,
};
3653
/* IP block descriptor for DCE 8.3 (shares the 8.0 hooks) */
const struct amdgpu_ip_block_version dce_v8_3_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_DCE,
        .major = 8,
        .minor = 3,
        .rev = 0,
        .funcs = &dce_v8_0_ip_funcs,
};
3662
/* IP block descriptor for DCE 8.5 (shares the 8.0 hooks) */
const struct amdgpu_ip_block_version dce_v8_5_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_DCE,
        .major = 8,
        .minor = 5,
        .rev = 0,
        .funcs = &dce_v8_0_ip_funcs,
};
3671