linux/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_ucode.h"
#include "cikd.h"
#include "amdgpu_dpm.h"
#include "ci_dpm.h"
#include "gfx_v7_0.h"
#include "atom.h"
#include "amd_pcie.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

MODULE_FIRMWARE("radeon/bonaire_smc.bin");
MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define SMC_RAM_END 0x40000

#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1    625
#define VOLTAGE_VID_OFFSET_SCALE2    100

static const struct amd_pm_funcs ci_dpm_funcs;

static const struct ci_pt_defaults defaults_hawaii_xt =
{
        1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
        { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
        { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
        1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
        { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
        { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
        1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
        { 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
        { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

#if 0
static const struct ci_pt_defaults defaults_bonaire_pro =
{
        1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
        { 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
        { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};
#endif

static const struct ci_pt_defaults defaults_saturn_xt =
{
        1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
        { 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
        { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

#if 0
static const struct ci_pt_defaults defaults_saturn_pro =
{
        1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
        { 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
        { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};
#endif

static const struct ci_pt_config_reg didt_config_ci[] =
{
        { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0xFFFFFFFF }
};

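/* The VBIOS stores the memory module (strap) index in bits 23:16 of
 * BIOS_SCRATCH_4.
 */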
static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
{
        return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
}

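/* Copy the MC arbiter DRAM timing set used for arb_freq_src into the set
 * used for arb_freq_dest, then request that the MC switch to the
 * destination arbiter set.
 */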
static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
                                       u32 arb_freq_src, u32 arb_freq_dest)
{
        u32 mc_arb_dram_timing;
        u32 mc_arb_dram_timing2;
        u32 burst_time;
        u32 mc_cg_config;

        switch (arb_freq_src) {
        case MC_CG_ARB_FREQ_F0:
                mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
                mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
                burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
                         MC_ARB_BURST_TIME__STATE0__SHIFT;
                break;
        case MC_CG_ARB_FREQ_F1:
                mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING_1);
                mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
                burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
                         MC_ARB_BURST_TIME__STATE1__SHIFT;
                break;
        default:
                return -EINVAL;
        }

        switch (arb_freq_dest) {
        case MC_CG_ARB_FREQ_F0:
                WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
                WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
                WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
                        ~MC_ARB_BURST_TIME__STATE0_MASK);
                break;
        case MC_CG_ARB_FREQ_F1:
                WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
                WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
                WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
                        ~MC_ARB_BURST_TIME__STATE1_MASK);
                break;
        default:
                return -EINVAL;
        }

        mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
        WREG32(mmMC_CG_CONFIG, mc_cg_config);
        WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
                ~MC_ARB_CG__CG_ARB_REQ_MASK);

        return 0;
}

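/* Map a DPM memory clock (in 10 kHz units) onto the 16-entry MC timing
 * table index used by the SMC; DDR3 and GDDR5 strobe/non-strobe mode use
 * different encodings.
 */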
static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
        u8 mc_para_index;

        if (memory_clock < 10000)
                mc_para_index = 0;
        else if (memory_clock >= 80000)
                mc_para_index = 0x0f;
        else
                mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
        return mc_para_index;
}

static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
        u8 mc_para_index;

        if (strobe_mode) {
                if (memory_clock < 12500)
                        mc_para_index = 0x00;
                else if (memory_clock > 47500)
                        mc_para_index = 0x0f;
                else
                        mc_para_index = (u8)((memory_clock - 10000) / 2500);
        } else {
                if (memory_clock < 65000)
                        mc_para_index = 0x00;
                else if (memory_clock > 135000)
                        mc_para_index = 0x0f;
                else
                        mc_para_index = (u8)((memory_clock - 60000) / 5000);
        }
        return mc_para_index;
}

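/* The SMC state table only has room for max_voltage_steps entries; if the
 * ATOM voltage table is larger, drop the lowest entries (the table is
 * assumed to be ordered from lowest to highest voltage).
 */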
static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
                                                     u32 max_voltage_steps,
                                                     struct atom_voltage_table *voltage_table)
{
        unsigned int i, diff;

        if (voltage_table->count <= max_voltage_steps)
                return;

        diff = voltage_table->count - max_voltage_steps;

        for (i = 0; i < max_voltage_steps; i++)
                voltage_table->entries[i] = voltage_table->entries[i + diff];

        voltage_table->count = max_voltage_steps;
}

static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
                                         struct atom_voltage_table_entry *voltage_table,
                                         u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
                                       u32 target_tdp);
static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);

static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
                                                             PPSMC_Msg msg, u32 parameter);
static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);

static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = adev->pm.dpm.priv;

        return pi;
}

static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
{
        struct ci_ps *ps = rps->ps_priv;

        return ps;
}

static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        switch (adev->pdev->device) {
        case 0x6649:
        case 0x6650:
        case 0x6651:
        case 0x6658:
        case 0x665C:
        case 0x665D:
        default:
                pi->powertune_defaults = &defaults_bonaire_xt;
                break;
        case 0x6640:
        case 0x6641:
        case 0x6646:
        case 0x6647:
                pi->powertune_defaults = &defaults_saturn_xt;
                break;
        case 0x67B8:
        case 0x67B0:
                pi->powertune_defaults = &defaults_hawaii_xt;
                break;
        case 0x67BA:
        case 0x67B1:
                pi->powertune_defaults = &defaults_hawaii_pro;
                break;
        case 0x67A0:
        case 0x67A1:
        case 0x67A2:
        case 0x67A8:
        case 0x67A9:
        case 0x67AA:
        case 0x67B9:
        case 0x67BE:
                pi->powertune_defaults = &defaults_bonaire_xt;
                break;
        }

        pi->dte_tj_offset = 0;

        pi->caps_power_containment = true;
        pi->caps_cac = false;
        pi->caps_sq_ramping = false;
        pi->caps_db_ramping = false;
        pi->caps_td_ramping = false;
        pi->caps_tcp_ramping = false;

        if (pi->caps_power_containment) {
                pi->caps_cac = true;
                if (adev->asic_type == CHIP_HAWAII)
                        pi->enable_bapm_feature = false;
                else
                        pi->enable_bapm_feature = true;
                pi->enable_tdc_limit_feature = true;
                pi->enable_pkg_pwr_tracking_feature = true;
        }
}

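/* Convert a VDDC value in mV to an SVI2 VID code.  With VOLTAGE_SCALE = 4
 * this is (6200 - 4 * vddc) / 25, the integer form of
 * VID = (1550 mV - vddc) / 6.25 mV.
 */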
static u8 ci_convert_to_vid(u16 vddc)
{
        return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
        u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
        u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
        u32 i;

        if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
                return -EINVAL;
        if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
                return -EINVAL;
        if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
            adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
                return -EINVAL;

        for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
                if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
                        lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
                        hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
                        hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
                } else {
                        lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
                        hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
                }
        }
        return 0;
}

static int ci_populate_vddc_vid(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u8 *vid = pi->smc_powertune_table.VddCVid;
        u32 i;

        if (pi->vddc_voltage_table.count > 8)
                return -EINVAL;

        for (i = 0; i < pi->vddc_voltage_table.count; i++)
                vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

        return 0;
}

static int ci_populate_svi_load_line(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

        pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
        pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
        pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
        pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

        return 0;
}

static int ci_populate_tdc_limit(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
        u16 tdc_limit;

        tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
        pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
        pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
                pt_defaults->tdc_vddc_throttle_release_limit_perc;
        pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

        return 0;
}

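/* Read TdcWaterfallCtl from the PM fuse table in SMC SRAM, then override
 * it with the value from the powertune defaults (the read result is only
 * used as an error check).
 */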
static int ci_populate_dw8(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
        int ret;

        ret = amdgpu_ci_read_smc_sram_dword(adev,
                                     SMU7_FIRMWARE_HEADER_LOCATION +
                                     offsetof(SMU7_Firmware_Header, PmFuseTable) +
                                     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
                                     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
                                     pi->sram_end);
        if (ret)
                return -EINVAL;
        else
                pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

        return 0;
}

static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
            (adev->pm.dpm.fan.fan_output_sensitivity == 0))
                adev->pm.dpm.fan.fan_output_sensitivity =
                        adev->pm.dpm.fan.default_fan_output_sensitivity;

        pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
                cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);

        return 0;
}

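/* Derive the GNB LPML min/max VIDs from the non-zero BapmVddC hi/lo SIDD
 * VID entries populated earlier.
 */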
static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
        u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
        int i, min, max;

        min = max = hi_vid[0];
        for (i = 0; i < 8; i++) {
                if (0 != hi_vid[i]) {
                        if (min > hi_vid[i])
                                min = hi_vid[i];
                        if (max < hi_vid[i])
                                max = hi_vid[i];
                }

                if (0 != lo_vid[i]) {
                        if (min > lo_vid[i])
                                min = lo_vid[i];
                        if (max < lo_vid[i])
                                max = lo_vid[i];
                }
        }

        if ((min == 0) || (max == 0))
                return -EINVAL;
        pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
        pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

        return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
        u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
        struct amdgpu_cac_tdp_table *cac_tdp_table =
                adev->pm.dpm.dyn_state.cac_tdp_table;

        hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
        lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

        pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
        pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

        return 0;
}

static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
        SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
        struct amdgpu_cac_tdp_table *cac_tdp_table =
                adev->pm.dpm.dyn_state.cac_tdp_table;
        struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
        int i, j, k;
        const u16 *def1;
        const u16 *def2;

        dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
        dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

        dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
        dpm_table->GpuTjMax =
                (u8)(pi->thermal_temp_setting.temperature_high / 1000);
        dpm_table->GpuTjHyst = 8;

        dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

        if (ppm) {
                dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
                dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
        } else {
                dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
                dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
        }

        dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
        def1 = pt_defaults->bapmti_r;
        def2 = pt_defaults->bapmti_rc;

        for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
                for (j = 0; j < SMU7_DTE_SOURCES; j++) {
                        for (k = 0; k < SMU7_DTE_SINKS; k++) {
                                dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
                                dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
                                def1++;
                                def2++;
                        }
                }
        }

        return 0;
}

static int ci_populate_pm_base(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u32 pm_fuse_table_offset;
        int ret;

        if (pi->caps_power_containment) {
                ret = amdgpu_ci_read_smc_sram_dword(adev,
                                             SMU7_FIRMWARE_HEADER_LOCATION +
                                             offsetof(SMU7_Firmware_Header, PmFuseTable),
                                             &pm_fuse_table_offset, pi->sram_end);
                if (ret)
                        return ret;
                ret = ci_populate_bapm_vddc_vid_sidd(adev);
                if (ret)
                        return ret;
                ret = ci_populate_vddc_vid(adev);
                if (ret)
                        return ret;
                ret = ci_populate_svi_load_line(adev);
                if (ret)
                        return ret;
                ret = ci_populate_tdc_limit(adev);
                if (ret)
                        return ret;
                ret = ci_populate_dw8(adev);
                if (ret)
                        return ret;
                ret = ci_populate_fuzzy_fan(adev);
                if (ret)
                        return ret;
                ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
                if (ret)
                        return ret;
                ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
                if (ret)
                        return ret;
                ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
                                           (u8 *)&pi->smc_powertune_table,
                                           sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
                if (ret)
                        return ret;
        }

        return 0;
}

static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u32 data;

        if (pi->caps_sq_ramping) {
                data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
                if (enable)
                        data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
                else
                        data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
                WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
        }

        if (pi->caps_db_ramping) {
                data = RREG32_DIDT(ixDIDT_DB_CTRL0);
                if (enable)
                        data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
                else
                        data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
                WREG32_DIDT(ixDIDT_DB_CTRL0, data);
        }

        if (pi->caps_td_ramping) {
                data = RREG32_DIDT(ixDIDT_TD_CTRL0);
                if (enable)
                        data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
                else
                        data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
                WREG32_DIDT(ixDIDT_TD_CTRL0, data);
        }

        if (pi->caps_tcp_ramping) {
                data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
                if (enable)
                        data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
                else
                        data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
                WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
        }
}

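/* Walk a 0xFFFFFFFF-terminated table of register updates.  CACHE-type
 * entries accumulate field values that are OR'd into the next real
 * register write; the type field selects the SMC, DIDT or plain MMIO
 * address space.
 */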
static int ci_program_pt_config_registers(struct amdgpu_device *adev,
                                          const struct ci_pt_config_reg *cac_config_regs)
{
        const struct ci_pt_config_reg *config_regs = cac_config_regs;
        u32 data;
        u32 cache = 0;

        if (config_regs == NULL)
                return -EINVAL;

        while (config_regs->offset != 0xFFFFFFFF) {
                if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
                        cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
                } else {
                        switch (config_regs->type) {
                        case CISLANDS_CONFIGREG_SMC_IND:
                                data = RREG32_SMC(config_regs->offset);
                                break;
                        case CISLANDS_CONFIGREG_DIDT_IND:
                                data = RREG32_DIDT(config_regs->offset);
                                break;
                        default:
                                data = RREG32(config_regs->offset);
                                break;
                        }

                        data &= ~config_regs->mask;
                        data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
                        data |= cache;

                        switch (config_regs->type) {
                        case CISLANDS_CONFIGREG_SMC_IND:
                                WREG32_SMC(config_regs->offset, data);
                                break;
                        case CISLANDS_CONFIGREG_DIDT_IND:
                                WREG32_DIDT(config_regs->offset, data);
                                break;
                        default:
                                WREG32(config_regs->offset, data);
                                break;
                        }
                        cache = 0;
                }
                config_regs++;
        }
        return 0;
}

static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        int ret;

        if (pi->caps_sq_ramping || pi->caps_db_ramping ||
            pi->caps_td_ramping || pi->caps_tcp_ramping) {
                adev->gfx.rlc.funcs->enter_safe_mode(adev);

                if (enable) {
                        ret = ci_program_pt_config_registers(adev, didt_config_ci);
                        if (ret) {
                                adev->gfx.rlc.funcs->exit_safe_mode(adev);
                                return ret;
                        }
                }

                ci_do_enable_didt(adev, enable);

                adev->gfx.rlc.funcs->exit_safe_mode(adev);
        }

        return 0;
}

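/* Power containment bundles three SMC-managed features: BAPM/DTE, the TDC
 * current limit and the package power limit.  Each one is enabled
 * individually and recorded in pi->power_containment_features so it can be
 * torn down again on disable.
 */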
static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result smc_result;
        int ret = 0;

        if (enable) {
                pi->power_containment_features = 0;
                if (pi->caps_power_containment) {
                        if (pi->enable_bapm_feature) {
                                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
                                if (smc_result != PPSMC_Result_OK)
                                        ret = -EINVAL;
                                else
                                        pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
                        }

                        if (pi->enable_tdc_limit_feature) {
                                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
                                if (smc_result != PPSMC_Result_OK)
                                        ret = -EINVAL;
                                else
                                        pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
                        }

                        if (pi->enable_pkg_pwr_tracking_feature) {
                                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
                                if (smc_result != PPSMC_Result_OK) {
                                        ret = -EINVAL;
                                } else {
                                        struct amdgpu_cac_tdp_table *cac_tdp_table =
                                                adev->pm.dpm.dyn_state.cac_tdp_table;
                                        u32 default_pwr_limit =
                                                (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

                                        pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

                                        ci_set_power_limit(adev, default_pwr_limit);
                                }
                        }
                }
        } else {
                if (pi->caps_power_containment && pi->power_containment_features) {
                        if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
                                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);

                        if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
                                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);

                        if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
                                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
                        pi->power_containment_features = 0;
                }
        }

        return ret;
}

static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result smc_result;
        int ret = 0;

        if (pi->caps_cac) {
                if (enable) {
                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
                        if (smc_result != PPSMC_Result_OK) {
                                ret = -EINVAL;
                                pi->cac_enabled = false;
                        } else {
                                pi->cac_enabled = true;
                        }
                } else if (pi->cac_enabled) {
                        amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
                        pi->cac_enabled = false;
                }
        }

        return ret;
}

static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
                                            bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result smc_result = PPSMC_Result_OK;

        if (pi->thermal_sclk_dpm_enabled) {
                if (enable)
                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
                else
                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
        }

        if (smc_result == PPSMC_Result_OK)
                return 0;
        else
                return -EINVAL;
}

static int ci_power_control_set_level(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct amdgpu_cac_tdp_table *cac_tdp_table =
                adev->pm.dpm.dyn_state.cac_tdp_table;
        s32 adjust_percent;
        s32 target_tdp;
        int ret = 0;
        bool adjust_polarity = false; /* ??? */

        if (pi->caps_power_containment) {
                adjust_percent = adjust_polarity ?
                        adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
                target_tdp = ((100 + adjust_percent) *
                              (s32)cac_tdp_table->configurable_tdp) / 100;

                ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
        }

        return ret;
}

static void ci_dpm_powergate_uvd(void *handle, bool gate)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct ci_power_info *pi = ci_get_pi(adev);

        pi->uvd_power_gated = gate;

        if (gate) {
                /* stop the UVD block */
                amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
                                                       AMD_PG_STATE_GATE);
                ci_update_uvd_dpm(adev, gate);
        } else {
                amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
                                                       AMD_PG_STATE_UNGATE);
                ci_update_uvd_dpm(adev, gate);
        }
}

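/* An mclk switch has to complete within the vblank period: GDDR5 is
 * assumed to need about 450 us, other VRAM types about 300 us.
 */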
static bool ci_dpm_vblank_too_short(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
        u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;

        /* disable mclk switching if the refresh is >120Hz, even if the
         * blanking period would allow it
         */
        if (amdgpu_dpm_get_vrefresh(adev) > 120)
                return true;

        if (vblank_time < switch_limit)
                return true;
        else
                return false;
}

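/* Clamp the requested power state to the current operating limits: AC/DC
 * power source, mclk switching constraints, display minimum clocks and any
 * active VCE state requirements.
 */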
static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
                                        struct amdgpu_ps *rps)
{
        struct ci_ps *ps = ci_get_ps(rps);
        struct ci_power_info *pi = ci_get_pi(adev);
        struct amdgpu_clock_and_voltage_limits *max_limits;
        bool disable_mclk_switching;
        u32 sclk, mclk;
        int i;

        if (rps->vce_active) {
                rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
                rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
        } else {
                rps->evclk = 0;
                rps->ecclk = 0;
        }

        if ((adev->pm.dpm.new_active_crtc_count > 1) ||
            ci_dpm_vblank_too_short(adev))
                disable_mclk_switching = true;
        else
                disable_mclk_switching = false;

        if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
                pi->battery_state = true;
        else
                pi->battery_state = false;

        if (adev->pm.dpm.ac_power)
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

        if (adev->pm.dpm.ac_power == false) {
                for (i = 0; i < ps->performance_level_count; i++) {
                        if (ps->performance_levels[i].mclk > max_limits->mclk)
                                ps->performance_levels[i].mclk = max_limits->mclk;
                        if (ps->performance_levels[i].sclk > max_limits->sclk)
                                ps->performance_levels[i].sclk = max_limits->sclk;
                }
        }

        /* XXX validate the min clocks required for display */

        if (disable_mclk_switching) {
                mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
                sclk = ps->performance_levels[0].sclk;
        } else {
                mclk = ps->performance_levels[0].mclk;
                sclk = ps->performance_levels[0].sclk;
        }

        if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
                sclk = adev->pm.pm_display_cfg.min_core_set_clock;

        if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
                mclk = adev->pm.pm_display_cfg.min_mem_set_clock;

        if (rps->vce_active) {
                if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
                        sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
                if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
                        mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
        }

        ps->performance_levels[0].sclk = sclk;
        ps->performance_levels[0].mclk = mclk;

        if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
                ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

        if (disable_mclk_switching) {
                if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
                        ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
        } else {
                if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
                        ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
        }
}

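/* Program the thermal interrupt trip points; DIG_THERM_INTH/INTL take
 * whole degrees C while the dpm thermal limits are kept in millidegrees.
 */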
static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
                                            int min_temp, int max_temp)
{
        int low_temp = 0 * 1000;
        int high_temp = 255 * 1000;
        u32 tmp;

        if (low_temp < min_temp)
                low_temp = min_temp;
        if (high_temp > max_temp)
                high_temp = max_temp;
        if (high_temp < low_temp) {
                DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
                return -EINVAL;
        }

        tmp = RREG32_SMC(ixCG_THERMAL_INT);
        tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
        tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
                ((low_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT);
        WREG32_SMC(ixCG_THERMAL_INT, tmp);

#if 0
        /* XXX: need to figure out how to handle this properly */
        tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
        tmp &= DIG_THERM_DPM_MASK;
        tmp |= DIG_THERM_DPM(high_temp / 1000);
        WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
#endif

        adev->pm.dpm.thermal.min_temp = low_temp;
        adev->pm.dpm.thermal.max_temp = high_temp;
        return 0;
}

static int ci_thermal_enable_alert(struct amdgpu_device *adev,
                                   bool enable)
{
        u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
        PPSMC_Result result;

        if (enable) {
                thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
                                 CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
                WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
                result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
                if (result != PPSMC_Result_OK) {
                        DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
                        return -EINVAL;
                }
        } else {
                thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
                        CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
                WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
                result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
                if (result != PPSMC_Result_OK) {
                        DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
                        return -EINVAL;
                }
        }

        return 0;
}

static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u32 tmp;

        if (pi->fan_ctrl_is_in_default_mode) {
                tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
                        >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
                pi->fan_ctrl_default_mode = tmp;
                tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
                        >> CG_FDO_CTRL2__TMIN__SHIFT;
                pi->t_min = tmp;
                pi->fan_ctrl_is_in_default_mode = false;
        }

        tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
        tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
        WREG32_SMC(ixCG_FDO_CTRL2, tmp);

        tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
        tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
        WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}

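/* Build the SMC fan table from the dpm fan profile.  Profile temperatures
 * are in hundredths of a degree C and PWM values in hundredths of a
 * percent; the +50 / /100 in the slope math rounds while converting those
 * units into the SMC's fixed-point slope format.
 */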
static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
        u32 duty100;
        u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
        u16 fdo_min, slope1, slope2;
        u32 reference_clock, tmp;
        int ret;
        u64 tmp64;

        if (!pi->fan_table_start) {
                adev->pm.dpm.fan.ucode_fan_control = false;
                return 0;
        }

        duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
                >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

        if (duty100 == 0) {
                adev->pm.dpm.fan.ucode_fan_control = false;
                return 0;
        }

        tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
        do_div(tmp64, 10000);
        fdo_min = (u16)tmp64;

        t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
        t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;

        pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
        pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;

        slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
        slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

        fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
        fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
        fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);

        fan_table.Slope1 = cpu_to_be16(slope1);
        fan_table.Slope2 = cpu_to_be16(slope2);

        fan_table.FdoMin = cpu_to_be16(fdo_min);

        fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);

        fan_table.HystUp = cpu_to_be16(1);

        fan_table.HystSlope = cpu_to_be16(1);

        fan_table.TempRespLim = cpu_to_be16(5);

        reference_clock = amdgpu_asic_get_xclk(adev);

        fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
                                               reference_clock) / 1600);

        fan_table.FdoMax = cpu_to_be16((u16)duty100);

        tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
                >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
        fan_table.TempSrc = (uint8_t)tmp;

        ret = amdgpu_ci_copy_bytes_to_smc(adev,
                                          pi->fan_table_start,
                                          (u8 *)(&fan_table),
                                          sizeof(fan_table),
                                          pi->sram_end);

        if (ret) {
                DRM_ERROR("Failed to load fan table to the SMC.");
                adev->pm.dpm.fan.ucode_fan_control = false;
        }

        return 0;
}

static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result ret;

        if (pi->caps_od_fuzzy_fan_control_support) {
                ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                                               PPSMC_StartFanControl,
                                                               FAN_CONTROL_FUZZY);
                if (ret != PPSMC_Result_OK)
                        return -EINVAL;
                ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                                               PPSMC_MSG_SetFanPwmMax,
                                                               adev->pm.dpm.fan.default_max_fan_pwm);
                if (ret != PPSMC_Result_OK)
                        return -EINVAL;
        } else {
                ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                                               PPSMC_StartFanControl,
                                                               FAN_CONTROL_TABLE);
                if (ret != PPSMC_Result_OK)
                        return -EINVAL;
        }

        pi->fan_is_controlled_by_smc = true;
        return 0;
}

static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
{
        PPSMC_Result ret;
        struct ci_power_info *pi = ci_get_pi(adev);

        ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
        if (ret == PPSMC_Result_OK) {
                pi->fan_is_controlled_by_smc = false;
                return 0;
        } else {
                return -EINVAL;
        }
}

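/* Report the current fan speed as a percentage: the measured FDO PWM duty
 * relative to the 100% duty-cycle value from CG_FDO_CTRL1.
 */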
1216static int ci_dpm_get_fan_speed_percent(void *handle,
1217                                        u32 *speed)
1218{
1219        u32 duty, duty100;
1220        u64 tmp64;
1221        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1222
1223        if (adev->pm.no_fan)
1224                return -ENOENT;
1225
1226        duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
1227                >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
1228        duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
1229                >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;
1230
1231        if (duty100 == 0)
1232                return -EINVAL;
1233
1234        tmp64 = (u64)duty * 100;
1235        do_div(tmp64, duty100);
1236        *speed = (u32)tmp64;
1237
1238        if (*speed > 100)
1239                *speed = 100;
1240
1241        return 0;
1242}
1243
1244static int ci_dpm_set_fan_speed_percent(void *handle,
1245                                        u32 speed)
1246{
1247        u32 tmp;
1248        u32 duty, duty100;
1249        u64 tmp64;
1250        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1251        struct ci_power_info *pi = ci_get_pi(adev);
1252
1253        if (adev->pm.no_fan)
1254                return -ENOENT;
1255
1256        if (pi->fan_is_controlled_by_smc)
1257                return -EINVAL;
1258
1259        if (speed > 100)
1260                return -EINVAL;
1261
1262        duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
1263                >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
1264
1265        if (duty100 == 0)
1266                return -EINVAL;
1267
1268        tmp64 = (u64)speed * duty100;
1269        do_div(tmp64, 100);
1270        duty = (u32)tmp64;
1271
1272        tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
1273        tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
1274        WREG32_SMC(ixCG_FDO_CTRL0, tmp);
1275
1276        return 0;
1277}
1278
1279static void ci_dpm_set_fan_control_mode(void *handle, u32 mode)
1280{
1281        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1282
1283        switch (mode) {
1284        case AMD_FAN_CTRL_NONE:
1285                if (adev->pm.dpm.fan.ucode_fan_control)
1286                        ci_fan_ctrl_stop_smc_fan_control(adev);
1287                ci_dpm_set_fan_speed_percent(adev, 100);
1288                break;
1289        case AMD_FAN_CTRL_MANUAL:
1290                if (adev->pm.dpm.fan.ucode_fan_control)
1291                        ci_fan_ctrl_stop_smc_fan_control(adev);
1292                break;
1293        case AMD_FAN_CTRL_AUTO:
1294                if (adev->pm.dpm.fan.ucode_fan_control)
1295                        ci_thermal_start_smc_fan_control(adev);
1296                break;
1297        default:
1298                break;
1299        }
1300}
1301
1302static u32 ci_dpm_get_fan_control_mode(void *handle)
1303{
1304        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1305        struct ci_power_info *pi = ci_get_pi(adev);
1306
1307        if (pi->fan_is_controlled_by_smc)
1308                return AMD_FAN_CTRL_AUTO;
1309        else
1310                return AMD_FAN_CTRL_MANUAL;
1311}
1312
1313#if 0
1314static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
1315                                         u32 *speed)
1316{
1317        u32 tach_period;
1318        u32 xclk = amdgpu_asic_get_xclk(adev);
1319
1320        if (adev->pm.no_fan)
1321                return -ENOENT;
1322
1323        if (adev->pm.fan_pulses_per_revolution == 0)
1324                return -ENOENT;
1325
1326        tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
1327                >> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
1328        if (tach_period == 0)
1329                return -ENOENT;
1330
1331        *speed = div_u64(60ULL * xclk * 10000, tach_period);
1332
1333        return 0;
1334}
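/*
 * The tachometer math above, spelled out (assuming xclk is in the
 * usual 10 kHz units): TACH_PERIOD counts reference clock ticks per
 * tachometer period, so RPM = 60 * (xclk * 10000) / tach_period. For
 * example, a 100 MHz reference (xclk = 10000) and tach_period = 2000000
 * give 60 * 100000000 / 2000000 = 3000 RPM; the 64-bit division avoids
 * the u32 overflow of the 6000000000 intermediate.
 */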
1335
1336static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
1337                                         u32 speed)
1338{
1339        u32 tach_period, tmp;
1340        u32 xclk = amdgpu_asic_get_xclk(adev);
1341
1342        if (adev->pm.no_fan)
1343                return -ENOENT;
1344
1345        if (adev->pm.fan_pulses_per_revolution == 0)
1346                return -ENOENT;
1347
1348        if ((speed < adev->pm.fan_min_rpm) ||
1349            (speed > adev->pm.fan_max_rpm))
1350                return -EINVAL;
1351
1352        if (adev->pm.dpm.fan.ucode_fan_control)
1353                ci_fan_ctrl_stop_smc_fan_control(adev);
1354
1355        tach_period = div_u64(60ULL * xclk * 10000, 8 * speed);
1356        tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
1357        tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
1358        WREG32_SMC(ixCG_TACH_CTRL, tmp);
1359
1360        ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
1361
1362        return 0;
1363}
1364#endif
1365
1366static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
1367{
1368        struct ci_power_info *pi = ci_get_pi(adev);
1369        u32 tmp;
1370
1371        if (!pi->fan_ctrl_is_in_default_mode) {
1372                tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
1373                tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
1374                WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1375
1376                tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
1377                tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
1378                WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1379                pi->fan_ctrl_is_in_default_mode = true;
1380        }
1381}
1382
1383static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
1384{
1385        if (adev->pm.dpm.fan.ucode_fan_control) {
1386                ci_fan_ctrl_start_smc_fan_control(adev);
1387                ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
1388        }
1389}
1390
1391static void ci_thermal_initialize(struct amdgpu_device *adev)
1392{
1393        u32 tmp;
1394
1395        if (adev->pm.fan_pulses_per_revolution) {
1396                tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
1397                tmp |= (adev->pm.fan_pulses_per_revolution - 1)
1398                        << CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
1399                WREG32_SMC(ixCG_TACH_CTRL, tmp);
1400        }
1401
1402        tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
1403        tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
1404        WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1405}
1406
1407static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
1408{
1409        int ret;
1410
1411        ci_thermal_initialize(adev);
1412        ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
1413        if (ret)
1414                return ret;
1415        ret = ci_thermal_enable_alert(adev, true);
1416        if (ret)
1417                return ret;
1418        if (adev->pm.dpm.fan.ucode_fan_control) {
1419                ret = ci_thermal_setup_fan_table(adev);
1420                if (ret)
1421                        return ret;
1422                ci_thermal_start_smc_fan_control(adev);
1423        }
1424
1425        return 0;
1426}
1427
1428static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
1429{
1430        if (!adev->pm.no_fan)
1431                ci_fan_ctrl_set_default_mode(adev);
1432}
1433
1434static int ci_read_smc_soft_register(struct amdgpu_device *adev,
1435                                     u16 reg_offset, u32 *value)
1436{
1437        struct ci_power_info *pi = ci_get_pi(adev);
1438
1439        return amdgpu_ci_read_smc_sram_dword(adev,
1440                                      pi->soft_regs_start + reg_offset,
1441                                      value, pi->sram_end);
1442}
1443
1444static int ci_write_smc_soft_register(struct amdgpu_device *adev,
1445                                      u16 reg_offset, u32 value)
1446{
1447        struct ci_power_info *pi = ci_get_pi(adev);
1448
1449        return amdgpu_ci_write_smc_sram_dword(adev,
1450                                       pi->soft_regs_start + reg_offset,
1451                                       value, pi->sram_end);
1452}
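/*
 * These helpers address the SMU7_SoftRegisters block relative to the
 * SoftRegisters offset cached from the firmware header (see
 * ci_process_firmware_header() below). Call sites locate a field with
 * offsetof(), e.g. the VoltageChangeTimeout write in ci_start_dpm():
 *
 *	ci_write_smc_soft_register(adev,
 *		offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
 */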
1453
1454static void ci_init_fps_limits(struct amdgpu_device *adev)
1455{
1456        struct ci_power_info *pi = ci_get_pi(adev);
1457        SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
1458
1459        if (pi->caps_fps) {
1460                u16 tmp;
1461
1462                tmp = 45;
1463                table->FpsHighT = cpu_to_be16(tmp);
1464
1465                tmp = 30;
1466                table->FpsLowT = cpu_to_be16(tmp);
1467        }
1468}
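/*
 * The SMC consumes its tables big-endian, which is why every field
 * written into SMU7_Discrete_DpmTable in this file goes through
 * cpu_to_be16()/cpu_to_be32(). The 45 and 30 here are the high and low
 * FPS thresholds handed to the firmware when the FPS feature is
 * enabled.
 */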
1469
1470static int ci_update_sclk_t(struct amdgpu_device *adev)
1471{
1472        struct ci_power_info *pi = ci_get_pi(adev);
1473        int ret = 0;
1474        u32 low_sclk_interrupt_t = 0;
1475
1476        if (pi->caps_sclk_throttle_low_notification) {
1477                low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
1478
1479                ret = amdgpu_ci_copy_bytes_to_smc(adev,
1480                                           pi->dpm_table_start +
1481                                           offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
1482                                           (u8 *)&low_sclk_interrupt_t,
1483                                           sizeof(u32), pi->sram_end);
1484
1485        }
1486
1487        return ret;
1488}
1489
1490static void ci_get_leakage_voltages(struct amdgpu_device *adev)
1491{
1492        struct ci_power_info *pi = ci_get_pi(adev);
1493        u16 leakage_id, virtual_voltage_id;
1494        u16 vddc, vddci;
1495        int i;
1496
1497        pi->vddc_leakage.count = 0;
1498        pi->vddci_leakage.count = 0;
1499
1500        if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
1501                for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1502                        virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1503                        if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
1504                                continue;
1505                        if (vddc != 0 && vddc != virtual_voltage_id) {
1506                                pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1507                                pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1508                                pi->vddc_leakage.count++;
1509                        }
1510                }
1511        } else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
1512                for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1513                        virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1514                        if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
1515                                                                                     virtual_voltage_id,
1516                                                                                     leakage_id) == 0) {
1517                                if (vddc != 0 && vddc != virtual_voltage_id) {
1518                                        pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1519                                        pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1520                                        pi->vddc_leakage.count++;
1521                                }
1522                                if (vddci != 0 && vddci != virtual_voltage_id) {
1523                                        pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
1524                                        pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
1525                                        pi->vddci_leakage.count++;
1526                                }
1527                        }
1528                }
1529        }
1530}
1531
1532static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
1533{
1534        struct ci_power_info *pi = ci_get_pi(adev);
1535        bool want_thermal_protection;
1536        enum amdgpu_dpm_event_src dpm_event_src;
1537        u32 tmp;
1538
1539        switch (sources) {
1540        case 0:
1541        default:
1542                want_thermal_protection = false;
1543                break;
1544        case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
1545                want_thermal_protection = true;
1546                dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
1547                break;
1548        case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
1549                want_thermal_protection = true;
1550                dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
1551                break;
1552        case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
1553              (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
1554                want_thermal_protection = true;
1555                dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
1556                break;
1557        }
1558
1559        if (want_thermal_protection) {
1560#if 0
1561                /* XXX: need to figure out how to handle this properly */
1562                tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
1563                tmp &= DPM_EVENT_SRC_MASK;
1564                tmp |= DPM_EVENT_SRC(dpm_event_src);
1565                WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
1566#endif
1567
1568                tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1569                if (pi->thermal_protection)
1570                        tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1571                else
1572                        tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1573                WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1574        } else {
1575                tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1576                tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1577                WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1578        }
1579}
1580
1581static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
1582                                           enum amdgpu_dpm_auto_throttle_src source,
1583                                           bool enable)
1584{
1585        struct ci_power_info *pi = ci_get_pi(adev);
1586
1587        if (enable) {
1588                if (!(pi->active_auto_throttle_sources & (1 << source))) {
1589                        pi->active_auto_throttle_sources |= 1 << source;
1590                        ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1591                }
1592        } else {
1593                if (pi->active_auto_throttle_sources & (1 << source)) {
1594                        pi->active_auto_throttle_sources &= ~(1 << source);
1595                        ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1596                }
1597        }
1598}
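/*
 * The throttle sources form a one-bit-per-source mask. Enabling both
 * sources, for instance, leaves active_auto_throttle_sources equal to
 * (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL) |
 * (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL), which
 * ci_set_dpm_event_sources() above decodes into the combined
 * digital-or-external event source case.
 */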
1599
1600static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
1601{
1602        if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1603                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
1604}
1605
1606static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1607{
1608        struct ci_power_info *pi = ci_get_pi(adev);
1609        PPSMC_Result smc_result;
1610
1611        if (!pi->need_update_smu7_dpm_table)
1612                return 0;
1613
1614        if ((!pi->sclk_dpm_key_disabled) &&
1615            (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1616                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
1617                if (smc_result != PPSMC_Result_OK)
1618                        return -EINVAL;
1619        }
1620
1621        if ((!pi->mclk_dpm_key_disabled) &&
1622            (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1623                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
1624                if (smc_result != PPSMC_Result_OK)
1625                        return -EINVAL;
1626        }
1627
1628        pi->need_update_smu7_dpm_table = 0;
1629        return 0;
1630}
1631
1632static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
1633{
1634        struct ci_power_info *pi = ci_get_pi(adev);
1635        PPSMC_Result smc_result;
1636
1637        if (enable) {
1638                if (!pi->sclk_dpm_key_disabled) {
1639                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
1640                        if (smc_result != PPSMC_Result_OK)
1641                                return -EINVAL;
1642                }
1643
1644                if (!pi->mclk_dpm_key_disabled) {
1645                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
1646                        if (smc_result != PPSMC_Result_OK)
1647                                return -EINVAL;
1648
1649                        WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
1650                                        ~MC_SEQ_CNTL_3__CAC_EN_MASK);
1651
1652                        WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
1653                        WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
1654                        WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);
1655
1656                        udelay(10);
1657
1658                        WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
1659                        WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
1660                        WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
1661                }
1662        } else {
1663                if (!pi->sclk_dpm_key_disabled) {
1664                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
1665                        if (smc_result != PPSMC_Result_OK)
1666                                return -EINVAL;
1667                }
1668
1669                if (!pi->mclk_dpm_key_disabled) {
1670                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
1671                        if (smc_result != PPSMC_Result_OK)
1672                                return -EINVAL;
1673                }
1674        }
1675
1676        return 0;
1677}
1678
1679static int ci_start_dpm(struct amdgpu_device *adev)
1680{
1681        struct ci_power_info *pi = ci_get_pi(adev);
1682        PPSMC_Result smc_result;
1683        int ret;
1684        u32 tmp;
1685
1686        tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1687        tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1688        WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1689
1690        tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1691        tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1692        WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1693
1694        ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
1695
1696        WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);
1697
1698        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
1699        if (smc_result != PPSMC_Result_OK)
1700                return -EINVAL;
1701
1702        ret = ci_enable_sclk_mclk_dpm(adev, true);
1703        if (ret)
1704                return ret;
1705
1706        if (!pi->pcie_dpm_key_disabled) {
1707                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
1708                if (smc_result != PPSMC_Result_OK)
1709                        return -EINVAL;
1710        }
1711
1712        return 0;
1713}
1714
1715static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1716{
1717        struct ci_power_info *pi = ci_get_pi(adev);
1718        PPSMC_Result smc_result;
1719
1720        if (!pi->need_update_smu7_dpm_table)
1721                return 0;
1722
1723        if ((!pi->sclk_dpm_key_disabled) &&
1724            (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1725                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1726                if (smc_result != PPSMC_Result_OK)
1727                        return -EINVAL;
1728        }
1729
1730        if ((!pi->mclk_dpm_key_disabled) &&
1731            (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1732                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1733                if (smc_result != PPSMC_Result_OK)
1734                        return -EINVAL;
1735        }
1736
1737        return 0;
1738}
1739
1740static int ci_stop_dpm(struct amdgpu_device *adev)
1741{
1742        struct ci_power_info *pi = ci_get_pi(adev);
1743        PPSMC_Result smc_result;
1744        int ret;
1745        u32 tmp;
1746
1747        tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1748        tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1749        WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1750
1751        tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1752        tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1753        WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1754
1755        if (!pi->pcie_dpm_key_disabled) {
1756                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
1757                if (smc_result != PPSMC_Result_OK)
1758                        return -EINVAL;
1759        }
1760
1761        ret = ci_enable_sclk_mclk_dpm(adev, false);
1762        if (ret)
1763                return ret;
1764
1765        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
1766        if (smc_result != PPSMC_Result_OK)
1767                return -EINVAL;
1768
1769        return 0;
1770}
1771
1772static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
1773{
1774        u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1775
1776        if (enable)
1777                tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1778        else
1779                tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1780        WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1781}
1782
1783#if 0
1784static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
1785                                        bool ac_power)
1786{
1787        struct ci_power_info *pi = ci_get_pi(adev);
1788        struct amdgpu_cac_tdp_table *cac_tdp_table =
1789                adev->pm.dpm.dyn_state.cac_tdp_table;
1790        u32 power_limit;
1791
1792        if (ac_power)
1793                power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
1794        else
1795                power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
1796
1797        ci_set_power_limit(adev, power_limit);
1798
1799        if (pi->caps_automatic_dc_transition) {
1800                if (ac_power)
1801                        amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
1802                else
1803                        amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
1804        }
1805
1806        return 0;
1807}
1808#endif
1809
1810static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
1811                                                      PPSMC_Msg msg, u32 parameter)
1812{
1813        WREG32(mmSMC_MSG_ARG_0, parameter);
1814        return amdgpu_ci_send_msg_to_smc(adev, msg);
1815}
1816
1817static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
1818                                                        PPSMC_Msg msg, u32 *parameter)
1819{
1820        PPSMC_Result smc_result;
1821
1822        smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);
1823
1824        if ((smc_result == PPSMC_Result_OK) && parameter)
1825                *parameter = RREG32(mmSMC_MSG_ARG_0);
1826
1827        return smc_result;
1828}
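/*
 * Together these wrappers implement the simple SMC mailbox convention
 * used throughout this file: an optional argument is placed in
 * SMC_MSG_ARG_0, the message is posted, and for queries the reply is
 * read back from the same argument register. ci_get_average_sclk_freq()
 * below is a typical user:
 *
 *	amdgpu_ci_send_msg_to_smc_return_parameter(adev,
 *			PPSMC_MSG_API_GetSclkFrequency, &sclk_freq);
 */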
1829
1830static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
1831{
1832        struct ci_power_info *pi = ci_get_pi(adev);
1833
1834        if (!pi->sclk_dpm_key_disabled) {
1835                PPSMC_Result smc_result =
1836                        amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1837                if (smc_result != PPSMC_Result_OK)
1838                        return -EINVAL;
1839        }
1840
1841        return 0;
1842}
1843
1844static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
1845{
1846        struct ci_power_info *pi = ci_get_pi(adev);
1847
1848        if (!pi->mclk_dpm_key_disabled) {
1849                PPSMC_Result smc_result =
1850                        amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1851                if (smc_result != PPSMC_Result_OK)
1852                        return -EINVAL;
1853        }
1854
1855        return 0;
1856}
1857
1858static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
1859{
1860        struct ci_power_info *pi = ci_get_pi(adev);
1861
1862        if (!pi->pcie_dpm_key_disabled) {
1863                PPSMC_Result smc_result =
1864                        amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1865                if (smc_result != PPSMC_Result_OK)
1866                        return -EINVAL;
1867        }
1868
1869        return 0;
1870}
1871
1872static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
1873{
1874        struct ci_power_info *pi = ci_get_pi(adev);
1875
1876        if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1877                PPSMC_Result smc_result =
1878                        amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
1879                if (smc_result != PPSMC_Result_OK)
1880                        return -EINVAL;
1881        }
1882
1883        return 0;
1884}
1885
1886static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
1887                                       u32 target_tdp)
1888{
1889        PPSMC_Result smc_result =
1890                amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1891        if (smc_result != PPSMC_Result_OK)
1892                return -EINVAL;
1893        return 0;
1894}
1895
1896#if 0
1897static int ci_set_boot_state(struct amdgpu_device *adev)
1898{
1899        return ci_enable_sclk_mclk_dpm(adev, false);
1900}
1901#endif
1902
1903static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
1904{
1905        u32 sclk_freq;
1906        PPSMC_Result smc_result =
1907                amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1908                                                    PPSMC_MSG_API_GetSclkFrequency,
1909                                                    &sclk_freq);
1910        if (smc_result != PPSMC_Result_OK)
1911                sclk_freq = 0;
1912
1913        return sclk_freq;
1914}
1915
1916static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
1917{
1918        u32 mclk_freq;
1919        PPSMC_Result smc_result =
1920                amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1921                                                    PPSMC_MSG_API_GetMclkFrequency,
1922                                                    &mclk_freq);
1923        if (smc_result != PPSMC_Result_OK)
1924                mclk_freq = 0;
1925
1926        return mclk_freq;
1927}
1928
1929static void ci_dpm_start_smc(struct amdgpu_device *adev)
1930{
1931        int i;
1932
1933        amdgpu_ci_program_jump_on_start(adev);
1934        amdgpu_ci_start_smc_clock(adev);
1935        amdgpu_ci_start_smc(adev);
1936        for (i = 0; i < adev->usec_timeout; i++) {
1937                if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
1938                        break;
1939        }
1940}
1941
1942static void ci_dpm_stop_smc(struct amdgpu_device *adev)
1943{
1944        amdgpu_ci_reset_smc(adev);
1945        amdgpu_ci_stop_smc_clock(adev);
1946}
1947
1948static int ci_process_firmware_header(struct amdgpu_device *adev)
1949{
1950        struct ci_power_info *pi = ci_get_pi(adev);
1951        u32 tmp;
1952        int ret;
1953
1954        ret = amdgpu_ci_read_smc_sram_dword(adev,
1955                                     SMU7_FIRMWARE_HEADER_LOCATION +
1956                                     offsetof(SMU7_Firmware_Header, DpmTable),
1957                                     &tmp, pi->sram_end);
1958        if (ret)
1959                return ret;
1960
1961        pi->dpm_table_start = tmp;
1962
1963        ret = amdgpu_ci_read_smc_sram_dword(adev,
1964                                     SMU7_FIRMWARE_HEADER_LOCATION +
1965                                     offsetof(SMU7_Firmware_Header, SoftRegisters),
1966                                     &tmp, pi->sram_end);
1967        if (ret)
1968                return ret;
1969
1970        pi->soft_regs_start = tmp;
1971
1972        ret = amdgpu_ci_read_smc_sram_dword(adev,
1973                                     SMU7_FIRMWARE_HEADER_LOCATION +
1974                                     offsetof(SMU7_Firmware_Header, mcRegisterTable),
1975                                     &tmp, pi->sram_end);
1976        if (ret)
1977                return ret;
1978
1979        pi->mc_reg_table_start = tmp;
1980
1981        ret = amdgpu_ci_read_smc_sram_dword(adev,
1982                                     SMU7_FIRMWARE_HEADER_LOCATION +
1983                                     offsetof(SMU7_Firmware_Header, FanTable),
1984                                     &tmp, pi->sram_end);
1985        if (ret)
1986                return ret;
1987
1988        pi->fan_table_start = tmp;
1989
1990        ret = amdgpu_ci_read_smc_sram_dword(adev,
1991                                     SMU7_FIRMWARE_HEADER_LOCATION +
1992                                     offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1993                                     &tmp, pi->sram_end);
1994        if (ret)
1995                return ret;
1996
1997        pi->arb_table_start = tmp;
1998
1999        return 0;
2000}
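/*
 * After this runs, pi caches the SMC SRAM offsets of the five firmware
 * tables (DPM table, soft registers, MC register table, fan table and
 * MC arb DRAM timing table). Every later table access in this file is
 * an offset from one of these bases, e.g. in ci_update_sclk_t() above:
 *
 *	pi->dpm_table_start +
 *		offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT)
 */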
2001
2002static void ci_read_clock_registers(struct amdgpu_device *adev)
2003{
2004        struct ci_power_info *pi = ci_get_pi(adev);
2005
2006        pi->clock_registers.cg_spll_func_cntl =
2007                RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
2008        pi->clock_registers.cg_spll_func_cntl_2 =
2009                RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
2010        pi->clock_registers.cg_spll_func_cntl_3 =
2011                RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
2012        pi->clock_registers.cg_spll_func_cntl_4 =
2013                RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
2014        pi->clock_registers.cg_spll_spread_spectrum =
2015                RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
2016        pi->clock_registers.cg_spll_spread_spectrum_2 =
2017                RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
2018        pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
2019        pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
2020        pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
2021        pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
2022        pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
2023        pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
2024        pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
2025        pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
2026        pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
2027}
2028
2029static void ci_init_sclk_t(struct amdgpu_device *adev)
2030{
2031        struct ci_power_info *pi = ci_get_pi(adev);
2032
2033        pi->low_sclk_interrupt_t = 0;
2034}
2035
2036static void ci_enable_thermal_protection(struct amdgpu_device *adev,
2037                                         bool enable)
2038{
2039        u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2040
2041        if (enable)
2042                tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2043        else
2044                tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2045        WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2046}
2047
2048static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
2049{
2050        u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2051
2052        tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;
2053
2054        WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2055}
2056
2057#if 0
2058static int ci_enter_ulp_state(struct amdgpu_device *adev)
2059{
2060
2061        WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
2062
2063        udelay(25000);
2064
2065        return 0;
2066}
2067
2068static int ci_exit_ulp_state(struct amdgpu_device *adev)
2069{
2070        int i;
2071
2072        WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
2073
2074        udelay(7000);
2075
2076        for (i = 0; i < adev->usec_timeout; i++) {
2077                if (RREG32(mmSMC_RESP_0) == 1)
2078                        break;
2079                udelay(1000);
2080        }
2081
2082        return 0;
2083}
2084#endif
2085
2086static int ci_notify_smc_display_change(struct amdgpu_device *adev,
2087                                        bool has_display)
2088{
2089        PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
2090
2091        return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
2092}
2093
2094static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
2095                                      bool enable)
2096{
2097        struct ci_power_info *pi = ci_get_pi(adev);
2098
2099        if (enable) {
2100                if (pi->caps_sclk_ds) {
2101                        if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
2102                                return -EINVAL;
2103                } else {
2104                        if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2105                                return -EINVAL;
2106                }
2107        } else {
2108                if (pi->caps_sclk_ds) {
2109                        if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2110                                return -EINVAL;
2111                }
2112        }
2113
2114        return 0;
2115}
2116
2117static void ci_program_display_gap(struct amdgpu_device *adev)
2118{
2119        u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2120        u32 pre_vbi_time_in_us;
2121        u32 frame_time_in_us;
2122        u32 ref_clock = adev->clock.spll.reference_freq;
2123        u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
2124        u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
2125
2126        tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
2127        if (adev->pm.dpm.new_active_crtc_count > 0)
2128                tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2129        else
2130                tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2131        WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2132
2133        if (refresh_rate == 0)
2134                refresh_rate = 60;
2135        if (vblank_time == 0xffffffff)
2136                vblank_time = 500;
2137        frame_time_in_us = 1000000 / refresh_rate;
2138        pre_vbi_time_in_us =
2139                frame_time_in_us - 200 - vblank_time;
2140        tmp = pre_vbi_time_in_us * (ref_clock / 100);
2141
2142        WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
2143        ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
2144        ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
2145
2146
2147        ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));
2148
2149}
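/*
 * Worked example of the timing math above, assuming a 60 Hz display,
 * a reported vblank time of 500 us and ref_clock = 10000 (a 100 MHz
 * reference in 10 kHz units): frame_time_in_us = 1000000 / 60 = 16666,
 * pre_vbi_time_in_us = 16666 - 200 - 500 = 15966, and
 * CG_DISPLAY_GAP_CNTL2 is programmed with 15966 * (10000 / 100) =
 * 1596600 reference clock ticks. The VBlankTimeout soft register then
 * holds the remaining 700 us of the frame.
 */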
2150
2151static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
2152{
2153        struct ci_power_info *pi = ci_get_pi(adev);
2154        u32 tmp;
2155
2156        if (enable) {
2157                if (pi->caps_sclk_ss_support) {
2158                        tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2159                        tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2160                        WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2161                }
2162        } else {
2163                tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
2164                tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
2165                WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);
2166
2167                tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2168                tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2169                WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2170        }
2171}
2172
2173static void ci_program_sstp(struct amdgpu_device *adev)
2174{
2175        WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
2176                   ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
2177                    (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT)));
2178}
2179
2180static void ci_enable_display_gap(struct amdgpu_device *adev)
2181{
2182        u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2183
2184        tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
2185                        CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
2186        tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
2187                (AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));
2188
2189        WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2190}
2191
2192static void ci_program_vc(struct amdgpu_device *adev)
2193{
2194        u32 tmp;
2195
2196        tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2197        tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2198        WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2199
2200        WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
2201        WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
2202        WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
2203        WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
2204        WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
2205        WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
2206        WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
2207        WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
2208}
2209
2210static void ci_clear_vc(struct amdgpu_device *adev)
2211{
2212        u32 tmp;
2213
2214        tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2215        tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2216        WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2217
2218        WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
2219        WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
2220        WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
2221        WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
2222        WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
2223        WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
2224        WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
2225        WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
2226}
2227
2228static int ci_upload_firmware(struct amdgpu_device *adev)
2229{
2230        int i, ret;
2231
2232        if (amdgpu_ci_is_smc_running(adev)) {
2233                DRM_INFO("smc is running, no need to load smc firmware\n");
2234                return 0;
2235        }
2236
2237        for (i = 0; i < adev->usec_timeout; i++) {
2238                if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
2239                        break;
2240        }
2241        WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);
2242
2243        amdgpu_ci_stop_smc_clock(adev);
2244        amdgpu_ci_reset_smc(adev);
2245
2246        ret = amdgpu_ci_load_smc_ucode(adev, SMC_RAM_END);
2247
2248        return ret;
2249
2250}
2251
2252static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
2253                                     struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
2254                                     struct atom_voltage_table *voltage_table)
2255{
2256        u32 i;
2257
2258        if (voltage_dependency_table == NULL)
2259                return -EINVAL;
2260
2261        voltage_table->mask_low = 0;
2262        voltage_table->phase_delay = 0;
2263
2264        voltage_table->count = voltage_dependency_table->count;
2265        for (i = 0; i < voltage_table->count; i++) {
2266                voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2267                voltage_table->entries[i].smio_low = 0;
2268        }
2269
2270        return 0;
2271}
2272
2273static int ci_construct_voltage_tables(struct amdgpu_device *adev)
2274{
2275        struct ci_power_info *pi = ci_get_pi(adev);
2276        int ret;
2277
2278        if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2279                ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
2280                                                        VOLTAGE_OBJ_GPIO_LUT,
2281                                                        &pi->vddc_voltage_table);
2282                if (ret)
2283                        return ret;
2284        } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2285                ret = ci_get_svi2_voltage_table(adev,
2286                                                &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2287                                                &pi->vddc_voltage_table);
2288                if (ret)
2289                        return ret;
2290        }
2291
2292        if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2293                ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
2294                                                         &pi->vddc_voltage_table);
2295
2296        if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2297                ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
2298                                                        VOLTAGE_OBJ_GPIO_LUT,
2299                                                        &pi->vddci_voltage_table);
2300                if (ret)
2301                        return ret;
2302        } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2303                ret = ci_get_svi2_voltage_table(adev,
2304                                                &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2305                                                &pi->vddci_voltage_table);
2306                if (ret)
2307                        return ret;
2308        }
2309
2310        if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2311                ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
2312                                                         &pi->vddci_voltage_table);
2313
2314        if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2315                ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
2316                                                        VOLTAGE_OBJ_GPIO_LUT,
2317                                                        &pi->mvdd_voltage_table);
2318                if (ret)
2319                        return ret;
2320        } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2321                ret = ci_get_svi2_voltage_table(adev,
2322                                                &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2323                                                &pi->mvdd_voltage_table);
2324                if (ret)
2325                        return ret;
2326        }
2327
2328        if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2329                ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
2330                                                         &pi->mvdd_voltage_table);
2331
2332        return 0;
2333}
2334
2335static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
2336                                          struct atom_voltage_table_entry *voltage_table,
2337                                          SMU7_Discrete_VoltageLevel *smc_voltage_table)
2338{
2339        int ret;
2340
2341        ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
2342                                            &smc_voltage_table->StdVoltageHiSidd,
2343                                            &smc_voltage_table->StdVoltageLoSidd);
2344
2345        if (ret) {
2346                smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
2347                smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
2348        }
2349
2350        smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
2351        smc_voltage_table->StdVoltageHiSidd =
2352                cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
2353        smc_voltage_table->StdVoltageLoSidd =
2354                cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
2355}
2356
2357static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
2358                                      SMU7_Discrete_DpmTable *table)
2359{
2360        struct ci_power_info *pi = ci_get_pi(adev);
2361        unsigned int count;
2362
2363        table->VddcLevelCount = pi->vddc_voltage_table.count;
2364        for (count = 0; count < table->VddcLevelCount; count++) {
2365                ci_populate_smc_voltage_table(adev,
2366                                              &pi->vddc_voltage_table.entries[count],
2367                                              &table->VddcLevel[count]);
2368
2369                if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2370                        table->VddcLevel[count].Smio |=
2371                                pi->vddc_voltage_table.entries[count].smio_low;
2372                else
2373                        table->VddcLevel[count].Smio = 0;
2374        }
2375        table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2376
2377        return 0;
2378}
2379
2380static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
2381                                       SMU7_Discrete_DpmTable *table)
2382{
2383        unsigned int count;
2384        struct ci_power_info *pi = ci_get_pi(adev);
2385
2386        table->VddciLevelCount = pi->vddci_voltage_table.count;
2387        for (count = 0; count < table->VddciLevelCount; count++) {
2388                ci_populate_smc_voltage_table(adev,
2389                                              &pi->vddci_voltage_table.entries[count],
2390                                              &table->VddciLevel[count]);
2391
2392                if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2393                        table->VddciLevel[count].Smio |=
2394                                pi->vddci_voltage_table.entries[count].smio_low;
2395                else
2396                        table->VddciLevel[count].Smio = 0;
2397        }
2398        table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2399
2400        return 0;
2401}
2402
2403static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
2404                                      SMU7_Discrete_DpmTable *table)
2405{
2406        struct ci_power_info *pi = ci_get_pi(adev);
2407        unsigned int count;
2408
2409        table->MvddLevelCount = pi->mvdd_voltage_table.count;
2410        for (count = 0; count < table->MvddLevelCount; count++) {
2411                ci_populate_smc_voltage_table(adev,
2412                                              &pi->mvdd_voltage_table.entries[count],
2413                                              &table->MvddLevel[count]);
2414
2415                if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2416                        table->MvddLevel[count].Smio |=
2417                                pi->mvdd_voltage_table.entries[count].smio_low;
2418                else
2419                        table->MvddLevel[count].Smio = 0;
2420        }
2421        table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2422
2423        return 0;
2424}
2425
2426static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
2427                                          SMU7_Discrete_DpmTable *table)
2428{
2429        int ret;
2430
2431        ret = ci_populate_smc_vddc_table(adev, table);
2432        if (ret)
2433                return ret;
2434
2435        ret = ci_populate_smc_vddci_table(adev, table);
2436        if (ret)
2437                return ret;
2438
2439        ret = ci_populate_smc_mvdd_table(adev, table);
2440        if (ret)
2441                return ret;
2442
2443        return 0;
2444}
2445
2446static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
2447                                  SMU7_Discrete_VoltageLevel *voltage)
2448{
2449        struct ci_power_info *pi = ci_get_pi(adev);
2450        u32 i = 0;
2451
2452        if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2453                for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2454                        if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2455                                voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2456                                break;
2457                        }
2458                }
2459
2460                if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
2461                        return -EINVAL;
                return 0;
2462        }
2463
2464        return -EINVAL;
2465}
2466
2467static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
2468                                         struct atom_voltage_table_entry *voltage_table,
2469                                         u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
2470{
2471        u16 v_index, idx;
2472        bool voltage_found = false;
2473        *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
2474        *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
2475
2476        if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
2477                return -EINVAL;
2478
2479        if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
2480                for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2481                        if (voltage_table->value ==
2482                            adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2483                                voltage_found = true;
2484                                if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2485                                        idx = v_index;
2486                                else
2487                                        idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2488                                *std_voltage_lo_sidd =
2489                                        adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2490                                *std_voltage_hi_sidd =
2491                                        adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2492                                break;
2493                        }
2494                }
2495
2496                if (!voltage_found) {
2497                        for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2498                                if (voltage_table->value <=
2499                                    adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2500                                        voltage_found = true;
2501                                        if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2502                                                idx = v_index;
2503                                        else
2504                                                idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2505                                        *std_voltage_lo_sidd =
2506                                                adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2507                                        *std_voltage_hi_sidd =
2508                                                adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2509                                        break;
2510                                }
2511                        }
2512                }
2513        }
2514
2515        return 0;
2516}
2517
2518static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
2519                                                  const struct amdgpu_phase_shedding_limits_table *limits,
2520                                                  u32 sclk,
2521                                                  u32 *phase_shedding)
2522{
2523        unsigned int i;
2524
2525        *phase_shedding = 1;
2526
2527        for (i = 0; i < limits->count; i++) {
2528                if (sclk < limits->entries[i].sclk) {
2529                        *phase_shedding = i;
2530                        break;
2531                }
2532        }
2533}
2534
2535static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
2536                                                  const struct amdgpu_phase_shedding_limits_table *limits,
2537                                                  u32 mclk,
2538                                                  u32 *phase_shedding)
2539{
2540        unsigned int i;
2541
2542        *phase_shedding = 1;
2543
2544        for (i = 0; i < limits->count; i++) {
2545                if (mclk < limits->entries[i].mclk) {
2546                        *phase_shedding = i;
2547                        break;
2548                }
2549        }
2550}
2551
2552static int ci_init_arb_table_index(struct amdgpu_device *adev)
2553{
2554        struct ci_power_info *pi = ci_get_pi(adev);
2555        u32 tmp;
2556        int ret;
2557
2558        ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
2559                                     &tmp, pi->sram_end);
2560        if (ret)
2561                return ret;
2562
2563        tmp &= 0x00FFFFFF;
2564        tmp |= MC_CG_ARB_FREQ_F1 << 24;
2565
2566        return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
2567                                       tmp, pi->sram_end);
2568}
2569
2570static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
2571                                         struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
2572                                         u32 clock, u32 *voltage)
2573{
2574        u32 i = 0;
2575
2576        if (allowed_clock_voltage_table->count == 0)
2577                return -EINVAL;
2578
2579        for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2580                if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2581                        *voltage = allowed_clock_voltage_table->entries[i].v;
2582                        return 0;
2583                }
2584        }
2585
2586        *voltage = allowed_clock_voltage_table->entries[i-1].v;
2587
2588        return 0;
2589}
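/*
 * Lookup example with an illustrative two-entry table,
 * {clk = 30000, v = 900} and {clk = 60000, v = 1000}: a requested
 * clock of 45000 matches the first entry with clk >= 45000, so
 * *voltage becomes 1000; a request above every entry (say 80000)
 * falls out of the loop and clamps to the last entry's 1000.
 */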
2590
2591static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr)
2592{
2593        u32 i;
2594        u32 tmp;
2595        u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK);
2596
2597        if (sclk < min)
2598                return 0;
2599
2600        for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
2601                tmp = sclk >> i;
2602                if (tmp >= min || i == 0)
2603                        break;
2604        }
2605
2606        return (u8)i;
2607}
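/*
 * Divider example with illustrative values: for min = 25000 and
 * sclk = 80000, scanning down from CISLAND_MAX_DEEPSLEEP_DIVIDER_ID
 * finds i = 1 as the largest divider ID whose shifted clock still
 * meets the floor (80000 >> 1 = 40000 >= 25000, while
 * 80000 >> 2 = 20000 does not), so the function returns 1.
 */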
2608
2609static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
2610{
2611        return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2612}
2613
2614static int ci_reset_to_default(struct amdgpu_device *adev)
2615{
2616        return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2617                0 : -EINVAL;
2618}
2619
2620static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
2621{
2622        u32 tmp;
2623
2624        tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;
2625
2626        if (tmp == MC_CG_ARB_FREQ_F0)
2627                return 0;
2628
2629        return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
2630}
2631
2632static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
2633                                        const u32 engine_clock,
2634                                        const u32 memory_clock,
2635                                        u32 *dram_timing2)
2636{
2637        bool patch;
2638        u32 tmp, tmp2;
2639
2640        tmp = RREG32(mmMC_SEQ_MISC0);
2641        patch = ((tmp & 0x0000f00) == 0x300);
2642
2643        if (patch &&
2644            ((adev->pdev->device == 0x67B0) ||
2645             (adev->pdev->device == 0x67B1))) {
2646                if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2647                        tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2648                        *dram_timing2 &= ~0x00ff0000;
2649                        *dram_timing2 |= tmp2 << 16;
2650                } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2651                        tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2652                        *dram_timing2 &= ~0x00ff0000;
2653                        *dram_timing2 |= tmp2 << 16;
2654                }
2655        }
2656}
2657
2658static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
2659                                                u32 sclk,
2660                                                u32 mclk,
2661                                                SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2662{
2663        u32 dram_timing;
2664        u32 dram_timing2;
2665        u32 burst_time;
2666
2667        amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);
2668
2669        dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
2670        dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
2671        burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;
2672
2673        ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);
2674
2675        arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
2676        arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2677        arb_regs->McArbBurstTime = (u8)burst_time;
2678
2679        return 0;
2680}
2681
2682static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
2683{
2684        struct ci_power_info *pi = ci_get_pi(adev);
2685        SMU7_Discrete_MCArbDramTimingTable arb_regs;
2686        u32 i, j;
2687        int ret =  0;
2688
2689        memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2690
2691        for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2692                for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2693                        ret = ci_populate_memory_timing_parameters(adev,
2694                                                                   pi->dpm_table.sclk_table.dpm_levels[i].value,
2695                                                                   pi->dpm_table.mclk_table.dpm_levels[j].value,
2696                                                                   &arb_regs.entries[i][j]);
2697                        if (ret)
2698                                break;
2699                }
2700        }
2701
2702        if (ret == 0)
2703                ret = amdgpu_ci_copy_bytes_to_smc(adev,
2704                                           pi->arb_table_start,
2705                                           (u8 *)&arb_regs,
2706                                           sizeof(SMU7_Discrete_MCArbDramTimingTable),
2707                                           pi->sram_end);
2708
2709        return ret;
2710}
2711
2712static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
2713{
2714        struct ci_power_info *pi = ci_get_pi(adev);
2715
2716        if (pi->need_update_smu7_dpm_table == 0)
2717                return 0;
2718
2719        return ci_do_program_memory_timing_parameters(adev);
2720}
2721
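/*
 * Select the first sclk/mclk dependency entries whose clocks cover the
 * boot state's lowest performance level and record their indices as
 * the SMC boot levels.
 */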
2722static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
2723                                          struct amdgpu_ps *amdgpu_boot_state)
2724{
2725        struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
2726        struct ci_power_info *pi = ci_get_pi(adev);
2727        u32 level = 0;
2728
2729        for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2730                if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2731                    boot_state->performance_levels[0].sclk) {
2732                        pi->smc_state_table.GraphicsBootLevel = level;
2733                        break;
2734                }
2735        }
2736
2737        for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2738                if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2739                    boot_state->performance_levels[0].mclk) {
2740                        pi->smc_state_table.MemoryBootLevel = level;
2741                        break;
2742                }
2743        }
2744}
2745
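/* Convert the per-level enabled flags into a bitmask (bit i = level i). */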
2746static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2747{
2748        u32 i;
2749        u32 mask_value = 0;
2750
2751        for (i = dpm_table->count; i > 0; i--) {
2752                mask_value <<= 1;
2753                if (dpm_table->dpm_levels[i-1].enabled)
2754                        mask_value |= 0x1;
2757        }
2758
2759        return mask_value;
2760}
2761
2762static void ci_populate_smc_link_level(struct amdgpu_device *adev,
2763                                       SMU7_Discrete_DpmTable *table)
2764{
2765        struct ci_power_info *pi = ci_get_pi(adev);
2766        struct ci_dpm_table *dpm_table = &pi->dpm_table;
2767        u32 i;
2768
2769        for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2770                table->LinkLevel[i].PcieGenSpeed =
2771                        (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2772                table->LinkLevel[i].PcieLaneCount =
2773                        amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2774                table->LinkLevel[i].EnabledForActivity = 1;
2775                table->LinkLevel[i].DownT = cpu_to_be32(5);
2776                table->LinkLevel[i].UpT = cpu_to_be32(30);
2777        }
2778
2779        pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2780        pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2781                ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2782}
2783
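/*
 * The UVD/VCE/ACP/SAMU level tables below all follow the same pattern:
 * copy the clock/voltage pairs from the ATOM dependency tables, ask
 * the VBIOS for the post dividers, and byte-swap the multi-byte fields
 * for the big-endian SMC.
 */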
2784static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
2785                                     SMU7_Discrete_DpmTable *table)
2786{
2787        u32 count;
2788        struct atom_clock_dividers dividers;
2789        int ret = -EINVAL;
2790
2791        table->UvdLevelCount =
2792                adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2793
2794        for (count = 0; count < table->UvdLevelCount; count++) {
2795                table->UvdLevel[count].VclkFrequency =
2796                        adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2797                table->UvdLevel[count].DclkFrequency =
2798                        adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2799                table->UvdLevel[count].MinVddc =
2800                        adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2801                table->UvdLevel[count].MinVddcPhases = 1;
2802
2803                ret = amdgpu_atombios_get_clock_dividers(adev,
2804                                                         COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2805                                                         table->UvdLevel[count].VclkFrequency, false, &dividers);
2806                if (ret)
2807                        return ret;
2808
2809                table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2810
2811                ret = amdgpu_atombios_get_clock_dividers(adev,
2812                                                         COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2813                                                         table->UvdLevel[count].DclkFrequency, false, &dividers);
2814                if (ret)
2815                        return ret;
2816
2817                table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2818
2819                table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2820                table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2821                table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2822        }
2823
2824        return ret;
2825}
2826
2827static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
2828                                     SMU7_Discrete_DpmTable *table)
2829{
2830        u32 count;
2831        struct atom_clock_dividers dividers;
2832        int ret = -EINVAL;
2833
2834        table->VceLevelCount =
2835                adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2836
2837        for (count = 0; count < table->VceLevelCount; count++) {
2838                table->VceLevel[count].Frequency =
2839                        adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2840                table->VceLevel[count].MinVoltage =
2841                        (u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2842                table->VceLevel[count].MinPhases = 1;
2843
2844                ret = amdgpu_atombios_get_clock_dividers(adev,
2845                                                         COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2846                                                         table->VceLevel[count].Frequency, false, &dividers);
2847                if (ret)
2848                        return ret;
2849
2850                table->VceLevel[count].Divider = (u8)dividers.post_divider;
2851
2852                table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2853                table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2854        }
2855
2856        return ret;
2858}
2859
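/*
 * Note that, unlike the UVD/VCE/SAMU levels, the ACP minimum voltage
 * is written without the VOLTAGE_SCALE factor.
 */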
2860static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
2861                                     SMU7_Discrete_DpmTable *table)
2862{
2863        u32 count;
2864        struct atom_clock_dividers dividers;
2865        int ret = -EINVAL;
2866
2867        table->AcpLevelCount = (u8)
2868                (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2869
2870        for (count = 0; count < table->AcpLevelCount; count++) {
2871                table->AcpLevel[count].Frequency =
2872                        adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2873                table->AcpLevel[count].MinVoltage =
2874                        adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2875                table->AcpLevel[count].MinPhases = 1;
2876
2877                ret = amdgpu_atombios_get_clock_dividers(adev,
2878                                                         COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2879                                                         table->AcpLevel[count].Frequency, false, &dividers);
2880                if (ret)
2881                        return ret;
2882
2883                table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2884
2885                table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2886                table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2887        }
2888
2889        return ret;
2890}
2891
2892static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
2893                                      SMU7_Discrete_DpmTable *table)
2894{
2895        u32 count;
2896        struct atom_clock_dividers dividers;
2897        int ret = -EINVAL;
2898
2899        table->SamuLevelCount =
2900                adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2901
2902        for (count = 0; count < table->SamuLevelCount; count++) {
2903                table->SamuLevel[count].Frequency =
2904                        adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2905                table->SamuLevel[count].MinVoltage =
2906                        adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2907                table->SamuLevel[count].MinPhases = 1;
2908
2909                ret = amdgpu_atombios_get_clock_dividers(adev,
2910                                                         COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2911                                                         table->SamuLevel[count].Frequency, false, &dividers);
2912                if (ret)
2913                        return ret;
2914
2915                table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2916
2917                table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2918                table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2919        }
2920
2921        return ret;
2922}
2923
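/*
 * Compute the MPLL register set for a memory clock: feedback and post
 * dividers from the VBIOS, optional memory spread spectrum (CLKS is
 * roughly the modulation period, CLKV the depth scaled to the nominal
 * VCO frequency), plus the DLL speed and power-down state.
 */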
2924static int ci_calculate_mclk_params(struct amdgpu_device *adev,
2925                                    u32 memory_clock,
2926                                    SMU7_Discrete_MemoryLevel *mclk,
2927                                    bool strobe_mode,
2928                                    bool dll_state_on)
2929{
2930        struct ci_power_info *pi = ci_get_pi(adev);
2931        u32  dll_cntl = pi->clock_registers.dll_cntl;
2932        u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2933        u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2934        u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2935        u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2936        u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2937        u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2938        u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
2939        u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
2940        struct atom_mpll_param mpll_param;
2941        int ret;
2942
2943        ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
2944        if (ret)
2945                return ret;
2946
2947        mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
2948        mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);
2949
2950        mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
2951                        MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
2952        mpll_func_cntl_1 |= (mpll_param.clkf << MPLL_FUNC_CNTL_1__CLKF__SHIFT) |
2953                (mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
2954                (mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);
2955
2956        mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
2957        mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2958
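        /*
         * For GDDR5 the DQ PLL is programmed as well.  The DQ YCLK
         * fields appear to sit at the same bit offsets as the AD ones,
         * which is why the AD masks/shifts are reused here.
         */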
2959        if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
2960                mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
2961                                MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
2962                mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
2963                                (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2964        }
2965
2966        if (pi->caps_mclk_ss_support) {
2967                struct amdgpu_atom_ss ss;
2968                u32 freq_nom;
2969                u32 tmp;
2970                u32 reference_clock = adev->clock.mpll.reference_freq;
2971
2972                if (mpll_param.qdr == 1)
2973                        freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
2974                else
2975                        freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
2976
2977                tmp = (freq_nom / reference_clock);
2978                tmp = tmp * tmp;
2979                if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
2980                                                     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2981                        u32 clks = reference_clock * 5 / ss.rate;
2982                        u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2983
2984                        mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
2985                        mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);
2986
2987                        mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
2988                        mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
2989                }
2990        }
2991
2992        mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
2993        mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);
2994
2995        if (dll_state_on)
2996                mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2997                        MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
2998        else
2999                mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
3000                        MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
3001
3002        mclk->MclkFrequency = memory_clock;
3003        mclk->MpllFuncCntl = mpll_func_cntl;
3004        mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
3005        mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
3006        mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
3007        mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
3008        mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
3009        mclk->DllCntl = dll_cntl;
3010        mclk->MpllSs1 = mpll_ss1;
3011        mclk->MpllSs2 = mpll_ss2;
3012
3013        return 0;
3014}
3015
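/*
 * Fill one SMC memory level: minimum VDDC/VDDCI/MVDD from the
 * dependency tables, stutter/strobe/EDC decisions from the configured
 * thresholds, the MPLL parameters, and finally the endian swap of
 * every multi-byte field the SMC reads.
 */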
3016static int ci_populate_single_memory_level(struct amdgpu_device *adev,
3017                                           u32 memory_clock,
3018                                           SMU7_Discrete_MemoryLevel *memory_level)
3019{
3020        struct ci_power_info *pi = ci_get_pi(adev);
3021        int ret;
3022        bool dll_state_on;
3023
3024        if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
3025                ret = ci_get_dependency_volt_by_clk(adev,
3026                                                    &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
3027                                                    memory_clock, &memory_level->MinVddc);
3028                if (ret)
3029                        return ret;
3030        }
3031
3032        if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
3033                ret = ci_get_dependency_volt_by_clk(adev,
3034                                                    &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
3035                                                    memory_clock, &memory_level->MinVddci);
3036                if (ret)
3037                        return ret;
3038        }
3039
3040        if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
3041                ret = ci_get_dependency_volt_by_clk(adev,
3042                                                    &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
3043                                                    memory_clock, &memory_level->MinMvdd);
3044                if (ret)
3045                        return ret;
3046        }
3047
3048        memory_level->MinVddcPhases = 1;
3049
3050        if (pi->vddc_phase_shed_control)
3051                ci_populate_phase_value_based_on_mclk(adev,
3052                                                      &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3053                                                      memory_clock,
3054                                                      &memory_level->MinVddcPhases);
3055
3056        memory_level->EnabledForActivity = 1;
3057        memory_level->EnabledForThrottle = 1;
3058        memory_level->UpH = 0;
3059        memory_level->DownH = 100;
3060        memory_level->VoltageDownH = 0;
3061        memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
3062
3063        memory_level->StutterEnable = false;
3064        memory_level->StrobeEnable = false;
3065        memory_level->EdcReadEnable = false;
3066        memory_level->EdcWriteEnable = false;
3067        memory_level->RttEnable = false;
3068
3069        memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3070
3071        if (pi->mclk_stutter_mode_threshold &&
3072            (memory_clock <= pi->mclk_stutter_mode_threshold) &&
3073            (!pi->uvd_enabled) &&
3074            (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
3075            (adev->pm.dpm.new_active_crtc_count <= 2))
3076                memory_level->StutterEnable = true;
3077
3078        if (pi->mclk_strobe_mode_threshold &&
3079            (memory_clock <= pi->mclk_strobe_mode_threshold))
3080                memory_level->StrobeEnable = true;
3081
3082        if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
3083                memory_level->StrobeRatio =
3084                        ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
3085                if (pi->mclk_edc_enable_threshold &&
3086                    (memory_clock > pi->mclk_edc_enable_threshold))
3087                        memory_level->EdcReadEnable = true;
3088
3089                if (pi->mclk_edc_wr_enable_threshold &&
3090                    (memory_clock > pi->mclk_edc_wr_enable_threshold))
3091                        memory_level->EdcWriteEnable = true;
3092
3093                if (memory_level->StrobeEnable) {
3094                        if (ci_get_mclk_frequency_ratio(memory_clock, true) >=
3095                            ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf))
3096                                dll_state_on = (RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1;
3097                        else
3098                                dll_state_on = (RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1;
3099                } else {
3100                        dll_state_on = pi->dll_default_on;
3101                }
3102        } else {
3103                memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock);
3104                dll_state_on = (RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1;
3105        }
3106
3107        ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
3108        if (ret)
3109                return ret;
3110
3111        memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
3112        memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
3113        memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
3114        memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
3115
3116        memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
3117        memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
3118        memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
3119        memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
3120        memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
3121        memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
3122        memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
3123        memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
3124        memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
3125        memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
3126        memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
3127
3128        return 0;
3129}
3130
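/*
 * Program the ACPI (lowest-power) level: run the sclk at the reference
 * clock frequency with the SPLL held in reset, and put the memory DLLs
 * into reset/power-down.
 */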
3131static int ci_populate_smc_acpi_level(struct amdgpu_device *adev,
3132                                      SMU7_Discrete_DpmTable *table)
3133{
3134        struct ci_power_info *pi = ci_get_pi(adev);
3135        struct atom_clock_dividers dividers;
3136        SMU7_Discrete_VoltageLevel voltage_level;
3137        u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
3138        u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
3139        u32 dll_cntl = pi->clock_registers.dll_cntl;
3140        u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
3141        int ret;
3142
3143        table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
3144
3145        if (pi->acpi_vddc)
3146                table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
3147        else
3148                table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
3149
3150        table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
3151
3152        table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq;
3153
3154        ret = amdgpu_atombios_get_clock_dividers(adev,
3155                                                 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3156                                                 table->ACPILevel.SclkFrequency, false, &dividers);
3157        if (ret)
3158                return ret;
3159
3160        table->ACPILevel.SclkDid = (u8)dividers.post_divider;
3161        table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3162        table->ACPILevel.DeepSleepDivId = 0;
3163
3164        spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK;
3165        spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;
3166
3167        spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
3168        spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT);
3169
3170        table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
3171        table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
3172        table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
3173        table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
3174        table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
3175        table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3176        table->ACPILevel.CcPwrDynRm = 0;
3177        table->ACPILevel.CcPwrDynRm1 = 0;
3178
3179        table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
3180        table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
3181        table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
3182        table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
3183        table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
3184        table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
3185        table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
3186        table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
3187        table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
3188        table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
3189        table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
3190
3191        table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
3192        table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
3193
3194        if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
3195                if (pi->acpi_vddci)
3196                        table->MemoryACPILevel.MinVddci =
3197                                cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
3198                else
3199                        table->MemoryACPILevel.MinVddci =
3200                                cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
3201        }
3202
3203        if (ci_populate_mvdd_value(adev, 0, &voltage_level))
3204                table->MemoryACPILevel.MinMvdd = 0;
3205        else
3206                table->MemoryACPILevel.MinMvdd =
3207                        cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
3208
3209        mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK |
3210                MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK;
3211        mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
3212                        MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
3213
3214        dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK);
3215
3216        table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
3217        table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
3218        table->MemoryACPILevel.MpllAdFuncCntl =
3219                cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
3220        table->MemoryACPILevel.MpllDqFuncCntl =
3221                cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
3222        table->MemoryACPILevel.MpllFuncCntl =
3223                cpu_to_be32(pi->clock_registers.mpll_func_cntl);
3224        table->MemoryACPILevel.MpllFuncCntl_1 =
3225                cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
3226        table->MemoryACPILevel.MpllFuncCntl_2 =
3227                cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
3228        table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
3229        table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
3230
3231        table->MemoryACPILevel.EnabledForThrottle = 0;
3232        table->MemoryACPILevel.EnabledForActivity = 0;
3233        table->MemoryACPILevel.UpH = 0;
3234        table->MemoryACPILevel.DownH = 100;
3235        table->MemoryACPILevel.VoltageDownH = 0;
3236        table->MemoryACPILevel.ActivityLevel =
3237                cpu_to_be16((u16)pi->mclk_activity_target);
3238
3239        table->MemoryACPILevel.StutterEnable = false;
3240        table->MemoryACPILevel.StrobeEnable = false;
3241        table->MemoryACPILevel.EdcReadEnable = false;
3242        table->MemoryACPILevel.EdcWriteEnable = false;
3243        table->MemoryACPILevel.RttEnable = false;
3244
3245        return 0;
3246}
3247
3249static int ci_enable_ulv(struct amdgpu_device *adev, bool enable)
3250{
3251        struct ci_power_info *pi = ci_get_pi(adev);
3252        struct ci_ulv_parm *ulv = &pi->ulv;
3253
3254        if (ulv->supported) {
3255                if (enable)
3256                        return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
3257                                0 : -EINVAL;
3258                else
3259                        return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
3260                                0 : -EINVAL;
3261        }
3262
3263        return 0;
3264}
3265
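/*
 * ULV runs at a fixed offset below the lowest sclk voltage.  For
 * GPIO/SMIO-controlled VDDC the offset is expressed directly; for SVI2
 * it is converted to VID steps (one step is 625/100 = 6.25 mV, hence
 * the VOLTAGE_VID_OFFSET_SCALE2 / SCALE1 factor).
 */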
3266static int ci_populate_ulv_level(struct amdgpu_device *adev,
3267                                 SMU7_Discrete_Ulv *state)
3268{
3269        struct ci_power_info *pi = ci_get_pi(adev);
3270        u16 ulv_voltage = adev->pm.dpm.backbias_response_time;
3271
3272        state->CcPwrDynRm = 0;
3273        state->CcPwrDynRm1 = 0;
3274
3275        if (ulv_voltage == 0) {
3276                pi->ulv.supported = false;
3277                return 0;
3278        }
3279
3280        if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
3281                if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3282                        state->VddcOffset = 0;
3283                else
3284                        state->VddcOffset =
3285                                adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
3286        } else {
3287                if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3288                        state->VddcOffsetVid = 0;
3289                else
3290                        state->VddcOffsetVid = (u8)
3291                                ((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
3292                                 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
3293        }
3294        state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
3295
3296        state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
3297        state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
3298        state->VddcOffset = cpu_to_be16(state->VddcOffset);
3299
3300        return 0;
3301}
3302
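/*
 * Compute the SPLL setup for an engine clock: the 26-bit fractional
 * feedback divider from the VBIOS, plus optional engine spread
 * spectrum derived from the VBIOS SS info, as for the memory PLL
 * above.
 */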
3303static int ci_calculate_sclk_params(struct amdgpu_device *adev,
3304                                    u32 engine_clock,
3305                                    SMU7_Discrete_GraphicsLevel *sclk)
3306{
3307        struct ci_power_info *pi = ci_get_pi(adev);
3308        struct atom_clock_dividers dividers;
3309        u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
3310        u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
3311        u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
3312        u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3313        u32 reference_clock = adev->clock.spll.reference_freq;
3314        u32 reference_divider;
3315        u32 fbdiv;
3316        int ret;
3317
3318        ret = amdgpu_atombios_get_clock_dividers(adev,
3319                                                 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3320                                                 engine_clock, false, &dividers);
3321        if (ret)
3322                return ret;
3323
3324        reference_divider = 1 + dividers.ref_div;
3325        fbdiv = dividers.fb_div & 0x3FFFFFF;
3326
3327        spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
3328        spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT);
3329        spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;
3330
3331        if (pi->caps_sclk_ss_support) {
3332                struct amdgpu_atom_ss ss;
3333                u32 vco_freq = engine_clock * dividers.post_div;
3334
3335                if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
3336                                                     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
3337                        u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
3338                        u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
3339
3340                        cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
3341                        cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT);
3342                        cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT);
3343
3344                        cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK;
3345                        cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT);
3346                }
3347        }
3348
3349        sclk->SclkFrequency = engine_clock;
3350        sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
3351        sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
3352        sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
3353        sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
3354        sclk->SclkDid = (u8)dividers.post_divider;
3355
3356        return 0;
3357}
3358
3359static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
3360                                            u32 engine_clock,
3361                                            u16 sclk_activity_level_t,
3362                                            SMU7_Discrete_GraphicsLevel *graphic_level)
3363{
3364        struct ci_power_info *pi = ci_get_pi(adev);
3365        int ret;
3366
3367        ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level);
3368        if (ret)
3369                return ret;
3370
3371        ret = ci_get_dependency_volt_by_clk(adev,
3372                                            &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3373                                            engine_clock, &graphic_level->MinVddc);
3374        if (ret)
3375                return ret;
3376
3377        graphic_level->SclkFrequency = engine_clock;
3378
3379        graphic_level->Flags = 0;
3380        graphic_level->MinVddcPhases = 1;
3381
3382        if (pi->vddc_phase_shed_control)
3383                ci_populate_phase_value_based_on_sclk(adev,
3384                                                      &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3385                                                      engine_clock,
3386                                                      &graphic_level->MinVddcPhases);
3387
3388        graphic_level->ActivityLevel = sclk_activity_level_t;
3389
3390        graphic_level->CcPwrDynRm = 0;
3391        graphic_level->CcPwrDynRm1 = 0;
3392        graphic_level->EnabledForThrottle = 1;
3393        graphic_level->UpH = 0;
3394        graphic_level->DownH = 0;
3395        graphic_level->VoltageDownH = 0;
3396        graphic_level->PowerThrottle = 0;
3397
3398        if (pi->caps_sclk_ds)
3399                graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock,
3400                                                                                   CISLAND_MINIMUM_ENGINE_CLOCK);
3401
3402        graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3403
3404        graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
3405        graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
3406        graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
3407        graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
3408        graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
3409        graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
3410        graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
3411        graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
3412        graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
3413        graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
3414        graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
3415
3416        return 0;
3417}
3418
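/*
 * Fill the whole GraphicsLevel array from the sclk DPM table and
 * upload it to SMC RAM.  Deep sleep is left enabled only on the two
 * lowest levels, and the highest level gets the high display
 * watermark.
 */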
3419static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
3420{
3421        struct ci_power_info *pi = ci_get_pi(adev);
3422        struct ci_dpm_table *dpm_table = &pi->dpm_table;
3423        u32 level_array_address = pi->dpm_table_start +
3424                offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3425        u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3426                SMU7_MAX_LEVELS_GRAPHICS;
3427        SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3428        u32 i;
        int ret;
3429
3430        memset(levels, 0, level_array_size);
3431
3432        for (i = 0; i < dpm_table->sclk_table.count; i++) {
3433                ret = ci_populate_single_graphic_level(adev,
3434                                                       dpm_table->sclk_table.dpm_levels[i].value,
3435                                                       (u16)pi->activity_target[i],
3436                                                       &pi->smc_state_table.GraphicsLevel[i]);
3437                if (ret)
3438                        return ret;
3439                if (i > 1)
3440                        pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3441                if (i == (dpm_table->sclk_table.count - 1))
3442                        pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3443                                PPSMC_DISPLAY_WATERMARK_HIGH;
3444        }
3445        pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
3446
3447        pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3448        pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3449                ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3450
3451        ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3452                                   (u8 *)levels, level_array_size,
3453                                   pi->sram_end);
3454        if (ret)
3455                return ret;
3456
3457        return 0;
3458}
3459
3460static int ci_populate_ulv_state(struct amdgpu_device *adev,
3461                                 SMU7_Discrete_Ulv *ulv_level)
3462{
3463        return ci_populate_ulv_level(adev, ulv_level);
3464}
3465
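/*
 * Memory-side counterpart of ci_populate_all_graphic_levels().  On
 * Hawaii (0x67B0/0x67B1) level 1 inherits level 0's minimum VDDC, and
 * level 0's activity target is pinned to a low value (0x1F).
 */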
3466static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
3467{
3468        struct ci_power_info *pi = ci_get_pi(adev);
3469        struct ci_dpm_table *dpm_table = &pi->dpm_table;
3470        u32 level_array_address = pi->dpm_table_start +
3471                offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3472        u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3473                SMU7_MAX_LEVELS_MEMORY;
3474        SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3475        u32 i;
        int ret;
3476
3477        memset(levels, 0, level_array_size);
3478
3479        for (i = 0; i < dpm_table->mclk_table.count; i++) {
3480                if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3481                        return -EINVAL;
3482                ret = ci_populate_single_memory_level(adev,
3483                                                      dpm_table->mclk_table.dpm_levels[i].value,
3484                                                      &pi->smc_state_table.MemoryLevel[i]);
3485                if (ret)
3486                        return ret;
3487        }
3488
3489        if ((dpm_table->mclk_table.count >= 2) &&
3490            ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
3491                pi->smc_state_table.MemoryLevel[1].MinVddc =
3492                        pi->smc_state_table.MemoryLevel[0].MinVddc;
3493                pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3494                        pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3495        }
3496
3497        pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3498
3499        pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3500        pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3501                ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3502
3503        pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3504                PPSMC_DISPLAY_WATERMARK_HIGH;
3505
3506        ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3507                                   (u8 *)levels, level_array_size,
3508                                   pi->sram_end);
3509        if (ret)
3510                return ret;
3511
3512        return 0;
3513}
3514
3515static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
3516                                      struct ci_single_dpm_table *dpm_table,
3517                                      u32 count)
3518{
3519        u32 i;
3520
3521        dpm_table->count = count;
3522        for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
3523                dpm_table->dpm_levels[i].enabled = false;
3524}
3525
3526static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
3527                                      u32 index, u32 pcie_gen, u32 pcie_lanes)
3528{
3529        dpm_table->dpm_levels[index].value = pcie_gen;
3530        dpm_table->dpm_levels[index].param1 = pcie_lanes;
3531        dpm_table->dpm_levels[index].enabled = true;
3532}
3533
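/*
 * The PCIe speed table always gets six fixed entries mixing the
 * power-saving and performance gen/lane limits (entry 0 uses the
 * maximum lane count on Bonaire).  If only one of the two level sets
 * is valid, it is mirrored into the other.
 */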
3534static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
3535{
3536        struct ci_power_info *pi = ci_get_pi(adev);
3537
3538        if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
3539                return -EINVAL;
3540
3541        if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
3542                pi->pcie_gen_powersaving = pi->pcie_gen_performance;
3543                pi->pcie_lane_powersaving = pi->pcie_lane_performance;
3544        } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
3545                pi->pcie_gen_performance = pi->pcie_gen_powersaving;
3546                pi->pcie_lane_performance = pi->pcie_lane_powersaving;
3547        }
3548
3549        ci_reset_single_dpm_table(adev,
3550                                  &pi->dpm_table.pcie_speed_table,
3551                                  SMU7_MAX_LEVELS_LINK);
3552
3553        if (adev->asic_type == CHIP_BONAIRE)
3554                ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3555                                          pi->pcie_gen_powersaving.min,
3556                                          pi->pcie_lane_powersaving.max);
3557        else
3558                ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3559                                          pi->pcie_gen_powersaving.min,
3560                                          pi->pcie_lane_powersaving.min);
3561        ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3562                                  pi->pcie_gen_performance.min,
3563                                  pi->pcie_lane_performance.min);
3564        ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3565                                  pi->pcie_gen_powersaving.min,
3566                                  pi->pcie_lane_powersaving.max);
3567        ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3568                                  pi->pcie_gen_performance.min,
3569                                  pi->pcie_lane_performance.max);
3570        ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3571                                  pi->pcie_gen_powersaving.max,
3572                                  pi->pcie_lane_powersaving.max);
3573        ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3574                                  pi->pcie_gen_performance.max,
3575                                  pi->pcie_lane_performance.max);
3576
3577        pi->dpm_table.pcie_speed_table.count = 6;
3578
3579        return 0;
3580}
3581
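/*
 * Build the default DPM tables from the ATOM dependency tables,
 * de-duplicating consecutive identical clocks, and keep a golden copy
 * of the defaults.
 */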
3582static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
3583{
3584        struct ci_power_info *pi = ci_get_pi(adev);
3585        struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3586                &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3587        struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
3588                &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3589        struct amdgpu_cac_leakage_table *std_voltage_table =
3590                &adev->pm.dpm.dyn_state.cac_leakage_table;
3591        u32 i;
3592
3593        if (allowed_sclk_vddc_table == NULL)
3594                return -EINVAL;
3595        if (allowed_sclk_vddc_table->count < 1)
3596                return -EINVAL;
3597        if (allowed_mclk_table == NULL)
3598                return -EINVAL;
3599        if (allowed_mclk_table->count < 1)
3600                return -EINVAL;
3601
3602        memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3603
3604        ci_reset_single_dpm_table(adev,
3605                                  &pi->dpm_table.sclk_table,
3606                                  SMU7_MAX_LEVELS_GRAPHICS);
3607        ci_reset_single_dpm_table(adev,
3608                                  &pi->dpm_table.mclk_table,
3609                                  SMU7_MAX_LEVELS_MEMORY);
3610        ci_reset_single_dpm_table(adev,
3611                                  &pi->dpm_table.vddc_table,
3612                                  SMU7_MAX_LEVELS_VDDC);
3613        ci_reset_single_dpm_table(adev,
3614                                  &pi->dpm_table.vddci_table,
3615                                  SMU7_MAX_LEVELS_VDDCI);
3616        ci_reset_single_dpm_table(adev,
3617                                  &pi->dpm_table.mvdd_table,
3618                                  SMU7_MAX_LEVELS_MVDD);
3619
3620        pi->dpm_table.sclk_table.count = 0;
3621        for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3622                if ((i == 0) ||
3623                    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3624                     allowed_sclk_vddc_table->entries[i].clk)) {
3625                        pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3626                                allowed_sclk_vddc_table->entries[i].clk;
3627                        pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3628                                (i == 0);
3629                        pi->dpm_table.sclk_table.count++;
3630                }
3631        }
3632
3633        pi->dpm_table.mclk_table.count = 0;
3634        for (i = 0; i < allowed_mclk_table->count; i++) {
3635                if ((i == 0) ||
3636                    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3637                     allowed_mclk_table->entries[i].clk)) {
3638                        pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3639                                allowed_mclk_table->entries[i].clk;
3640                        pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3641                                (i == 0);
3642                        pi->dpm_table.mclk_table.count++;
3643                }
3644        }
3645
3646        for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3647                pi->dpm_table.vddc_table.dpm_levels[i].value =
3648                        allowed_sclk_vddc_table->entries[i].v;
3649                pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3650                        std_voltage_table->entries[i].leakage;
3651                pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3652        }
3653        pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3654
3655        allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3656        if (allowed_mclk_table) {
3657                for (i = 0; i < allowed_mclk_table->count; i++) {
3658                        pi->dpm_table.vddci_table.dpm_levels[i].value =
3659                                allowed_mclk_table->entries[i].v;
3660                        pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3661                }
3662                pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3663        }
3664
3665        allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3666        if (allowed_mclk_table) {
3667                for (i = 0; i < allowed_mclk_table->count; i++) {
3668                        pi->dpm_table.mvdd_table.dpm_levels[i].value =
3669                                allowed_mclk_table->entries[i].v;
3670                        pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3671                }
3672                pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3673        }
3674
3675        ci_setup_default_pcie_tables(adev);
3676
3677        /* save a copy of the default DPM table */
3678        memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
3679                        sizeof(struct ci_dpm_table));
3680
3681        return 0;
3682}
3683
3684static int ci_find_boot_level(struct ci_single_dpm_table *table,
3685                              u32 value, u32 *boot_level)
3686{
3687        u32 i;
3688        int ret = -EINVAL;
3689
3690        for (i = 0; i < table->count; i++) {
3691                if (value == table->dpm_levels[i].value) {
3692                        *boot_level = i;
3693                        ret = 0;
3694                }
3695        }
3696
3697        return ret;
3698}
3699
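/*
 * Master setup of the SMC DPM table: build the default DPM tables,
 * populate every level type, set the boot levels and global flags,
 * then upload the whole structure minus what appears to be three
 * trailing SMU7_PIDController blocks (hence the sizeof() subtraction
 * in the final copy).
 */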
3700static int ci_init_smc_table(struct amdgpu_device *adev)
3701{
3702        struct ci_power_info *pi = ci_get_pi(adev);
3703        struct ci_ulv_parm *ulv = &pi->ulv;
3704        struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
3705        SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3706        int ret;
3707
3708        ret = ci_setup_default_dpm_tables(adev);
3709        if (ret)
3710                return ret;
3711
3712        if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3713                ci_populate_smc_voltage_tables(adev, table);
3714
3715        ci_init_fps_limits(adev);
3716
3717        if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3718                table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3719
3720        if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3721                table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3722
3723        if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
3724                table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3725
3726        if (ulv->supported) {
3727                ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
3728                if (ret)
3729                        return ret;
3730                WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3731        }
3732
3733        ret = ci_populate_all_graphic_levels(adev);
3734        if (ret)
3735                return ret;
3736
3737        ret = ci_populate_all_memory_levels(adev);
3738        if (ret)
3739                return ret;
3740
3741        ci_populate_smc_link_level(adev, table);
3742
3743        ret = ci_populate_smc_acpi_level(adev, table);
3744        if (ret)
3745                return ret;
3746
3747        ret = ci_populate_smc_vce_level(adev, table);
3748        if (ret)
3749                return ret;
3750
3751        ret = ci_populate_smc_acp_level(adev, table);
3752        if (ret)
3753                return ret;
3754
3755        ret = ci_populate_smc_samu_level(adev, table);
3756        if (ret)
3757                return ret;
3758
3759        ret = ci_do_program_memory_timing_parameters(adev);
3760        if (ret)
3761                return ret;
3762
3763        ret = ci_populate_smc_uvd_level(adev, table);
3764        if (ret)
3765                return ret;
3766
3767        table->UvdBootLevel = 0;
3768        table->VceBootLevel = 0;
3769        table->AcpBootLevel = 0;
3770        table->SamuBootLevel = 0;
3771        table->GraphicsBootLevel = 0;
3772        table->MemoryBootLevel = 0;
3773
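        /*
         * If a VBIOS boot clock is not present in the DPM tables the
         * lookup fails and the corresponding boot level stays 0.
         */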
3774        ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3775                                 pi->vbios_boot_state.sclk_bootup_value,
3776                                 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3777
3778        ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3779                                 pi->vbios_boot_state.mclk_bootup_value,
3780                                 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3781
3782        table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3783        table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3784        table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3785
3786        ci_populate_smc_initial_state(adev, amdgpu_boot_state);
3787
3788        ret = ci_populate_bapm_parameters_in_dpm_table(adev);
3789        if (ret)
3790                return ret;
3791
3792        table->UVDInterval = 1;
3793        table->VCEInterval = 1;
3794        table->ACPInterval = 1;
3795        table->SAMUInterval = 1;
3796        table->GraphicsVoltageChangeEnable = 1;
3797        table->GraphicsThermThrottleEnable = 1;
3798        table->GraphicsInterval = 1;
3799        table->VoltageInterval = 1;
3800        table->ThermalInterval = 1;
3801        table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3802                                             CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3803        table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3804                                            CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3805        table->MemoryVoltageChangeEnable = 1;
3806        table->MemoryInterval = 1;
3807        table->VoltageResponseTime = 0;
3808        table->VddcVddciDelta = 4000;
3809        table->PhaseResponseTime = 0;
3810        table->MemoryThermThrottleEnable = 1;
3811        table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
3812        table->PCIeGenInterval = 1;
3813        if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3814                table->SVI2Enable = 1;
3815        else
3816                table->SVI2Enable = 0;
3817
3818        table->ThermGpio = 17;
3819        table->SclkStepSize = 0x4000;
3820
3821        table->SystemFlags = cpu_to_be32(table->SystemFlags);
3822        table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3823        table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3824        table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3825        table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3826        table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3827        table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3828        table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3829        table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3830        table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3831        table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3832        table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3833        table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3834        table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3835
3836        ret = amdgpu_ci_copy_bytes_to_smc(adev,
3837                                   pi->dpm_table_start +
3838                                   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3839                                   (u8 *)&table->SystemFlags,
3840                                   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3841                                   pi->sram_end);
3842        if (ret)
3843                return ret;
3844
3845        return 0;
3846}
3847
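/* Enable only the DPM levels that fall inside [low_limit, high_limit]. */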
3848static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
3849                                      struct ci_single_dpm_table *dpm_table,
3850                                      u32 low_limit, u32 high_limit)
3851{
3852        u32 i;
3853
3854        for (i = 0; i < dpm_table->count; i++) {
3855                if ((dpm_table->dpm_levels[i].value < low_limit) ||
3856                    (dpm_table->dpm_levels[i].value > high_limit))
3857                        dpm_table->dpm_levels[i].enabled = false;
3858                else
3859                        dpm_table->dpm_levels[i].enabled = true;
3860        }
3861}
3862
3863static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
3864                                    u32 speed_low, u32 lanes_low,
3865                                    u32 speed_high, u32 lanes_high)
3866{
3867        struct ci_power_info *pi = ci_get_pi(adev);
3868        struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3869        u32 i, j;
3870
3871        for (i = 0; i < pcie_table->count; i++) {
3872                if ((pcie_table->dpm_levels[i].value < speed_low) ||
3873                    (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3874                    (pcie_table->dpm_levels[i].value > speed_high) ||
3875                    (pcie_table->dpm_levels[i].param1 > lanes_high))
3876                        pcie_table->dpm_levels[i].enabled = false;
3877                else
3878                        pcie_table->dpm_levels[i].enabled = true;
3879        }
3880
3881        for (i = 0; i < pcie_table->count; i++) {
3882                if (pcie_table->dpm_levels[i].enabled) {
3883                        for (j = i + 1; j < pcie_table->count; j++) {
3884                                if (pcie_table->dpm_levels[j].enabled) {
3885                                        if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3886                                            (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3887                                                pcie_table->dpm_levels[j].enabled = false;
3888                                }
3889                        }
3890                }
3891        }
3892}
3893
3894static int ci_trim_dpm_states(struct amdgpu_device *adev,
3895                              struct amdgpu_ps *amdgpu_state)
3896{
3897        struct ci_ps *state = ci_get_ps(amdgpu_state);
3898        struct ci_power_info *pi = ci_get_pi(adev);
3899        u32 high_limit_count;
3900
3901        if (state->performance_level_count < 1)
3902                return -EINVAL;
3903
3904        if (state->performance_level_count == 1)
3905                high_limit_count = 0;
3906        else
3907                high_limit_count = 1;
3908
3909        ci_trim_single_dpm_states(adev,
3910                                  &pi->dpm_table.sclk_table,
3911                                  state->performance_levels[0].sclk,
3912                                  state->performance_levels[high_limit_count].sclk);
3913
3914        ci_trim_single_dpm_states(adev,
3915                                  &pi->dpm_table.mclk_table,
3916                                  state->performance_levels[0].mclk,
3917                                  state->performance_levels[high_limit_count].mclk);
3918
3919        ci_trim_pcie_dpm_states(adev,
3920                                state->performance_levels[0].pcie_gen,
3921                                state->performance_levels[0].pcie_lane,
3922                                state->performance_levels[high_limit_count].pcie_gen,
3923                                state->performance_levels[high_limit_count].pcie_lane);
3924
3925        return 0;
3926}
3927
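/*
 * Look up the VDDC needed for the current display clock and request the
 * lowest qualifying voltage from the SMC via PPSMC_MSG_VddC_Request
 * (scaled by VOLTAGE_SCALE, as elsewhere in this file).
 */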
3928static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
3929{
3930        struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
3931                &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3932        struct amdgpu_clock_voltage_dependency_table *vddc_table =
3933                &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3934        u32 requested_voltage = 0;
3935        u32 i;
3936
3937        if (disp_voltage_table == NULL)
3938                return -EINVAL;
3939        if (!disp_voltage_table->count)
3940                return -EINVAL;
3941
3942        for (i = 0; i < disp_voltage_table->count; i++) {
3943                if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3944                        requested_voltage = disp_voltage_table->entries[i].v;
3945        }
3946
3947        for (i = 0; i < vddc_table->count; i++) {
3948                if (requested_voltage <= vddc_table->entries[i].v) {
3949                        requested_voltage = vddc_table->entries[i].v;
3950                        return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3951                                                                  PPSMC_MSG_VddC_Request,
3952                                                                  requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3953                                0 : -EINVAL;
3954                }
3955        }
3956
3957        return -EINVAL;
3958}
3959
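/*
 * Push the sclk and mclk DPM level enable masks to the SMC; the PCIe
 * mask upload is compiled out below.  The display minimum-voltage
 * request's return value is not checked here.
 */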
3960static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
3961{
3962        struct ci_power_info *pi = ci_get_pi(adev);
3963        PPSMC_Result result;
3964
3965        ci_apply_disp_minimum_voltage_request(adev);
3966
3967        if (!pi->sclk_dpm_key_disabled) {
3968                if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3969                        result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3970                                                                   PPSMC_MSG_SCLKDPM_SetEnabledMask,
3971                                                                   pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3972                        if (result != PPSMC_Result_OK)
3973                                return -EINVAL;
3974                }
3975        }
3976
3977        if (!pi->mclk_dpm_key_disabled) {
3978                if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3979                        result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3980                                                                   PPSMC_MSG_MCLKDPM_SetEnabledMask,
3981                                                                   pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3982                        if (result != PPSMC_Result_OK)
3983                                return -EINVAL;
3984                }
3985        }
3986
3987#if 0
3988        if (!pi->pcie_dpm_key_disabled) {
3989                if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3990                        result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3991                                                                   PPSMC_MSG_PCIeDPM_SetEnabledMask,
3992                                                                   pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3993                        if (result != PPSMC_Result_OK)
3994                                return -EINVAL;
3995                }
3996        }
3997#endif
3998
3999        return 0;
4000}
4001
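/*
 * Compare the new state's top sclk/mclk against the current DPM tables
 * and record what needs re-uploading: DPMTABLE_OD_UPDATE_* when the
 * clock is not in the table (overdrive), DPMTABLE_UPDATE_MCLK when the
 * active CRTC count changed.
 */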
4002static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
4003                                                   struct amdgpu_ps *amdgpu_state)
4004{
4005        struct ci_power_info *pi = ci_get_pi(adev);
4006        struct ci_ps *state = ci_get_ps(amdgpu_state);
4007        struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
4008        u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
4009        struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
4010        u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
4011        u32 i;
4012
4013        pi->need_update_smu7_dpm_table = 0;
4014
4015        for (i = 0; i < sclk_table->count; i++) {
4016                if (sclk == sclk_table->dpm_levels[i].value)
4017                        break;
4018        }
4019
4020        if (i >= sclk_table->count) {
4021                pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
4022        } else {
4023                /* XXX check display min clock requirements; the test
                 * below compares CISLAND_MINIMUM_ENGINE_CLOCK with itself,
                 * so it is always false and DPMTABLE_UPDATE_SCLK is never
                 * set on this path; the intended display-minimum check was
                 * never implemented */
4024                if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
4025                        pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
4026        }
4027
4028        for (i = 0; i < mclk_table->count; i++) {
4029                if (mclk == mclk_table->dpm_levels[i].value)
4030                        break;
4031        }
4032
4033        if (i >= mclk_table->count)
4034                pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4035
4036        if (adev->pm.dpm.current_active_crtc_count !=
4037            adev->pm.dpm.new_active_crtc_count)
4038                pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
4039}
4040
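/*
 * Patch the highest sclk/mclk level in place for overdrive requests and
 * re-upload whichever graphics/memory level tables were flagged by
 * ci_find_dpm_states_clocks_in_dpm_table().
 */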
4041static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
4042                                                       struct amdgpu_ps *amdgpu_state)
4043{
4044        struct ci_power_info *pi = ci_get_pi(adev);
4045        struct ci_ps *state = ci_get_ps(amdgpu_state);
4046        u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
4047        u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
4048        struct ci_dpm_table *dpm_table = &pi->dpm_table;
4049        int ret;
4050
4051        if (!pi->need_update_smu7_dpm_table)
4052                return 0;
4053
4054        if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
4055                dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
4056
4057        if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
4058                dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
4059
4060        if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
4061                ret = ci_populate_all_graphic_levels(adev);
4062                if (ret)
4063                        return ret;
4064        }
4065
4066        if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
4067                ret = ci_populate_all_memory_levels(adev);
4068                if (ret)
4069                        return ret;
4070        }
4071
4072        return 0;
4073}
4074
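/*
 * Build the UVD level enable mask from the clock/voltage dependency
 * table, capped at the AC or DC VDDC limit, and enable/disable UVD DPM
 * in the SMC.  While UVD is active the lowest MCLK level is masked off,
 * presumably to avoid memory reclocking during decode; it is restored
 * on disable.  ci_enable_vce_dpm() below follows the same pattern.
 */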
4075static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
4076{
4077        struct ci_power_info *pi = ci_get_pi(adev);
4078        const struct amdgpu_clock_and_voltage_limits *max_limits;
4079        int i;
4080
4081        if (adev->pm.dpm.ac_power)
4082                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4083        else
4084                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4085
4086        if (enable) {
4087                pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
4088
4089                for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4090                        if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4091                                pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
4092
4093                                if (!pi->caps_uvd_dpm)
4094                                        break;
4095                        }
4096                }
4097
4098                amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4099                                                  PPSMC_MSG_UVDDPM_SetEnabledMask,
4100                                                  pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
4101
4102                if (pi->last_mclk_dpm_enable_mask & 0x1) {
4103                        pi->uvd_enabled = true;
4104                        pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4105                        amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4106                                                          PPSMC_MSG_MCLKDPM_SetEnabledMask,
4107                                                          pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4108                }
4109        } else {
4110                if (pi->uvd_enabled) {
4111                        pi->uvd_enabled = false;
4112                        pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
4113                        amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4114                                                          PPSMC_MSG_MCLKDPM_SetEnabledMask,
4115                                                          pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4116                }
4117        }
4118
4119        return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4120                                   PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
4121                0 : -EINVAL;
4122}
4123
4124static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
4125{
4126        struct ci_power_info *pi = ci_get_pi(adev);
4127        const struct amdgpu_clock_and_voltage_limits *max_limits;
4128        int i;
4129
4130        if (adev->pm.dpm.ac_power)
4131                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4132        else
4133                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4134
4135        if (enable) {
4136                pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
4137                for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4138                        if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4139                                pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
4140
4141                                if (!pi->caps_vce_dpm)
4142                                        break;
4143                        }
4144                }
4145
4146                amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4147                                                  PPSMC_MSG_VCEDPM_SetEnabledMask,
4148                                                  pi->dpm_level_enable_mask.vce_dpm_enable_mask);
4149        }
4150
4151        return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4152                                   PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
4153                0 : -EINVAL;
4154}
4155
4156#if 0
4157static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
4158{
4159        struct ci_power_info *pi = ci_get_pi(adev);
4160        const struct amdgpu_clock_and_voltage_limits *max_limits;
4161        int i;
4162
4163        if (adev->pm.dpm.ac_power)
4164                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4165        else
4166                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4167
4168        if (enable) {
4169                pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
4170                for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4171                        if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4172                                pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
4173
4174                                if (!pi->caps_samu_dpm)
4175                                        break;
4176                        }
4177                }
4178
4179                amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4180                                                  PPSMC_MSG_SAMUDPM_SetEnabledMask,
4181                                                  pi->dpm_level_enable_mask.samu_dpm_enable_mask);
4182        }
4183        return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4184                                   PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
4185                0 : -EINVAL;
4186}
4187
4188static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
4189{
4190        struct ci_power_info *pi = ci_get_pi(adev);
4191        const struct amdgpu_clock_and_voltage_limits *max_limits;
4192        int i;
4193
4194        if (adev->pm.dpm.ac_power)
4195                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4196        else
4197                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4198
4199        if (enable) {
4200                pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
4201                for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4202                        if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4203                                pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
4204
4205                                if (!pi->caps_acp_dpm)
4206                                        break;
4207                        }
4208                }
4209
4210                amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4211                                                  PPSMC_MSG_ACPDPM_SetEnabledMask,
4212                                                  pi->dpm_level_enable_mask.acp_dpm_enable_mask);
4213        }
4214
4215        return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4216                                   PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
4217                0 : -EINVAL;
4218}
4219#endif
4220
4221static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
4222{
4223        struct ci_power_info *pi = ci_get_pi(adev);
4224        u32 tmp;
4225        int ret = 0;
4226
4227        if (!gate) {
4228                /* turn the clocks on when decoding */
4229                if (pi->caps_uvd_dpm ||
4230                    (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
4231                        pi->smc_state_table.UvdBootLevel = 0;
4232                else
4233                        pi->smc_state_table.UvdBootLevel =
4234                                adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
4235
4236                tmp = RREG32_SMC(ixDPM_TABLE_475);
4237                tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
4238                tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
4239                WREG32_SMC(ixDPM_TABLE_475, tmp);
4240                ret = ci_enable_uvd_dpm(adev, true);
4241        } else {
4242                ret = ci_enable_uvd_dpm(adev, false);
4243                if (ret)
4244                        return ret;
4245        }
4246
4247        return ret;
4248}
4249
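/*
 * Pick the first VCE level whose evclk meets the hard-coded minimum;
 * fall back to the highest level if none qualifies.  Assumes the VCE
 * dependency table is non-empty.
 */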
4250static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
4251{
4252        u8 i;
4253        u32 min_evclk = 30000; /* ??? presumably 300 MHz in 10 kHz units */
4254        struct amdgpu_vce_clock_voltage_dependency_table *table =
4255                &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4256
4257        for (i = 0; i < table->count; i++) {
4258                if (table->entries[i].evclk >= min_evclk)
4259                        return i;
4260        }
4261
4262        return table->count - 1;
4263}
4264
4265static int ci_update_vce_dpm(struct amdgpu_device *adev,
4266                             struct amdgpu_ps *amdgpu_new_state,
4267                             struct amdgpu_ps *amdgpu_current_state)
4268{
4269        struct ci_power_info *pi = ci_get_pi(adev);
4270        int ret = 0;
4271        u32 tmp;
4272
4273        if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
4274                if (amdgpu_new_state->evclk) {
4275                        pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
4276                        tmp = RREG32_SMC(ixDPM_TABLE_475);
4277                        tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
4278                        tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
4279                        WREG32_SMC(ixDPM_TABLE_475, tmp);
4280
4281                        ret = ci_enable_vce_dpm(adev, true);
4282                } else {
4283                        ret = ci_enable_vce_dpm(adev, false);
4284                        if (ret)
4285                                return ret;
4286                }
4287        }
4288        return ret;
4289}
4290
4291#if 0
4292static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
4293{
4294        return ci_enable_samu_dpm(adev, gate);
4295}
4296
4297static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
4298{
4299        struct ci_power_info *pi = ci_get_pi(adev);
4300        u32 tmp;
4301
4302        if (!gate) {
4303                pi->smc_state_table.AcpBootLevel = 0;
4304
4305                tmp = RREG32_SMC(ixDPM_TABLE_475);
4306                tmp &= ~AcpBootLevel_MASK;
4307                tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
4308                WREG32_SMC(ixDPM_TABLE_475, tmp);
4309        }
4310
4311        return ci_enable_acp_dpm(adev, !gate);
4312}
4313#endif
4314
4315static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
4316                                             struct amdgpu_ps *amdgpu_state)
4317{
4318        struct ci_power_info *pi = ci_get_pi(adev);
4319        int ret;
4320
4321        ret = ci_trim_dpm_states(adev, amdgpu_state);
4322        if (ret)
4323                return ret;
4324
4325        pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
4326                ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
4327        pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
4328                ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
4329        pi->last_mclk_dpm_enable_mask =
4330                pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4331        if (pi->uvd_enabled) {
4332                if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4333                        pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4334        }
4335        pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
4336                ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
4337
4338        return 0;
4339}
4340
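/*
 * Return the index of the lowest set bit in level_mask.  Callers must
 * pass a non-zero mask or the loop below never terminates.
 */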
4341static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
4342                                       u32 level_mask)
4343{
4344        u32 level = 0;
4345
4346        while ((level_mask & (1 << level)) == 0)
4347                level++;
4348
4349        return level;
4350}
4351
4352
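/*
 * Force sclk/mclk/PCIe to the highest or lowest enabled level, or
 * restore automatic selection, then poll the SMC's
 * TARGET_AND_CURRENT_PROFILE_INDEX registers until the current index
 * matches the forced one or adev->usec_timeout expires.
 */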
4353static int ci_dpm_force_performance_level(void *handle,
4354                                          enum amd_dpm_forced_level level)
4355{
4356        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4357        struct ci_power_info *pi = ci_get_pi(adev);
4358        u32 tmp, levels, i;
4359        int ret;
4360
4361        if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
4362                if ((!pi->pcie_dpm_key_disabled) &&
4363                    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4364                        levels = 0;
4365                        tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4366                        while (tmp >>= 1)
4367                                levels++;
4368                        if (levels) {
4369                                ret = ci_dpm_force_state_pcie(adev, levels);
4370                                if (ret)
4371                                        return ret;
4372                                for (i = 0; i < adev->usec_timeout; i++) {
4373                                        tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4374                                        TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4375                                        TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4376                                        if (tmp == levels)
4377                                                break;
4378                                        udelay(1);
4379                                }
4380                        }
4381                }
4382                if ((!pi->sclk_dpm_key_disabled) &&
4383                    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4384                        levels = 0;
4385                        tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4386                        while (tmp >>= 1)
4387                                levels++;
4388                        if (levels) {
4389                                ret = ci_dpm_force_state_sclk(adev, levels);
4390                                if (ret)
4391                                        return ret;
4392                                for (i = 0; i < adev->usec_timeout; i++) {
4393                                        tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4394                                        TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4395                                        TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4396                                        if (tmp == levels)
4397                                                break;
4398                                        udelay(1);
4399                                }
4400                        }
4401                }
4402                if ((!pi->mclk_dpm_key_disabled) &&
4403                    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4404                        levels = 0;
4405                        tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4406                        while (tmp >>= 1)
4407                                levels++;
4408                        if (levels) {
4409                                ret = ci_dpm_force_state_mclk(adev, levels);
4410                                if (ret)
4411                                        return ret;
4412                                for (i = 0; i < adev->usec_timeout; i++) {
4413                                        tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4414                                        TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4415                                        TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4416                                        if (tmp == levels)
4417                                                break;
4418                                        udelay(1);
4419                                }
4420                        }
4421                }
4422        } else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
4423                if ((!pi->sclk_dpm_key_disabled) &&
4424                    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4425                        levels = ci_get_lowest_enabled_level(adev,
4426                                                             pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4427                        ret = ci_dpm_force_state_sclk(adev, levels);
4428                        if (ret)
4429                                return ret;
4430                        for (i = 0; i < adev->usec_timeout; i++) {
4431                                tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4432                                TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4433                                TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4434                                if (tmp == levels)
4435                                        break;
4436                                udelay(1);
4437                        }
4438                }
4439                if ((!pi->mclk_dpm_key_disabled) &&
4440                    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4441                        levels = ci_get_lowest_enabled_level(adev,
4442                                                             pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4443                        ret = ci_dpm_force_state_mclk(adev, levels);
4444                        if (ret)
4445                                return ret;
4446                        for (i = 0; i < adev->usec_timeout; i++) {
4447                                tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4448                                TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4449                                TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4450                                if (tmp == levels)
4451                                        break;
4452                                udelay(1);
4453                        }
4454                }
4455                if ((!pi->pcie_dpm_key_disabled) &&
4456                    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4457                        levels = ci_get_lowest_enabled_level(adev,
4458                                                             pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4459                        ret = ci_dpm_force_state_pcie(adev, levels);
4460                        if (ret)
4461                                return ret;
4462                        for (i = 0; i < adev->usec_timeout; i++) {
4463                                tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4464                                TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4465                                TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4466                                if (tmp == levels)
4467                                        break;
4468                                udelay(1);
4469                        }
4470                }
4471        } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
4472                if (!pi->pcie_dpm_key_disabled) {
4473                        PPSMC_Result smc_result;
4474
4475                        smc_result = amdgpu_ci_send_msg_to_smc(adev,
4476                                                               PPSMC_MSG_PCIeDPM_UnForceLevel);
4477                        if (smc_result != PPSMC_Result_OK)
4478                                return -EINVAL;
4479                }
4480                ret = ci_upload_dpm_level_enable_mask(adev);
4481                if (ret)
4482                        return ret;
4483        }
4484
4485        adev->pm.dpm.forced_level = level;
4486
4487        return 0;
4488}
4489
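/*
 * Append derived EMRS/MRS/MRS1 command entries to the MC register
 * table: the high halves come from the live MC_PMG_CMD_* registers,
 * the low halves from the per-entry MC_SEQ_MISC1/MC_SEQ_RESERVE_M
 * data.  Returns -EINVAL if the table would overflow
 * SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE.
 */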
4490static int ci_set_mc_special_registers(struct amdgpu_device *adev,
4491                                       struct ci_mc_reg_table *table)
4492{
4493        u8 i, j, k;
4494        u32 temp_reg;
4495
4496        for (i = 0, j = table->last; i < table->last; i++) {
4497                if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4498                        return -EINVAL;
4499        switch (table->mc_reg_address[i].s1) {
4500                case mmMC_SEQ_MISC1:
4501                        temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
4502                        table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
4503                        table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
4504                        for (k = 0; k < table->num_entries; k++) {
4505                                table->mc_reg_table_entry[k].mc_data[j] =
4506                                        ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
4507                        }
4508                        j++;
4509
4510                        if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4511                                return -EINVAL;
4512                        temp_reg = RREG32(mmMC_PMG_CMD_MRS);
4513                        table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
4514                        table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
4515                        for (k = 0; k < table->num_entries; k++) {
4516                                table->mc_reg_table_entry[k].mc_data[j] =
4517                                        (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4518                                if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
4519                                        table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4520                        }
4521                        j++;
4522
4523                        if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
4524                                if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4525                                        return -EINVAL;
4526                                table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
4527                                table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
4528                                for (k = 0; k < table->num_entries; k++) {
4529                                        table->mc_reg_table_entry[k].mc_data[j] =
4530                                                (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
4531                                }
4532                                j++;
4533                        }
4534                        break;
4535                case mmMC_SEQ_RESERVE_M:
4536                        temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
4537                        table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
4538                        table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
4539                        for (k = 0; k < table->num_entries; k++) {
4540                                table->mc_reg_table_entry[k].mc_data[j] =
4541                                        (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4542                        }
4543                        j++;
4544                        break;
4545                default:
4546                        break;
4547                }
4548
4549        }
4550
4551        table->last = j;
4552
4553        return 0;
4554}
4555
4556static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4557{
4558        bool result = true;
4559
4560        switch (in_reg) {
4561        case mmMC_SEQ_RAS_TIMING:
4562                *out_reg = mmMC_SEQ_RAS_TIMING_LP;
4563                break;
4564        case mmMC_SEQ_DLL_STBY:
4565                *out_reg = mmMC_SEQ_DLL_STBY_LP;
4566                break;
4567        case mmMC_SEQ_G5PDX_CMD0:
4568                *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
4569                break;
4570        case mmMC_SEQ_G5PDX_CMD1:
4571                *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
4572                break;
4573        case mmMC_SEQ_G5PDX_CTRL:
4574                *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
4575                break;
4576        case mmMC_SEQ_CAS_TIMING:
4577                *out_reg = mmMC_SEQ_CAS_TIMING_LP;
4578                break;
4579        case mmMC_SEQ_MISC_TIMING:
4580                *out_reg = mmMC_SEQ_MISC_TIMING_LP;
4581                break;
4582        case mmMC_SEQ_MISC_TIMING2:
4583                *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
4584                break;
4585        case mmMC_SEQ_PMG_DVS_CMD:
4586                *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
4587                break;
4588        case mmMC_SEQ_PMG_DVS_CTL:
4589                *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
4590                break;
4591        case mmMC_SEQ_RD_CTL_D0:
4592                *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
4593                break;
4594        case mmMC_SEQ_RD_CTL_D1:
4595                *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
4596                break;
4597        case mmMC_SEQ_WR_CTL_D0:
4598                *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
4599                break;
4600        case mmMC_SEQ_WR_CTL_D1:
4601                *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
4602                break;
4603        case mmMC_PMG_CMD_EMRS:
4604                *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
4605                break;
4606        case mmMC_PMG_CMD_MRS:
4607                *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
4608                break;
4609        case mmMC_PMG_CMD_MRS1:
4610                *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
4611                break;
4612        case mmMC_SEQ_PMG_TIMING:
4613                *out_reg = mmMC_SEQ_PMG_TIMING_LP;
4614                break;
4615        case mmMC_PMG_CMD_MRS2:
4616                *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
4617                break;
4618        case mmMC_SEQ_WR_CTL_2:
4619                *out_reg = mmMC_SEQ_WR_CTL_2_LP;
4620                break;
4621        default:
4622                result = false;
4623                break;
4624        }
4625
4626        return result;
4627}
4628
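/*
 * Mark a register column as valid (worth uploading per MCLK level) only
 * if its value differs between at least two table entries.
 */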
4629static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4630{
4631        u8 i, j;
4632
4633        for (i = 0; i < table->last; i++) {
4634                for (j = 1; j < table->num_entries; j++) {
4635                        if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4636                            table->mc_reg_table_entry[j].mc_data[i]) {
4637                                table->valid_flag |= 1 << i;
4638                                break;
4639                        }
4640                }
4641        }
4642}
4643
4644static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4645{
4646        u32 i;
4647        u16 address;
4648
4649        for (i = 0; i < table->last; i++) {
4650                table->mc_reg_address[i].s0 =
4651                        ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4652                        address : table->mc_reg_address[i].s1;
4653        }
4654}
4655
4656static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4657                                      struct ci_mc_reg_table *ci_table)
4658{
4659        u8 i, j;
4660
4661        if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4662                return -EINVAL;
4663        if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4664                return -EINVAL;
4665
4666        for (i = 0; i < table->last; i++)
4667                ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4668
4669        ci_table->last = table->last;
4670
4671        for (i = 0; i < table->num_entries; i++) {
4672                ci_table->mc_reg_table_entry[i].mclk_max =
4673                        table->mc_reg_table_entry[i].mclk_max;
4674                for (j = 0; j < table->last; j++)
4675                        ci_table->mc_reg_table_entry[i].mc_data[j] =
4676                                table->mc_reg_table_entry[i].mc_data[j];
4677        }
4678        ci_table->num_entries = table->num_entries;
4679
4680        return 0;
4681}
4682
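/*
 * Board-specific MC sequence fix-ups: on certain Hawaii parts (device
 * IDs 0x67B0/0x67B1, gated on an MC_SEQ_MISC0 revision field) patch the
 * timing and write-control entries for the 125000 and 137500 MCLK
 * points (apparently 10 kHz units) and poke MC_SEQ_IO_DEBUG to match.
 */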
4683static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
4684                                       struct ci_mc_reg_table *table)
4685{
4686        u8 i, k;
4687        u32 tmp;
4688        bool patch;
4689
4690        tmp = RREG32(mmMC_SEQ_MISC0);
4691        patch = ((tmp & 0x0000f00) == 0x300);
4692
4693        if (patch &&
4694            ((adev->pdev->device == 0x67B0) ||
4695             (adev->pdev->device == 0x67B1))) {
4696                for (i = 0; i < table->last; i++) {
4697                        if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4698                                return -EINVAL;
4699                        switch (table->mc_reg_address[i].s1) {
4700                        case mmMC_SEQ_MISC1:
4701                                for (k = 0; k < table->num_entries; k++) {
4702                                        if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4703                                            (table->mc_reg_table_entry[k].mclk_max == 137500))
4704                                                table->mc_reg_table_entry[k].mc_data[i] =
4705                                                        (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
4706                                                        0x00000007;
4707                                }
4708                                break;
4709                        case mmMC_SEQ_WR_CTL_D0:
4710                                for (k = 0; k < table->num_entries; k++) {
4711                                        if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4712                                            (table->mc_reg_table_entry[k].mclk_max == 137500))
4713                                                table->mc_reg_table_entry[k].mc_data[i] =
4714                                                        (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4715                                                        0x0000D0DD;
4716                                }
4717                                break;
4718                        case mmMC_SEQ_WR_CTL_D1:
4719                                for (k = 0; k < table->num_entries; k++) {
4720                                        if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4721                                            (table->mc_reg_table_entry[k].mclk_max == 137500))
4722                                                table->mc_reg_table_entry[k].mc_data[i] =
4723                                                        (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4724                                                        0x0000D0DD;
4725                                }
4726                                break;
4727                        case mmMC_SEQ_WR_CTL_2:
4728                                for (k = 0; k < table->num_entries; k++) {
4729                                        if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4730                                            (table->mc_reg_table_entry[k].mclk_max == 137500))
4731                                                table->mc_reg_table_entry[k].mc_data[i] = 0;
4732                                }
4733                                break;
4734                        case mmMC_SEQ_CAS_TIMING:
4735                                for (k = 0; k < table->num_entries; k++) {
4736                                        if (table->mc_reg_table_entry[k].mclk_max == 125000)
4737                                                table->mc_reg_table_entry[k].mc_data[i] =
4738                                                        (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4739                                                        0x000C0140;
4740                                        else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4741                                                table->mc_reg_table_entry[k].mc_data[i] =
4742                                                        (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4743                                                        0x000C0150;
4744                                }
4745                                break;
4746                        case mmMC_SEQ_MISC_TIMING:
4747                                for (k = 0; k < table->num_entries; k++) {
4748                                        if (table->mc_reg_table_entry[k].mclk_max == 125000)
4749                                                table->mc_reg_table_entry[k].mc_data[i] =
4750                                                        (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4751                                                        0x00000030;
4752                                        else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4753                                                table->mc_reg_table_entry[k].mc_data[i] =
4754                                                        (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4755                                                        0x00000035;
4756                                }
4757                                break;
4758                        default:
4759                                break;
4760                        }
4761                }
4762
4763                WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4764                tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
4765                tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
4766                WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4767                WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
4768        }
4769
4770        return 0;
4771}
4772
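/*
 * Build the driver's MC register table: latch the live MC_SEQ_*
 * registers into their _LP shadows, read the VBIOS table for this
 * memory module, then copy, index, patch and post-process it via the
 * helpers above before marking the valid columns.
 */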
4773static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
4774{
4775        struct ci_power_info *pi = ci_get_pi(adev);
4776        struct atom_mc_reg_table *table;
4777        struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4778        u8 module_index = ci_get_memory_module_index(adev);
4779        int ret;
4780
4781        table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4782        if (!table)
4783                return -ENOMEM;
4784
4785        WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
4786        WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
4787        WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
4788        WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
4789        WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
4790        WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
4791        WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
4792        WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
4793        WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
4794        WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
4795        WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
4796        WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
4797        WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
4798        WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
4799        WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
4800        WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
4801        WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
4802        WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
4803        WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
4804        WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));
4805
4806        ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
4807        if (ret)
4808                goto init_mc_done;
4809
4810        ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4811        if (ret)
4812                goto init_mc_done;
4813
4814        ci_set_s0_mc_reg_index(ci_table);
4815
4816        ret = ci_register_patching_mc_seq(adev, ci_table);
4817        if (ret)
4818                goto init_mc_done;
4819
4820        ret = ci_set_mc_special_registers(adev, ci_table);
4821        if (ret)
4822                goto init_mc_done;
4823
4824        ci_set_valid_flag(ci_table);
4825
4826init_mc_done:
4827        kfree(table);
4828
4829        return ret;
4830}
4831
4832static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
4833                                        SMU7_Discrete_MCRegisters *mc_reg_table)
4834{
4835        struct ci_power_info *pi = ci_get_pi(adev);
4836        u32 i, j;
4837
4838        for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4839                if (pi->mc_reg_table.valid_flag & (1 << j)) {
4840                        if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4841                                return -EINVAL;
4842                        mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4843                        mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4844                        i++;
4845                }
4846        }
4847
4848        mc_reg_table->last = (u8)i;
4849
4850        return 0;
4851}
4852
4853static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4854                                    SMU7_Discrete_MCRegisterSet *data,
4855                                    u32 num_entries, u32 valid_flag)
4856{
4857        u32 i, j;
4858
4859        for (i = 0, j = 0; j < num_entries; j++) {
4860                if (valid_flag & (1 << j)) {
4861                        data->value[i] = cpu_to_be32(entry->mc_data[j]);
4862                        i++;
4863                }
4864        }
4865}
4866
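/*
 * Select the first MC register entry whose mclk_max covers the given
 * memory clock (falling back to the last entry) and convert it to the
 * big-endian SMC layout.
 */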
4867static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
4868                                                 const u32 memory_clock,
4869                                                 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4870{
4871        struct ci_power_info *pi = ci_get_pi(adev);
4872        u32 i;
4873
4874        for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
4875                if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4876                        break;
4877        }
4878
4879        if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4880                --i;
4881
4882        ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4883                                mc_reg_table_data, pi->mc_reg_table.last,
4884                                pi->mc_reg_table.valid_flag);
4885}
4886
4887static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
4888                                           SMU7_Discrete_MCRegisters *mc_reg_table)
4889{
4890        struct ci_power_info *pi = ci_get_pi(adev);
4891        u32 i;
4892
4893        for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4894                ci_convert_mc_reg_table_entry_to_smc(adev,
4895                                                     pi->dpm_table.mclk_table.dpm_levels[i].value,
4896                                                     &mc_reg_table->data[i]);
4897}
4898
4899static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
4900{
4901        struct ci_power_info *pi = ci_get_pi(adev);
4902        int ret;
4903
4904        memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4905
4906        ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
4907        if (ret)
4908                return ret;
4909        ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4910
4911        return amdgpu_ci_copy_bytes_to_smc(adev,
4912                                    pi->mc_reg_table_start,
4913                                    (u8 *)&pi->smc_mc_reg_table,
4914                                    sizeof(SMU7_Discrete_MCRegisters),
4915                                    pi->sram_end);
4916}
4917
4918static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
4919{
4920        struct ci_power_info *pi = ci_get_pi(adev);
4921
4922        if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4923                return 0;
4924
4925        memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4926
4927        ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4928
4929        return amdgpu_ci_copy_bytes_to_smc(adev,
4930                                    pi->mc_reg_table_start +
4931                                    offsetof(SMU7_Discrete_MCRegisters, data[0]),
4932                                    (u8 *)&pi->smc_mc_reg_table.data[0],
4933                                    sizeof(SMU7_Discrete_MCRegisterSet) *
4934                                    pi->dpm_table.mclk_table.count,
4935                                    pi->sram_end);
4936}
4937
4938static void ci_enable_voltage_control(struct amdgpu_device *adev)
4939{
4940        u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
4941
4942        tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
4943        WREG32_SMC(ixGENERAL_PWRMGT, tmp);
4944}
4945
4946static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
4947                                                      struct amdgpu_ps *amdgpu_state)
4948{
4949        struct ci_ps *state = ci_get_ps(amdgpu_state);
4950        int i;
4951        u16 pcie_speed, max_speed = 0;
4952
4953        for (i = 0; i < state->performance_level_count; i++) {
4954                pcie_speed = state->performance_levels[i].pcie_gen;
4955                if (max_speed < pcie_speed)
4956                        max_speed = pcie_speed;
4957        }
4958
4959        return max_speed;
4960}
4961
4962static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
4963{
4964        u32 speed_cntl = 0;
4965
4966        speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
4967                PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
4968        speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
4969
4970        return (u16)speed_cntl;
4971}
4972
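/*
 * Decode the LC_LINK_WIDTH_RD field into a lane count; unknown
 * encodings default to x16.
 */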
4973static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
4974{
4975        u32 link_width = 0;
4976
4977        link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
4978                PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
4979        link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
4980
4981        switch (link_width) {
4982        case 1:
4983                return 1;
4984        case 2:
4985                return 2;
4986        case 3:
4987                return 4;
4988        case 4:
4989                return 8;
4990        case 0:
4991        case 6:
4992        default:
4993                return 16;
4994        }
4995}
4996
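/*
 * If the new state wants a faster PCIe link, request the upshift via
 * ACPI before the state switch, falling back one gen at a time on
 * failure; downshifts are only flagged here and notified after the
 * switch by ci_notify_link_speed_change_after_state_change().
 */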
4997static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
4998                                                             struct amdgpu_ps *amdgpu_new_state,
4999                                                             struct amdgpu_ps *amdgpu_current_state)
5000{
5001        struct ci_power_info *pi = ci_get_pi(adev);
5002        enum amdgpu_pcie_gen target_link_speed =
5003                ci_get_maximum_link_speed(adev, amdgpu_new_state);
5004        enum amdgpu_pcie_gen current_link_speed;
5005
5006        if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
5007                current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
5008        else
5009                current_link_speed = pi->force_pcie_gen;
5010
5011        pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5012        pi->pspp_notify_required = false;
5013        if (target_link_speed > current_link_speed) {
5014                switch (target_link_speed) {
5015#ifdef CONFIG_ACPI
5016                case AMDGPU_PCIE_GEN3:
5017                        if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
5018                                break;
5019                        pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
5020                        if (current_link_speed == AMDGPU_PCIE_GEN2)
5021                                break;
                        /* fall through */
5022                case AMDGPU_PCIE_GEN2:
5023                        if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
5024                                break;
                        /* fall through */
5025#endif
5026                default:
5027                        pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
5028                        break;
5029                }
5030        } else {
5031                if (target_link_speed < current_link_speed)
5032                        pi->pspp_notify_required = true;
5033        }
5034}
5035
5036static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
5037                                                           struct amdgpu_ps *amdgpu_new_state,
5038                                                           struct amdgpu_ps *amdgpu_current_state)
5039{
5040        struct ci_power_info *pi = ci_get_pi(adev);
5041        enum amdgpu_pcie_gen target_link_speed =
5042                ci_get_maximum_link_speed(adev, amdgpu_new_state);
5043        u8 request;
5044
5045        if (pi->pspp_notify_required) {
5046                if (target_link_speed == AMDGPU_PCIE_GEN3)
5047                        request = PCIE_PERF_REQ_PECI_GEN3;
5048                else if (target_link_speed == AMDGPU_PCIE_GEN2)
5049                        request = PCIE_PERF_REQ_PECI_GEN2;
5050                else
5051                        request = PCIE_PERF_REQ_PECI_GEN1;
5052
5053                if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
5054                    (ci_get_current_pcie_speed(adev) > 0))
5055                        return;
5056
5057#ifdef CONFIG_ACPI
5058                amdgpu_acpi_pcie_performance_request(adev, request, false);
5059#endif
5060        }
5061}
5062
5063static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
5064{
5065        struct ci_power_info *pi = ci_get_pi(adev);
5066        struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
5067                &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
5068        struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
5069                &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
5070        struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
5071                &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
5072
5073        if (allowed_sclk_vddc_table == NULL)
5074                return -EINVAL;
5075        if (allowed_sclk_vddc_table->count < 1)
5076                return -EINVAL;
5077        if (allowed_mclk_vddc_table == NULL)
5078                return -EINVAL;
5079        if (allowed_mclk_vddc_table->count < 1)
5080                return -EINVAL;
5081        if (allowed_mclk_vddci_table == NULL)
5082                return -EINVAL;
5083        if (allowed_mclk_vddci_table->count < 1)
5084                return -EINVAL;
5085
5086        pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
5087        pi->max_vddc_in_pp_table =
5088                allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5089
5090        pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
5091        pi->max_vddci_in_pp_table =
5092                allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5093
5094        adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
5095                allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
5096        adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
5097                allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
5098        adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
5099                allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5100        adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
5101                allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5102
5103        return 0;
5104}
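/*
 * The min/max extraction above relies on the ATOM dependency tables being
 * sorted by clock in ascending order, so entries[0] carries the lowest
 * voltage and entries[count - 1] the highest; those same last entries are
 * then snapshotted as the AC clock/voltage ceiling. For illustration: a
 * three-entry sclk/vddc table {300 MHz/900 mV, 600 MHz/1000 mV,
 * 1000 MHz/1100 mV} yields min_vddc 900, max_vddc 1100 and an AC sclk
 * limit of 1000 MHz.
 */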
5105
5106static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
5107{
5108        struct ci_power_info *pi = ci_get_pi(adev);
5109        struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
5110        u32 leakage_index;
5111
5112        for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5113                if (leakage_table->leakage_id[leakage_index] == *vddc) {
5114                        *vddc = leakage_table->actual_voltage[leakage_index];
5115                        break;
5116                }
5117        }
5118}
5119
5120static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
5121{
5122        struct ci_power_info *pi = ci_get_pi(adev);
5123        struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
5124        u32 leakage_index;
5125
5126        for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5127                if (leakage_table->leakage_id[leakage_index] == *vddci) {
5128                        *vddci = leakage_table->actual_voltage[leakage_index];
5129                        break;
5130                }
5131        }
5132}
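/*
 * Background for the two helpers above: for leakage-binned parts the VBIOS
 * tables may store a "virtual voltage id" (typically
 * ATOM_VIRTUAL_VOLTAGE_ID0, 0xff01, and up) instead of a real voltage.
 * ci_get_leakage_voltages() fills pi->vddc_leakage/vddci_leakage with
 * id -> actual-voltage pairs, and these helpers substitute the measured
 * voltage wherever such an id appears in a table.
 */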
5133
5134static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5135                                                                      struct amdgpu_clock_voltage_dependency_table *table)
5136{
5137        u32 i;
5138
5139        if (table) {
5140                for (i = 0; i < table->count; i++)
5141                        ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5142        }
5143}
5144
5145static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
5146                                                                       struct amdgpu_clock_voltage_dependency_table *table)
5147{
5148        u32 i;
5149
5150        if (table) {
5151                for (i = 0; i < table->count; i++)
5152                        ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
5153        }
5154}
5155
5156static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5157                                                                          struct amdgpu_vce_clock_voltage_dependency_table *table)
5158{
5159        u32 i;
5160
5161        if (table) {
5162                for (i = 0; i < table->count; i++)
5163                        ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5164        }
5165}
5166
5167static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5168                                                                          struct amdgpu_uvd_clock_voltage_dependency_table *table)
5169{
5170        u32 i;
5171
5172        if (table) {
5173                for (i = 0; i < table->count; i++)
5174                        ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5175        }
5176}
5177
5178static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
5179                                                                   struct amdgpu_phase_shedding_limits_table *table)
5180{
5181        u32 i;
5182
5183        if (table) {
5184                for (i = 0; i < table->count; i++)
5185                        ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
5186        }
5187}
5188
5189static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
5190                                                            struct amdgpu_clock_and_voltage_limits *table)
5191{
5192        if (table) {
5193                ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
5194                ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
5195        }
5196}
5197
5198static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
5199                                                         struct amdgpu_cac_leakage_table *table)
5200{
5201        u32 i;
5202
5203        if (table) {
5204                for (i = 0; i < table->count; i++)
5205                        ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
5206        }
5207}
5208
5209static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev)
5210{
5212        ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5213                                                                  &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
5214        ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5215                                                                  &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
5216        ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5217                                                                  &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
5218        ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev,
5219                                                                   &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
5220        ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev,
5221                                                                      &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
5222        ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev,
5223                                                                      &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
5224        ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5225                                                                  &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
5226        ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5227                                                                  &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
5228        ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev,
5229                                                               &adev->pm.dpm.dyn_state.phase_shedding_limits_table);
5230        ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5231                                                        &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
5232        ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5233                                                        &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
5234        ci_patch_cac_leakage_table_with_vddc_leakage(adev,
5235                                                     &adev->pm.dpm.dyn_state.cac_leakage_table);
5237}
5238
5239static void ci_update_current_ps(struct amdgpu_device *adev,
5240                                 struct amdgpu_ps *rps)
5241{
5242        struct ci_ps *new_ps = ci_get_ps(rps);
5243        struct ci_power_info *pi = ci_get_pi(adev);
5244
5245        pi->current_rps = *rps;
5246        pi->current_ps = *new_ps;
5247        pi->current_rps.ps_priv = &pi->current_ps;
5248        adev->pm.dpm.current_ps = &pi->current_rps;
5249}
5250
5251static void ci_update_requested_ps(struct amdgpu_device *adev,
5252                                   struct amdgpu_ps *rps)
5253{
5254        struct ci_ps *new_ps = ci_get_ps(rps);
5255        struct ci_power_info *pi = ci_get_pi(adev);
5256
5257        pi->requested_rps = *rps;
5258        pi->requested_ps = *new_ps;
5259        pi->requested_rps.ps_priv = &pi->requested_ps;
5260        adev->pm.dpm.requested_ps = &pi->requested_rps;
5261}
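/*
 * Both updaters above deliberately copy the amdgpu_ps and its ci_ps
 * payload by value into pi-owned storage and re-point ps_priv at the
 * private copy. That keeps adev->pm.dpm.current_ps/requested_ps valid
 * even if the adev->pm.dpm.ps[] array the state originally came from is
 * freed or rebuilt.
 */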
5262
5263static int ci_dpm_pre_set_power_state(void *handle)
5264{
5265        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5266        struct ci_power_info *pi = ci_get_pi(adev);
5267        struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
5268        struct amdgpu_ps *new_ps = &requested_ps;
5269
5270        ci_update_requested_ps(adev, new_ps);
5271
5272        ci_apply_state_adjust_rules(adev, &pi->requested_rps);
5273
5274        return 0;
5275}
5276
5277static void ci_dpm_post_set_power_state(void *handle)
5278{
5279        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5280        struct ci_power_info *pi = ci_get_pi(adev);
5281        struct amdgpu_ps *new_ps = &pi->requested_rps;
5282
5283        ci_update_current_ps(adev, new_ps);
5284}
5285
5287static void ci_dpm_setup_asic(struct amdgpu_device *adev)
5288{
5289        ci_read_clock_registers(adev);
5290        ci_enable_acpi_power_management(adev);
5291        ci_init_sclk_t(adev);
5292}
5293
5294static int ci_dpm_enable(struct amdgpu_device *adev)
5295{
5296        struct ci_power_info *pi = ci_get_pi(adev);
5297        struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5298        int ret;
5299
5300        if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
5301                ci_enable_voltage_control(adev);
5302                ret = ci_construct_voltage_tables(adev);
5303                if (ret) {
5304                        DRM_ERROR("ci_construct_voltage_tables failed\n");
5305                        return ret;
5306                }
5307        }
5308        if (pi->caps_dynamic_ac_timing) {
5309                ret = ci_initialize_mc_reg_table(adev);
5310                if (ret)
5311                        pi->caps_dynamic_ac_timing = false;
5312        }
5313        if (pi->dynamic_ss)
5314                ci_enable_spread_spectrum(adev, true);
5315        if (pi->thermal_protection)
5316                ci_enable_thermal_protection(adev, true);
5317        ci_program_sstp(adev);
5318        ci_enable_display_gap(adev);
5319        ci_program_vc(adev);
5320        ret = ci_upload_firmware(adev);
5321        if (ret) {
5322                DRM_ERROR("ci_upload_firmware failed\n");
5323                return ret;
5324        }
5325        ret = ci_process_firmware_header(adev);
5326        if (ret) {
5327                DRM_ERROR("ci_process_firmware_header failed\n");
5328                return ret;
5329        }
5330        ret = ci_initial_switch_from_arb_f0_to_f1(adev);
5331        if (ret) {
5332                DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
5333                return ret;
5334        }
5335        ret = ci_init_smc_table(adev);
5336        if (ret) {
5337                DRM_ERROR("ci_init_smc_table failed\n");
5338                return ret;
5339        }
5340        ret = ci_init_arb_table_index(adev);
5341        if (ret) {
5342                DRM_ERROR("ci_init_arb_table_index failed\n");
5343                return ret;
5344        }
5345        if (pi->caps_dynamic_ac_timing) {
5346                ret = ci_populate_initial_mc_reg_table(adev);
5347                if (ret) {
5348                        DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
5349                        return ret;
5350                }
5351        }
5352        ret = ci_populate_pm_base(adev);
5353        if (ret) {
5354                DRM_ERROR("ci_populate_pm_base failed\n");
5355                return ret;
5356        }
5357        ci_dpm_start_smc(adev);
5358        ci_enable_vr_hot_gpio_interrupt(adev);
5359        ret = ci_notify_smc_display_change(adev, false);
5360        if (ret) {
5361                DRM_ERROR("ci_notify_smc_display_change failed\n");
5362                return ret;
5363        }
5364        ci_enable_sclk_control(adev, true);
5365        ret = ci_enable_ulv(adev, true);
5366        if (ret) {
5367                DRM_ERROR("ci_enable_ulv failed\n");
5368                return ret;
5369        }
5370        ret = ci_enable_ds_master_switch(adev, true);
5371        if (ret) {
5372                DRM_ERROR("ci_enable_ds_master_switch failed\n");
5373                return ret;
5374        }
5375        ret = ci_start_dpm(adev);
5376        if (ret) {
5377                DRM_ERROR("ci_start_dpm failed\n");
5378                return ret;
5379        }
5380        ret = ci_enable_didt(adev, true);
5381        if (ret) {
5382                DRM_ERROR("ci_enable_didt failed\n");
5383                return ret;
5384        }
5385        ret = ci_enable_smc_cac(adev, true);
5386        if (ret) {
5387                DRM_ERROR("ci_enable_smc_cac failed\n");
5388                return ret;
5389        }
5390        ret = ci_enable_power_containment(adev, true);
5391        if (ret) {
5392                DRM_ERROR("ci_enable_power_containment failed\n");
5393                return ret;
5394        }
5395
5396        ret = ci_power_control_set_level(adev);
5397        if (ret) {
5398                DRM_ERROR("ci_power_control_set_level failed\n");
5399                return ret;
5400        }
5401
5402        ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
5403
5404        ret = ci_enable_thermal_based_sclk_dpm(adev, true);
5405        if (ret) {
5406                DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
5407                return ret;
5408        }
5409
5410        ci_thermal_start_thermal_controller(adev);
5411
5412        ci_update_current_ps(adev, boot_ps);
5413
5414        return 0;
5415}
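/*
 * Rough shape of the bring-up sequence above: program voltage and MC
 * tables, upload the SMC firmware and its state tables, start the SMC,
 * then layer the runtime features on top (ULV, deep sleep, DPM itself,
 * DIDT, CAC, power containment, thermal throttling) before finally
 * starting the thermal controller and declaring the boot state current.
 * Any step that fails aborts the whole enable, which leaves
 * adev->pm.dpm_enabled unset in the caller.
 */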
5416
5417static void ci_dpm_disable(struct amdgpu_device *adev)
5418{
5419        struct ci_power_info *pi = ci_get_pi(adev);
5420        struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5421
5422        amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5423                       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
5424        amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5425                       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
5426
5427        ci_dpm_powergate_uvd(adev, true);
5428
5429        if (!amdgpu_ci_is_smc_running(adev))
5430                return;
5431
5432        ci_thermal_stop_thermal_controller(adev);
5433
5434        if (pi->thermal_protection)
5435                ci_enable_thermal_protection(adev, false);
5436        ci_enable_power_containment(adev, false);
5437        ci_enable_smc_cac(adev, false);
5438        ci_enable_didt(adev, false);
5439        ci_enable_spread_spectrum(adev, false);
5440        ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
5441        ci_stop_dpm(adev);
5442        ci_enable_ds_master_switch(adev, false);
5443        ci_enable_ulv(adev, false);
5444        ci_clear_vc(adev);
5445        ci_reset_to_default(adev);
5446        ci_dpm_stop_smc(adev);
5447        ci_force_switch_to_arb_f0(adev);
5448        ci_enable_thermal_based_sclk_dpm(adev, false);
5449
5450        ci_update_current_ps(adev, boot_ps);
5451}
5452
5453static int ci_dpm_set_power_state(void *handle)
5454{
5455        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5456        struct ci_power_info *pi = ci_get_pi(adev);
5457        struct amdgpu_ps *new_ps = &pi->requested_rps;
5458        struct amdgpu_ps *old_ps = &pi->current_rps;
5459        int ret;
5460
5461        ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps);
5462        if (pi->pcie_performance_request)
5463                ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
5464        ret = ci_freeze_sclk_mclk_dpm(adev);
5465        if (ret) {
5466                DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
5467                return ret;
5468        }
5469        ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps);
5470        if (ret) {
5471                DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
5472                return ret;
5473        }
5474        ret = ci_generate_dpm_level_enable_mask(adev, new_ps);
5475        if (ret) {
5476                DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
5477                return ret;
5478        }
5479
5480        ret = ci_update_vce_dpm(adev, new_ps, old_ps);
5481        if (ret) {
5482                DRM_ERROR("ci_update_vce_dpm failed\n");
5483                return ret;
5484        }
5485
5486        ret = ci_update_sclk_t(adev);
5487        if (ret) {
5488                DRM_ERROR("ci_update_sclk_t failed\n");
5489                return ret;
5490        }
5491        if (pi->caps_dynamic_ac_timing) {
5492                ret = ci_update_and_upload_mc_reg_table(adev);
5493                if (ret) {
5494                        DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
5495                        return ret;
5496                }
5497        }
5498        ret = ci_program_memory_timing_parameters(adev);
5499        if (ret) {
5500                DRM_ERROR("ci_program_memory_timing_parameters failed\n");
5501                return ret;
5502        }
5503        ret = ci_unfreeze_sclk_mclk_dpm(adev);
5504        if (ret) {
5505                DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
5506                return ret;
5507        }
5508        ret = ci_upload_dpm_level_enable_mask(adev);
5509        if (ret) {
5510                DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
5511                return ret;
5512        }
5513        if (pi->pcie_performance_request)
5514                ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
5515
5516        return 0;
5517}
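/*
 * The ordering above matters: sclk/mclk DPM is frozen so the SMC cannot
 * switch levels while the level tables, enable masks and MC registers are
 * being rewritten, and is only unfrozen once everything is uploaded.
 * PCIe upgrades are requested before the switch and downgrades notified
 * after it (see the two link-speed helpers earlier in this file).
 */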
5518
5519#if 0
5520static void ci_dpm_reset_asic(struct amdgpu_device *adev)
5521{
5522        ci_set_boot_state(adev);
5523}
5524#endif
5525
5526static void ci_dpm_display_configuration_changed(void *handle)
5527{
5528        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5529
5530        ci_program_display_gap(adev);
5531}
5532
5533union power_info {
5534        struct _ATOM_POWERPLAY_INFO info;
5535        struct _ATOM_POWERPLAY_INFO_V2 info_2;
5536        struct _ATOM_POWERPLAY_INFO_V3 info_3;
5537        struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
5538        struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
5539        struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
5540};
5541
5542union pplib_clock_info {
5543        struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
5544        struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
5545        struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
5546        struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
5547        struct _ATOM_PPLIB_SI_CLOCK_INFO si;
5548        struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
5549};
5550
5551union pplib_power_state {
5552        struct _ATOM_PPLIB_STATE v1;
5553        struct _ATOM_PPLIB_STATE_V2 v2;
5554};
5555
5556static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev,
5557                                          struct amdgpu_ps *rps,
5558                                          struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
5559                                          u8 table_rev)
5560{
5561        rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
5562        rps->class = le16_to_cpu(non_clock_info->usClassification);
5563        rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
5564
5565        if (table_rev > ATOM_PPLIB_NONCLOCKINFO_VER1) {
5566                rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
5567                rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
5568        } else {
5569                rps->vclk = 0;
5570                rps->dclk = 0;
5571        }
5572
5573        if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
5574                adev->pm.dpm.boot_ps = rps;
5575        if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
5576                adev->pm.dpm.uvd_ps = rps;
5577}
5578
5579static void ci_parse_pplib_clock_info(struct amdgpu_device *adev,
5580                                      struct amdgpu_ps *rps, int index,
5581                                      union pplib_clock_info *clock_info)
5582{
5583        struct ci_power_info *pi = ci_get_pi(adev);
5584        struct ci_ps *ps = ci_get_ps(rps);
5585        struct ci_pl *pl = &ps->performance_levels[index];
5586
5587        ps->performance_level_count = index + 1;
5588
5589        pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5590        pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
5591        pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5592        pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5593
5594        pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
5595                                                   pi->sys_pcie_mask,
5596                                                   pi->vbios_boot_state.pcie_gen_bootup_value,
5597                                                   clock_info->ci.ucPCIEGen);
5598        pl->pcie_lane = amdgpu_get_pcie_lane_support(adev,
5599                                                     pi->vbios_boot_state.pcie_lane_bootup_value,
5600                                                     le16_to_cpu(clock_info->ci.usPCIELane));
5601
5602        if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
5603                pi->acpi_pcie_gen = pl->pcie_gen;
5605
5606        if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
5607                pi->ulv.supported = true;
5608                pi->ulv.pl = *pl;
5609                pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
5610        }
5611
5612        /* patch up boot state */
5613        if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
5614                pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
5615                pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
5616                pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
5617                pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
5618        }
5619
5620        switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
5621        case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
5622                pi->use_pcie_powersaving_levels = true;
5623                if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
5624                        pi->pcie_gen_powersaving.max = pl->pcie_gen;
5625                if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
5626                        pi->pcie_gen_powersaving.min = pl->pcie_gen;
5627                if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
5628                        pi->pcie_lane_powersaving.max = pl->pcie_lane;
5629                if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
5630                        pi->pcie_lane_powersaving.min = pl->pcie_lane;
5631                break;
5632        case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
5633                pi->use_pcie_performance_levels = true;
5634                if (pi->pcie_gen_performance.max < pl->pcie_gen)
5635                        pi->pcie_gen_performance.max = pl->pcie_gen;
5636                if (pi->pcie_gen_performance.min > pl->pcie_gen)
5637                        pi->pcie_gen_performance.min = pl->pcie_gen;
5638                if (pi->pcie_lane_performance.max < pl->pcie_lane)
5639                        pi->pcie_lane_performance.max = pl->pcie_lane;
5640                if (pi->pcie_lane_performance.min > pl->pcie_lane)
5641                        pi->pcie_lane_performance.min = pl->pcie_lane;
5642                break;
5643        default:
5644                break;
5645        }
5646}
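/*
 * Worked example for the clock assembly above (illustrative values): CI
 * clock infos split each 24-bit clock into a 16-bit low word and an 8-bit
 * high byte, so usEngineClockLow = 0x86A0 with ucEngineClockHigh = 0x01
 * gives sclk = 0x186A0 = 100000, i.e. 1000 MHz in the 10 kHz units used
 * throughout this driver.
 */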
5647
5648static int ci_parse_power_table(struct amdgpu_device *adev)
5649{
5650        struct amdgpu_mode_info *mode_info = &adev->mode_info;
5651        struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
5652        union pplib_power_state *power_state;
5653        int i, j, k, non_clock_array_index, clock_array_index;
5654        union pplib_clock_info *clock_info;
5655        struct _StateArray *state_array;
5656        struct _ClockInfoArray *clock_info_array;
5657        struct _NonClockInfoArray *non_clock_info_array;
5658        union power_info *power_info;
5659        int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
5660        u16 data_offset;
5661        u8 frev, crev;
5662        u8 *power_state_offset;
5663        struct ci_ps *ps;
5664
5665        if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5666                                   &frev, &crev, &data_offset))
5667                return -EINVAL;
5668        power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
5669
5670        amdgpu_add_thermal_controller(adev);
5671
5672        state_array = (struct _StateArray *)
5673                (mode_info->atom_context->bios + data_offset +
5674                 le16_to_cpu(power_info->pplib.usStateArrayOffset));
5675        clock_info_array = (struct _ClockInfoArray *)
5676                (mode_info->atom_context->bios + data_offset +
5677                 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
5678        non_clock_info_array = (struct _NonClockInfoArray *)
5679                (mode_info->atom_context->bios + data_offset +
5680                 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
5681
5682        adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
5683                                  sizeof(struct amdgpu_ps), GFP_KERNEL);
5684        if (!adev->pm.dpm.ps)
5685                return -ENOMEM;
5686        power_state_offset = (u8 *)state_array->states;
5687        for (i = 0; i < state_array->ucNumEntries; i++) {
5688                u8 *idx;
5689                power_state = (union pplib_power_state *)power_state_offset;
5690                non_clock_array_index = power_state->v2.nonClockInfoIndex;
5691                non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
5692                        &non_clock_info_array->nonClockInfo[non_clock_array_index];
5693                ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
5694                if (ps == NULL)
5695                        return -ENOMEM;
5696                adev->pm.dpm.ps[i].ps_priv = ps;
5697                /* track progress so ci_dpm_fini() frees exactly what was allocated */
5698                adev->pm.dpm.num_ps = i + 1;
5699                ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
5700                                              non_clock_info,
5701                                              non_clock_info_array->ucEntrySize);
5702                k = 0;
5703                idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5704                for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5705                        clock_array_index = idx[j];
5706                        if (clock_array_index >= clock_info_array->ucNumEntries)
5707                                continue;
5708                        if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5709                                break;
5710                        clock_info = (union pplib_clock_info *)
5711                                ((u8 *)&clock_info_array->clockInfo[0] +
5712                                 (clock_array_index * clock_info_array->ucEntrySize));
5713                        ci_parse_pplib_clock_info(adev,
5714                                                  &adev->pm.dpm.ps[i], k,
5715                                                  clock_info);
5716                        k++;
5717                }
5718                power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5719        }
5720        adev->pm.dpm.num_ps = state_array->ucNumEntries;
5721
5722        /* fill in the vce power states */
5723        for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
5724                u32 sclk, mclk;
5725                clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
5726                clock_info = (union pplib_clock_info *)
5727                        &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5728                sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5729                sclk |= clock_info->ci.ucEngineClockHigh << 16;
5730                mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5731                mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5732                adev->pm.dpm.vce_states[i].sclk = sclk;
5733                adev->pm.dpm.vce_states[i].mclk = mclk;
5734        }
5735
5736        return 0;
5737}
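/*
 * Layout note for the parser above: a _ATOM_PPLIB_STATE_V2 entry is
 * ucNumDPMLevels and nonClockInfoIndex (one byte each) followed by one
 * clock-info index byte per DPM level, hence the
 * "power_state_offset += 2 + ucNumDPMLevels" stride; a state with two
 * levels occupies four bytes. Each index byte selects one
 * clock_info_array->ucEntrySize-sized record out of ClockInfoArray.
 */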
5738
5739static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
5740                                    struct ci_vbios_boot_state *boot_state)
5741{
5742        struct amdgpu_mode_info *mode_info = &adev->mode_info;
5743        int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5744        ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5745        u8 frev, crev;
5746        u16 data_offset;
5747
5748        if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5749                                   &frev, &crev, &data_offset)) {
5750                firmware_info =
5751                        (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5752                                                    data_offset);
5753                boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5754                boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5755                boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5756                boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
5757                boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
5758                boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5759                boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5760
5761                return 0;
5762        }
5763        return -EINVAL;
5764}
5765
5766static void ci_dpm_fini(struct amdgpu_device *adev)
5767{
5768        int i;
5769
5770        for (i = 0; i < adev->pm.dpm.num_ps; i++)
5771                kfree(adev->pm.dpm.ps[i].ps_priv);
5773        kfree(adev->pm.dpm.ps);
5774        kfree(adev->pm.dpm.priv);
5775        kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5776        amdgpu_free_extended_power_table(adev);
5777}
5778
5779/**
5780 * ci_dpm_init_microcode - load ucode images from disk
5781 *
5782 * @adev: amdgpu_device pointer
5783 *
5784 * Use the firmware interface to load the ucode images into
5785 * the driver (not loaded into hw).
5786 * Returns 0 on success, error on failure.
5787 */
5788static int ci_dpm_init_microcode(struct amdgpu_device *adev)
5789{
5790        const char *chip_name;
5791        char fw_name[30];
5792        int err;
5793
5794        DRM_DEBUG("\n");
5795
5796        switch (adev->asic_type) {
5797        case CHIP_BONAIRE:
5798                if ((adev->pdev->revision == 0x80) ||
5799                    (adev->pdev->revision == 0x81) ||
5800                    (adev->pdev->device == 0x665f))
5801                        chip_name = "bonaire_k";
5802                else
5803                        chip_name = "bonaire";
5804                break;
5805        case CHIP_HAWAII:
5806                if (adev->pdev->revision == 0x80)
5807                        chip_name = "hawaii_k";
5808                else
5809                        chip_name = "hawaii";
5810                break;
5811        case CHIP_KAVERI:
5812        case CHIP_KABINI:
5813        case CHIP_MULLINS:
5814        default:
                    BUG();
5815        }
5816
5817        snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
5818        err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
5819        if (err)
5820                goto out;
5821        err = amdgpu_ucode_validate(adev->pm.fw);
5822
5823out:
5824        if (err) {
5825                pr_err("cik_smc: Failed to load firmware \"%s\"\n", fw_name);
5826                release_firmware(adev->pm.fw);
5827                adev->pm.fw = NULL;
5828        }
5829        return err;
5830}
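/*
 * Firmware naming note: the image is picked purely from the PCI IDs
 * checked above, with the "_k" variants bound to specific silicon
 * revisions, and it is requested from the radeon/ directory of
 * linux-firmware so the same SMC blobs serve both this driver and the
 * older radeon KMS driver.
 */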
5831
5832static int ci_dpm_init(struct amdgpu_device *adev)
5833{
5834        int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5835        SMU7_Discrete_DpmTable *dpm_table;
5836        struct amdgpu_gpio_rec gpio;
5837        u16 data_offset, size;
5838        u8 frev, crev;
5839        struct ci_power_info *pi;
5840        int ret;
5841
5842        pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5843        if (pi == NULL)
5844                return -ENOMEM;
5845        adev->pm.dpm.priv = pi;
5846
5847        pi->sys_pcie_mask =
5848                (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
5849                CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
5850
5851        pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5852
5853        pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
5854        pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3;
5855        pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1;
5856        pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3;
5857
5858        pi->pcie_lane_performance.max = 0;
5859        pi->pcie_lane_performance.min = 16;
5860        pi->pcie_lane_powersaving.max = 0;
5861        pi->pcie_lane_powersaving.min = 16;
5862
5863        ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state);
5864        if (ret) {
5865                ci_dpm_fini(adev);
5866                return ret;
5867        }
5868
5869        ret = amdgpu_get_platform_caps(adev);
5870        if (ret) {
5871                ci_dpm_fini(adev);
5872                return ret;
5873        }
5874
5875        ret = amdgpu_parse_extended_power_table(adev);
5876        if (ret) {
5877                ci_dpm_fini(adev);
5878                return ret;
5879        }
5880
5881        ret = ci_parse_power_table(adev);
5882        if (ret) {
5883                ci_dpm_fini(adev);
5884                return ret;
5885        }
5886
5887        pi->dll_default_on = false;
5888        pi->sram_end = SMC_RAM_END;
5889
5890        pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5891        pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5892        pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5893        pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5894        pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5895        pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5896        pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5897        pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5898
5899        pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5900
5901        pi->sclk_dpm_key_disabled = 0;
5902        pi->mclk_dpm_key_disabled = 0;
5903        pi->pcie_dpm_key_disabled = 0;
5904        pi->thermal_sclk_dpm_enabled = 0;
5905
5906        if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
5907                pi->caps_sclk_ds = true;
5908        else
5909                pi->caps_sclk_ds = false;
5910
5911        pi->mclk_strobe_mode_threshold = 40000;
5912        pi->mclk_stutter_mode_threshold = 40000;
5913        pi->mclk_edc_enable_threshold = 40000;
5914        pi->mclk_edc_wr_enable_threshold = 40000;
5915
5916        ci_initialize_powertune_defaults(adev);
5917
5918        pi->caps_fps = false;
5919
5920        pi->caps_sclk_throttle_low_notification = false;
5921
5922        pi->caps_uvd_dpm = true;
5923        pi->caps_vce_dpm = true;
5924
5925        ci_get_leakage_voltages(adev);
5926        ci_patch_dependency_tables_with_leakage(adev);
5927        ci_set_private_data_variables_based_on_pptable(adev);
5928
5929        adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5930                kcalloc(4, sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
5931        if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5932                ci_dpm_fini(adev);
5933                return -ENOMEM;
5934        }
5935        adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5936        adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5937        adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5938        adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5939        adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5940        adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5941        adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5942        adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5943        adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5944
5945        adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5946        adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5947        adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5948
5949        adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5950        adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5951        adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5952        adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5953
5954        if (adev->asic_type == CHIP_HAWAII) {
5955                pi->thermal_temp_setting.temperature_low = 94500;
5956                pi->thermal_temp_setting.temperature_high = 95000;
5957                pi->thermal_temp_setting.temperature_shutdown = 104000;
5958        } else {
5959                pi->thermal_temp_setting.temperature_low = 99500;
5960                pi->thermal_temp_setting.temperature_high = 100000;
5961                pi->thermal_temp_setting.temperature_shutdown = 104000;
5962        }
5963
5964        pi->uvd_enabled = false;
5965
5966        dpm_table = &pi->smc_state_table;
5967
5968        gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID);
5969        if (gpio.valid) {
5970                dpm_table->VRHotGpio = gpio.shift;
5971                adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5972        } else {
5973                dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
5974                adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5975        }
5976
5977        gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID);
5978        if (gpio.valid) {
5979                dpm_table->AcDcGpio = gpio.shift;
5980                adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5981        } else {
5982                dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
5983                adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5984        }
5985
5986        gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID);
5987        if (gpio.valid) {
5988                u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL);
5989
5990                switch (gpio.shift) {
5991                case 0:
5992                        tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
5993                        tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
5994                        break;
5995                case 1:
5996                        tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
5997                        tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
5998                        break;
5999                case 2:
6000                        tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK;
6001                        break;
6002                case 3:
6003                        tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK;
6004                        break;
6005                case 4:
6006                        tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
6007                        break;
6008                default:
6009                        DRM_INFO("Invalid PCC GPIO: %u!\n", gpio.shift);
6010                        break;
6011                }
6012                WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
6013        }
6014
6015        pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6016        pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6017        pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6018        if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
6019                pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6020        else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
6021                pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6022
6023        if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
6024                if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
6025                        pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6026                else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
6027                        pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6028                else
6029                        adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
6030        }
6031
6032        if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
6033                if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
6034                        pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6035                else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
6036                        pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6037                else
6038                        adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
6039        }
6040
6041        pi->vddc_phase_shed_control = true;
6042
6043#if defined(CONFIG_ACPI)
6044        pi->pcie_performance_request =
6045                amdgpu_acpi_is_pcie_performance_request_supported(adev);
6046#else
6047        pi->pcie_performance_request = false;
6048#endif
6049
6050        if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
6051                                   &frev, &crev, &data_offset)) {
6052                pi->caps_sclk_ss_support = true;
6053                pi->caps_mclk_ss_support = true;
6054                pi->dynamic_ss = true;
6055        } else {
6056                pi->caps_sclk_ss_support = false;
6057                pi->caps_mclk_ss_support = false;
6058                pi->dynamic_ss = true;
6059        }
6060
6061        if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
6062                pi->thermal_protection = true;
6063        else
6064                pi->thermal_protection = false;
6065
6066        pi->caps_dynamic_ac_timing = true;
6067
6068        pi->uvd_power_gated = true;
6069
6070        /* make sure dc limits are valid */
6071        if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
6072            (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
6073                adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
6074                        adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
6075
6076        pi->fan_ctrl_is_in_default_mode = true;
6077
6078        return 0;
6079}
6080
6081static void
6082ci_dpm_debugfs_print_current_performance_level(void *handle,
6083                                               struct seq_file *m)
6084{
6085        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6086        struct ci_power_info *pi = ci_get_pi(adev);
6087        struct amdgpu_ps *rps = &pi->current_rps;
6088        u32 sclk = ci_get_average_sclk_freq(adev);
6089        u32 mclk = ci_get_average_mclk_freq(adev);
6090        u32 activity_percent = 50;
6091        int ret;
6092
6093        ret = ci_read_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, AverageGraphicsA),
6094                                        &activity_percent);
6095
6096        if (ret == 0) {
6097                activity_percent += 0x80;
6098                activity_percent >>= 8;
6099                activity_percent = min_t(u32, activity_percent, 100);
6100        }
6101
6102        seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
6103        seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
6104        seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
6105                   sclk, mclk);
6106        seq_printf(m, "GPU load: %u %%\n", activity_percent);
6107}
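/*
 * The AverageGraphicsA soft register appears to report GPU load as an 8.8
 * fixed-point percentage: "(raw + 0x80) >> 8" rounds to the nearest whole
 * percent (e.g. raw 0x2E80 is 46.5%, printed as 47) before being clamped
 * to 100. When the SMC read fails, the arbitrary 50% default initialised
 * above is reported instead.
 */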
6108
6109static void ci_dpm_print_power_state(void *handle, void *current_ps)
6110{
6111        struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
6112        struct ci_ps *ps = ci_get_ps(rps);
6113        struct ci_pl *pl;
6114        int i;
6115        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6116
6117        amdgpu_dpm_print_class_info(rps->class, rps->class2);
6118        amdgpu_dpm_print_cap_info(rps->caps);
6119        printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
6120        for (i = 0; i < ps->performance_level_count; i++) {
6121                pl = &ps->performance_levels[i];
6122                printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
6123                       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
6124        }
6125        amdgpu_dpm_print_ps_status(adev, rps);
6126}
6127
6128static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
6129                                                const struct ci_pl *ci_cpl2)
6130{
6131        return ((ci_cpl1->mclk == ci_cpl2->mclk) &&
6132                  (ci_cpl1->sclk == ci_cpl2->sclk) &&
6133                  (ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) &&
6134                  (ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
6135}
6136
6137static int ci_check_state_equal(void *handle,
6138                                void *current_ps,
6139                                void *request_ps,
6140                                bool *equal)
6141{
6142        struct ci_ps *ci_cps;
6143        struct ci_ps *ci_rps;
6144        int i;
6145        struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
6146        struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
6147        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6148
6149        if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
6150                return -EINVAL;
6151
6152        ci_cps = ci_get_ps(cps);
6153        ci_rps = ci_get_ps(rps);
6154
6155        if (ci_cps == NULL) {
6156                *equal = false;
6157                return 0;
6158        }
6159
6160        if (ci_cps->performance_level_count != ci_rps->performance_level_count) {
6162                *equal = false;
6163                return 0;
6164        }
6165
6166        for (i = 0; i < ci_cps->performance_level_count; i++) {
6167                if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]),
6168                                        &(ci_rps->performance_levels[i]))) {
6169                        *equal = false;
6170                        return 0;
6171                }
6172        }
6173
6174        /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
6175        *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
6176        *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
6177
6178        return 0;
6179}
6180
6181static u32 ci_dpm_get_sclk(void *handle, bool low)
6182{
6183        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6184        struct ci_power_info *pi = ci_get_pi(adev);
6185        struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6186
6187        if (low)
6188                return requested_state->performance_levels[0].sclk;
6189        else
6190                return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
6191}
6192
6193static u32 ci_dpm_get_mclk(void *handle, bool low)
6194{
6195        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6196        struct ci_power_info *pi = ci_get_pi(adev);
6197        struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6198
6199        if (low)
6200                return requested_state->performance_levels[0].mclk;
6201        else
6202                return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
6203}
6204
6205/* get temperature in millidegrees */
6206static int ci_dpm_get_temp(void *handle)
6207{
6208        u32 temp;
6209        int actual_temp = 0;
6210        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6211
6212        temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
6213                CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
6214
6215        if (temp & 0x200)
6216                actual_temp = 255;
6217        else
6218                actual_temp = temp & 0x1ff;
6219
6220        actual_temp = actual_temp * 1000;
6221
6222        return actual_temp;
6223}
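/*
 * Decode example for the function above: CTF_TEMP is a 9-bit field, so a
 * raw value of 0x05A reads back as 90 and is returned as 90000
 * millidegrees; any value with bit 9 (0x200) set is treated as
 * out-of-range and saturated to 255 C (255000).
 */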
6224
6225static int ci_set_temperature_range(struct amdgpu_device *adev)
6226{
6227        int ret;
6228
6229        ret = ci_thermal_enable_alert(adev, false);
6230        if (ret)
6231                return ret;
6232        ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
6233                                               CISLANDS_TEMP_RANGE_MAX);
6234        if (ret)
6235                return ret;
6236        return ci_thermal_enable_alert(adev, true);
6240}
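/*
 * The alert disable/enable bracket above is presumably there so that a
 * half-programmed low/high window cannot raise spurious thermal
 * interrupts while CISLANDS_TEMP_RANGE_MIN/MAX are being written.
 */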
6241
6242static int ci_dpm_early_init(void *handle)
6243{
6244        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6245
6246        adev->powerplay.pp_funcs = &ci_dpm_funcs;
6247        adev->powerplay.pp_handle = adev;
6248        ci_dpm_set_irq_funcs(adev);
6249
6250        return 0;
6251}
6252
6253static int ci_dpm_late_init(void *handle)
6254{
6255        int ret;
6256        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6257
6258        if (!amdgpu_dpm)
6259                return 0;
6260
6261        /* init the sysfs and debugfs files late */
6262        ret = amdgpu_pm_sysfs_init(adev);
6263        if (ret)
6264                return ret;
6265
6266        ret = ci_set_temperature_range(adev);
6267        if (ret)
6268                return ret;
6269
6270        return 0;
6271}
6272
6273static int ci_dpm_sw_init(void *handle)
6274{
6275        int ret;
6276        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6277
6278        ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
6279                                &adev->pm.dpm.thermal.irq);
6280        if (ret)
6281                return ret;
6282
6283        ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
6284                                &adev->pm.dpm.thermal.irq);
6285        if (ret)
6286                return ret;
6287
6288        /* default to balanced state */
6289        adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
6290        adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
6291        adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
6292        adev->pm.default_sclk = adev->clock.default_sclk;
6293        adev->pm.default_mclk = adev->clock.default_mclk;
6294        adev->pm.current_sclk = adev->clock.default_sclk;
6295        adev->pm.current_mclk = adev->clock.default_mclk;
6296        adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
6297
6298        ret = ci_dpm_init_microcode(adev);
6299        if (ret)
6300                return ret;
6301
6302        if (amdgpu_dpm == 0)
6303                return 0;
6304
6305        INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
6306        mutex_lock(&adev->pm.mutex);
6307        ret = ci_dpm_init(adev);
6308        if (ret)
6309                goto dpm_failed;
6310        adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
6311        if (amdgpu_dpm == 1)
6312                amdgpu_pm_print_power_states(adev);
6313        mutex_unlock(&adev->pm.mutex);
6314        DRM_INFO("amdgpu: dpm initialized\n");
6315
6316        return 0;
6317
6318dpm_failed:
6319        ci_dpm_fini(adev);
6320        mutex_unlock(&adev->pm.mutex);
6321        DRM_ERROR("amdgpu: dpm initialization failed\n");
6322        return ret;
6323}
6324
6325static int ci_dpm_sw_fini(void *handle)
6326{
6327        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6328
6329        flush_work(&adev->pm.dpm.thermal.work);
6330
6331        mutex_lock(&adev->pm.mutex);
6332        ci_dpm_fini(adev);
6333        mutex_unlock(&adev->pm.mutex);
6334
6335        release_firmware(adev->pm.fw);
6336        adev->pm.fw = NULL;
6337
6338        return 0;
6339}
6340
6341static int ci_dpm_hw_init(void *handle)
6342{
6343        int ret;
6344        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6346
6347        if (!amdgpu_dpm) {
6348                ret = ci_upload_firmware(adev);
6349                if (ret) {
6350                        DRM_ERROR("ci_upload_firmware failed\n");
6351                        return ret;
6352                }
6353                ci_dpm_start_smc(adev);
6354                return 0;
6355        }
6356
6357        mutex_lock(&adev->pm.mutex);
6358        ci_dpm_setup_asic(adev);
6359        ret = ci_dpm_enable(adev);
6360        if (ret)
6361                adev->pm.dpm_enabled = false;
6362        else
6363                adev->pm.dpm_enabled = true;
6364        mutex_unlock(&adev->pm.mutex);
6365
6366        return ret;
6367}
6368
6369static int ci_dpm_hw_fini(void *handle)
6370{
6371        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6372
6373        if (adev->pm.dpm_enabled) {
6374                mutex_lock(&adev->pm.mutex);
6375                ci_dpm_disable(adev);
6376                mutex_unlock(&adev->pm.mutex);
6377        } else {
6378                ci_dpm_stop_smc(adev);
6379        }
6380
6381        return 0;
6382}
6383
6384static int ci_dpm_suspend(void *handle)
6385{
6386        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6387
6388        if (adev->pm.dpm_enabled) {
6389                mutex_lock(&adev->pm.mutex);
6390                amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
6391                               AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
6392                amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
6393                               AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
6394                adev->pm.dpm.last_user_state = adev->pm.dpm.user_state;
6395                adev->pm.dpm.last_state = adev->pm.dpm.state;
6396                adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT;
6397                adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT;
6398                mutex_unlock(&adev->pm.mutex);
6399                amdgpu_pm_compute_clocks(adev);
6401        }
6402
6403        return 0;
6404}
6405
static int ci_dpm_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		/* asic init will reset to the boot state */
		mutex_lock(&adev->pm.mutex);
		ci_dpm_setup_asic(adev);
		ret = ci_dpm_enable(adev);
		if (ret)
			adev->pm.dpm_enabled = false;
		else
			adev->pm.dpm_enabled = true;
		adev->pm.dpm.user_state = adev->pm.dpm.last_user_state;
		adev->pm.dpm.state = adev->pm.dpm.last_state;
		mutex_unlock(&adev->pm.mutex);
		if (adev->pm.dpm_enabled)
			amdgpu_pm_compute_clocks(adev);
	}

	return 0;
}

static bool ci_dpm_is_idle(void *handle)
{
	/* XXX: no idle check implemented; report idle */
	return true;
}

static int ci_dpm_wait_for_idle(void *handle)
{
	/* XXX: no idle wait implemented */
	return 0;
}

static int ci_dpm_soft_reset(void *handle)
{
	return 0;
}

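/*
 * Thermal interrupt masking: setting the INTH/INTL mask bit in
 * CG_THERMAL_INT disables the corresponding threshold interrupt,
 * clearing it enables it. INTH fires on the low-to-high temperature
 * crossing, INTL on the high-to-low crossing.
 */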
static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      unsigned type,
				      enum amdgpu_interrupt_state state)
{
	u32 cg_thermal_int;

	switch (type) {
	case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	default:
		break;
	}
	return 0;
}

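/*
 * Interrupt service: src_id 230/231 are the thermal low-to-high and
 * high-to-low events. Only the crossing direction is recorded here;
 * the actual handling is deferred to the thermal work item.
 */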
static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
{
	bool queue_thermal = false;

	if (entry == NULL)
		return -EINVAL;

	switch (entry->src_id) {
	case 230: /* thermal low to high */
		DRM_DEBUG("IH: thermal low to high\n");
		adev->pm.dpm.thermal.high_to_low = false;
		queue_thermal = true;
		break;
	case 231: /* thermal high to low */
		DRM_DEBUG("IH: thermal high to low\n");
		adev->pm.dpm.thermal.high_to_low = true;
		queue_thermal = true;
		break;
	default:
		break;
	}

	if (queue_thermal)
		schedule_work(&adev->pm.dpm.thermal.work);

	return 0;
}

static int ci_dpm_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}

static int ci_dpm_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

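/*
 * Backend for the pp_dpm_sclk/pp_dpm_mclk/pp_dpm_pcie sysfs files:
 * query the SMC for the current frequency, then list every DPM level
 * and mark the active one with '*'. Clock values appear to be stored
 * in 10 kHz units, hence the division by 100 to print MHz.
 */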
static int ci_dpm_print_clock_levels(void *handle,
		enum pp_clock_type type, char *buf)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
	int i, now, size = 0;
	uint32_t clock, pcie_speed;

	switch (type) {
	case PP_SCLK:
		amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
		clock = RREG32(mmSMC_MSG_ARG_0);

		/* find the first level at or above the current clock */
		for (i = 0; i < sclk_table->count; i++) {
			if (clock > sclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMHz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
		clock = RREG32(mmSMC_MSG_ARG_0);

		for (i = 0; i < mclk_table->count; i++) {
			if (clock > mclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMHz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_PCIE:
		pcie_speed = ci_get_current_pcie_speed(adev);
		for (i = 0; i < pcie_table->count; i++) {
			if (pcie_speed != pcie_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x1" :
					(pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
					(pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
					(i == now) ? "*" : "");
		break;
	default:
		break;
	}

	return size;
}

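/*
 * Restrict the set of usable DPM levels to the given bitmask; only
 * legal while the forced level is AMD_DPM_FORCED_LEVEL_MANUAL. For
 * PCIe, fls(tmp) == ffs(tmp) means exactly one bit is set, in which
 * case that single level is forced; otherwise any force is lifted.
 */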
static int ci_dpm_force_clock_level(void *handle,
		enum pp_clock_type type, uint32_t mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);

	if (adev->pm.dpm.forced_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		return -EINVAL;

	if (mask == 0)
		return -EINVAL;

	switch (type) {
	case PP_SCLK:
		if (!pi->sclk_dpm_key_disabled)
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
					PPSMC_MSG_SCLKDPM_SetEnabledMask,
					pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
		break;

	case PP_MCLK:
		if (!pi->mclk_dpm_key_disabled)
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
					PPSMC_MSG_MCLKDPM_SetEnabledMask,
					pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
		break;

	case PP_PCIE:
	{
		uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;

		if (!pi->pcie_dpm_key_disabled) {
			if (fls(tmp) != ffs(tmp))
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_UnForceLevel);
			else
				amdgpu_ci_send_msg_to_smc_with_parameter(adev,
					PPSMC_MSG_PCIeDPM_ForceLevel,
					fls(tmp) - 1);
		}
		break;
	}
	default:
		break;
	}

	return 0;
}

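/*
 * Sclk overdrive, expressed as a percentage above the golden (default)
 * top level:
 *
 *   od = (current_top - golden_top) * 100 / golden_top
 *
 * The setter below inverts this, clamping the requested value to 20%.
 */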
static int ci_dpm_get_sclk_od(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
	struct ci_single_dpm_table *golden_sclk_table =
			&(pi->golden_dpm_table.sclk_table);
	int value;

	value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
			100 /
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return value;
}

static int ci_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
	struct ci_single_dpm_table *golden_sclk_table =
			&(pi->golden_dpm_table.sclk_table);

	if (value > 20)
		value = 20;

	ps->performance_levels[ps->performance_level_count - 1].sclk =
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
			value / 100 +
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return 0;
}

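/*
 * Mclk overdrive get/set pair; same percentage convention and 20%
 * clamp as the sclk variants above.
 */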
static int ci_dpm_get_mclk_od(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
	struct ci_single_dpm_table *golden_mclk_table =
			&(pi->golden_dpm_table.mclk_table);
	int value;

	value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
			100 /
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	return value;
}

static int ci_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
	struct ci_single_dpm_table *golden_mclk_table =
			&(pi->golden_dpm_table.mclk_table);

	if (value > 20)
		value = 20;

	ps->performance_levels[ps->performance_level_count - 1].mclk =
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
			value / 100 +
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	return 0;
}

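/*
 * Generic sensor readback. GPU load comes from the AverageGraphicsA
 * SMU soft register, which looks like an 8.8 fixed-point value: adding
 * 0x80 rounds to nearest before the >> 8, and the result is clamped
 * to 100%.
 */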
static int ci_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	u32 activity_percent = 50;
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* size must be at least 4 bytes for all sensors */
	if (*size < 4)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		*((uint32_t *)value) = ci_get_average_sclk_freq(adev);
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		*((uint32_t *)value) = ci_get_average_mclk_freq(adev);
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = ci_dpm_get_temp(adev);
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = ci_read_smc_soft_register(adev,
						offsetof(SMU7_SoftRegisters,
							 AverageGraphicsA),
						&activity_percent);
		if (ret == 0) {
			activity_percent += 0x80;
			activity_percent >>= 8;
			activity_percent =
				activity_percent > 100 ? 100 : activity_percent;
		}
		*((uint32_t *)value) = activity_percent;
		*size = 4;
		return 0;
	default:
		return -EINVAL;
	}
}

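/*
 * Glue tables: the amd_ip_funcs below hook this DPM implementation into
 * the common IP block machinery, and ci_smu_ip_block exports it as the
 * SMC 7.0.0 block.
 */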
static const struct amd_ip_funcs ci_dpm_ip_funcs = {
	.name = "ci_dpm",
	.early_init = ci_dpm_early_init,
	.late_init = ci_dpm_late_init,
	.sw_init = ci_dpm_sw_init,
	.sw_fini = ci_dpm_sw_fini,
	.hw_init = ci_dpm_hw_init,
	.hw_fini = ci_dpm_hw_fini,
	.suspend = ci_dpm_suspend,
	.resume = ci_dpm_resume,
	.is_idle = ci_dpm_is_idle,
	.wait_for_idle = ci_dpm_wait_for_idle,
	.soft_reset = ci_dpm_soft_reset,
	.set_clockgating_state = ci_dpm_set_clockgating_state,
	.set_powergating_state = ci_dpm_set_powergating_state,
};

const struct amdgpu_ip_block_version ci_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &ci_dpm_ip_funcs,
};

static const struct amd_pm_funcs ci_dpm_funcs = {
	.pre_set_power_state = &ci_dpm_pre_set_power_state,
	.set_power_state = &ci_dpm_set_power_state,
	.post_set_power_state = &ci_dpm_post_set_power_state,
	.display_configuration_changed = &ci_dpm_display_configuration_changed,
	.get_sclk = &ci_dpm_get_sclk,
	.get_mclk = &ci_dpm_get_mclk,
	.print_power_state = &ci_dpm_print_power_state,
	.debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
	.force_performance_level = &ci_dpm_force_performance_level,
	.vblank_too_short = &ci_dpm_vblank_too_short,
	.powergate_uvd = &ci_dpm_powergate_uvd,
	.set_fan_control_mode = &ci_dpm_set_fan_control_mode,
	.get_fan_control_mode = &ci_dpm_get_fan_control_mode,
	.set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
	.print_clock_levels = ci_dpm_print_clock_levels,
	.force_clock_level = ci_dpm_force_clock_level,
	.get_sclk_od = ci_dpm_get_sclk_od,
	.set_sclk_od = ci_dpm_set_sclk_od,
	.get_mclk_od = ci_dpm_get_mclk_od,
	.set_mclk_od = ci_dpm_set_mclk_od,
	.check_state_equal = ci_check_state_equal,
	.get_vce_clock_state = amdgpu_get_vce_clock_state,
	.read_sensor = ci_dpm_read_sensor,
};

static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
	.set = ci_dpm_set_interrupt_state,
	.process = ci_dpm_process_interrupt,
};

static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
	adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
}