/* linux/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c */
   1/*
   2 * Copyright 2015 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23#include "pp_debug.h"
  24#include <linux/delay.h>
  25#include <linux/fb.h>
  26#include <linux/module.h>
  27#include <linux/pci.h>
  28#include <linux/slab.h>
  29#include <asm/div64.h>
  30#include <drm/amdgpu_drm.h>
  31#include "ppatomctrl.h"
  32#include "atombios.h"
  33#include "pptable_v1_0.h"
  34#include "pppcielanes.h"
  35#include "amd_pcie_helpers.h"
  36#include "hardwaremanager.h"
  37#include "process_pptables_v1_0.h"
  38#include "cgs_common.h"
  39
  40#include "smu7_common.h"
  41
  42#include "hwmgr.h"
  43#include "smu7_hwmgr.h"
  44#include "smu_ucode_xfer_vi.h"
  45#include "smu7_powertune.h"
  46#include "smu7_dyn_defaults.h"
  47#include "smu7_thermal.h"
  48#include "smu7_clockpowergating.h"
  49#include "processpptables.h"
  50#include "pp_thermal.h"
  51
  52#include "ivsrcid/ivsrcid_vislands30.h"
  53
  54#define MC_CG_ARB_FREQ_F0           0x0a
  55#define MC_CG_ARB_FREQ_F1           0x0b
  56#define MC_CG_ARB_FREQ_F2           0x0c
  57#define MC_CG_ARB_FREQ_F3           0x0d
  58
  59#define MC_CG_SEQ_DRAMCONF_S0       0x05
  60#define MC_CG_SEQ_DRAMCONF_S1       0x06
  61#define MC_CG_SEQ_YCLK_SUSPEND      0x04
  62#define MC_CG_SEQ_YCLK_RESUME       0x0a
  63
  64#define SMC_CG_IND_START            0xc0030000
  65#define SMC_CG_IND_END              0xc0040000
  66
  67#define MEM_FREQ_LOW_LATENCY        25000
  68#define MEM_FREQ_HIGH_LATENCY       80000
  69
  70#define MEM_LATENCY_HIGH            45
  71#define MEM_LATENCY_LOW             35
  72#define MEM_LATENCY_ERR             0xFFFF
  73
  74#define MC_SEQ_MISC0_GDDR5_SHIFT 28
  75#define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
  76#define MC_SEQ_MISC0_GDDR5_VALUE 5
  77
  78#define PCIE_BUS_CLK                10000
  79#define TCLK                        (PCIE_BUS_CLK / 10)
  80
/* Built-in power-profile presets, indexed by profile mode.
 * Each row presumably packs {bupdate_sclk, sclk_up_hyst, sclk_down_hyst,
 * sclk_activity, bupdate_mclk, mclk_up_hyst, mclk_down_hyst, mclk_activity}
 * -- TODO(review): confirm against struct profile_mode_setting. */
static struct profile_mode_setting smu7_profiling[7] =
					{{0, 0, 0, 0, 0, 0, 0, 0},
					 {1, 0, 100, 30, 1, 0, 100, 10},
					 {1, 10, 0, 30, 0, 0, 0, 0},
					 {0, 0, 0, 0, 1, 10, 16, 31},
					 {1, 0, 11, 50, 1, 0, 100, 10},
					 {1, 0, 5, 30, 0, 0, 0, 0},
					 {0, 0, 0, 0, 0, 0, 0, 0},
					};
  90
  91#define PPSMC_MSG_SetVBITimeout_VEGAM    ((uint16_t) 0x310)
  92
  93#define ixPWR_SVI2_PLANE1_LOAD                     0xC0200280
  94#define PWR_SVI2_PLANE1_LOAD__PSI1_MASK                    0x00000020L
  95#define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK                 0x00000040L
  96#define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT                  0x00000005
  97#define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT               0x00000006
  98
  99/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
 100enum DPM_EVENT_SRC {
 101        DPM_EVENT_SRC_ANALOG = 0,
 102        DPM_EVENT_SRC_EXTERNAL = 1,
 103        DPM_EVENT_SRC_DIGITAL = 2,
 104        DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
 105        DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
 106};
 107
 108static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
 109static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
 110                enum pp_clock_type type, uint32_t mask);
 111
/* Downcast a generic hw power state to the SMU7-specific layout.
 * Returns NULL (after logging via PP_ASSERT_WITH_CODE) when the
 * magic tag does not identify a VIslands power state. */
static struct smu7_power_state *cast_phw_smu7_power_state(
				  struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				 return NULL);

	return (struct smu7_power_state *)hw_ps;
}
 121
/* Const variant of cast_phw_smu7_power_state(): same magic-tag check,
 * returns NULL on mismatch. */
static const struct smu7_power_state *cast_const_phw_smu7_power_state(
				 const struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				 return NULL);

	return (const struct smu7_power_state *)hw_ps;
}
 131
 132/**
 133 * Find the MC microcode version and store it in the HwMgr struct
 134 *
 135 * @param    hwmgr  the address of the powerplay hardware manager.
 136 * @return   always 0
 137 */
 138static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
 139{
 140        cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
 141
 142        hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
 143
 144        return 0;
 145}
 146
 147static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
 148{
 149        uint32_t speedCntl = 0;
 150
 151        /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
 152        speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
 153                        ixPCIE_LC_SPEED_CNTL);
 154        return((uint16_t)PHM_GET_FIELD(speedCntl,
 155                        PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
 156}
 157
 158static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
 159{
 160        uint32_t link_width;
 161
 162        /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
 163        link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
 164                        PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
 165
 166        PP_ASSERT_WITH_CODE((7 >= link_width),
 167                        "Invalid PCIe lane width!", return 0);
 168
 169        return decode_pcie_lane_width(link_width);
 170}
 171
 172/**
 173* Enable voltage control
 174*
 175* @param    pHwMgr  the address of the powerplay hardware manager.
 176* @return   always PP_Result_OK
 177*/
 178static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
 179{
 180        if (hwmgr->chip_id == CHIP_VEGAM) {
 181                PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
 182                                CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0);
 183                PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
 184                                CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0);
 185        }
 186
 187        if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
 188                smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable);
 189
 190        return 0;
 191}
 192
 193/**
 194* Checks if we want to support voltage control
 195*
 196* @param    hwmgr  the address of the powerplay hardware manager.
 197*/
 198static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
 199{
 200        const struct smu7_hwmgr *data =
 201                        (const struct smu7_hwmgr *)(hwmgr->backend);
 202
 203        return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
 204}
 205
 206/**
 207* Enable voltage control
 208*
 209* @param    hwmgr  the address of the powerplay hardware manager.
 210* @return   always 0
 211*/
 212static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
 213{
 214        /* enable voltage control */
 215        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 216                        GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
 217
 218        return 0;
 219}
 220
 221static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
 222                struct phm_clock_voltage_dependency_table *voltage_dependency_table
 223                )
 224{
 225        uint32_t i;
 226
 227        PP_ASSERT_WITH_CODE((NULL != voltage_table),
 228                        "Voltage Dependency Table empty.", return -EINVAL;);
 229
 230        voltage_table->mask_low = 0;
 231        voltage_table->phase_delay = 0;
 232        voltage_table->count = voltage_dependency_table->count;
 233
 234        for (i = 0; i < voltage_dependency_table->count; i++) {
 235                voltage_table->entries[i].value =
 236                        voltage_dependency_table->entries[i].v;
 237                voltage_table->entries[i].smio_low = 0;
 238        }
 239
 240        return 0;
 241}
 242
 243
 244/**
 245* Create Voltage Tables.
 246*
 247* @param    hwmgr  the address of the powerplay hardware manager.
 248* @return   always 0
 249*/
 250static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
 251{
 252        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 253        struct phm_ppt_v1_information *table_info =
 254                        (struct phm_ppt_v1_information *)hwmgr->pptable;
 255        int result = 0;
 256        uint32_t tmp;
 257
 258        if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
 259                result = atomctrl_get_voltage_table_v3(hwmgr,
 260                                VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
 261                                &(data->mvdd_voltage_table));
 262                PP_ASSERT_WITH_CODE((0 == result),
 263                                "Failed to retrieve MVDD table.",
 264                                return result);
 265        } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
 266                if (hwmgr->pp_table_version == PP_TABLE_V1)
 267                        result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
 268                                        table_info->vdd_dep_on_mclk);
 269                else if (hwmgr->pp_table_version == PP_TABLE_V0)
 270                        result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
 271                                        hwmgr->dyn_state.mvdd_dependency_on_mclk);
 272
 273                PP_ASSERT_WITH_CODE((0 == result),
 274                                "Failed to retrieve SVI2 MVDD table from dependency table.",
 275                                return result;);
 276        }
 277
 278        if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
 279                result = atomctrl_get_voltage_table_v3(hwmgr,
 280                                VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
 281                                &(data->vddci_voltage_table));
 282                PP_ASSERT_WITH_CODE((0 == result),
 283                                "Failed to retrieve VDDCI table.",
 284                                return result);
 285        } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
 286                if (hwmgr->pp_table_version == PP_TABLE_V1)
 287                        result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
 288                                        table_info->vdd_dep_on_mclk);
 289                else if (hwmgr->pp_table_version == PP_TABLE_V0)
 290                        result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
 291                                        hwmgr->dyn_state.vddci_dependency_on_mclk);
 292                PP_ASSERT_WITH_CODE((0 == result),
 293                                "Failed to retrieve SVI2 VDDCI table from dependency table.",
 294                                return result);
 295        }
 296
 297        if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
 298                /* VDDGFX has only SVI2 voltage control */
 299                result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
 300                                        table_info->vddgfx_lookup_table);
 301                PP_ASSERT_WITH_CODE((0 == result),
 302                        "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
 303        }
 304
 305
 306        if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
 307                result = atomctrl_get_voltage_table_v3(hwmgr,
 308                                        VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
 309                                        &data->vddc_voltage_table);
 310                PP_ASSERT_WITH_CODE((0 == result),
 311                        "Failed to retrieve VDDC table.", return result;);
 312        } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
 313
 314                if (hwmgr->pp_table_version == PP_TABLE_V0)
 315                        result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
 316                                        hwmgr->dyn_state.vddc_dependency_on_mclk);
 317                else if (hwmgr->pp_table_version == PP_TABLE_V1)
 318                        result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
 319                                table_info->vddc_lookup_table);
 320
 321                PP_ASSERT_WITH_CODE((0 == result),
 322                        "Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
 323        }
 324
 325        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
 326        PP_ASSERT_WITH_CODE(
 327                        (data->vddc_voltage_table.count <= tmp),
 328                "Too many voltage values for VDDC. Trimming to fit state table.",
 329                        phm_trim_voltage_table_to_fit_state_table(tmp,
 330                                                &(data->vddc_voltage_table)));
 331
 332        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
 333        PP_ASSERT_WITH_CODE(
 334                        (data->vddgfx_voltage_table.count <= tmp),
 335                "Too many voltage values for VDDC. Trimming to fit state table.",
 336                        phm_trim_voltage_table_to_fit_state_table(tmp,
 337                                                &(data->vddgfx_voltage_table)));
 338
 339        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
 340        PP_ASSERT_WITH_CODE(
 341                        (data->vddci_voltage_table.count <= tmp),
 342                "Too many voltage values for VDDCI. Trimming to fit state table.",
 343                        phm_trim_voltage_table_to_fit_state_table(tmp,
 344                                        &(data->vddci_voltage_table)));
 345
 346        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
 347        PP_ASSERT_WITH_CODE(
 348                        (data->mvdd_voltage_table.count <= tmp),
 349                "Too many voltage values for MVDD. Trimming to fit state table.",
 350                        phm_trim_voltage_table_to_fit_state_table(tmp,
 351                                                &(data->mvdd_voltage_table)));
 352
 353        return 0;
 354}
 355
 356/**
 357* Programs static screed detection parameters
 358*
 359* @param    hwmgr  the address of the powerplay hardware manager.
 360* @return   always 0
 361*/
 362static int smu7_program_static_screen_threshold_parameters(
 363                                                        struct pp_hwmgr *hwmgr)
 364{
 365        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 366
 367        /* Set static screen threshold unit */
 368        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 369                        CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
 370                        data->static_screen_threshold_unit);
 371        /* Set static screen threshold */
 372        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 373                        CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
 374                        data->static_screen_threshold);
 375
 376        return 0;
 377}
 378
 379/**
 380* Setup display gap for glitch free memory clock switching.
 381*
 382* @param    hwmgr  the address of the powerplay hardware manager.
 383* @return   always  0
 384*/
 385static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
 386{
 387        uint32_t display_gap =
 388                        cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
 389                                        ixCG_DISPLAY_GAP_CNTL);
 390
 391        display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
 392                        DISP_GAP, DISPLAY_GAP_IGNORE);
 393
 394        display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
 395                        DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
 396
 397        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
 398                        ixCG_DISPLAY_GAP_CNTL, display_gap);
 399
 400        return 0;
 401}
 402
 403/**
 404* Programs activity state transition voting clients
 405*
 406* @param    hwmgr  the address of the powerplay hardware manager.
 407* @return   always  0
 408*/
 409static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
 410{
 411        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 412        int i;
 413
 414        /* Clear reset for voting clients before enabling DPM */
 415        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 416                        SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
 417        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 418                        SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
 419
 420        for (i = 0; i < 8; i++)
 421                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
 422                                        ixCG_FREQ_TRAN_VOTING_0 + i * 4,
 423                                        data->voting_rights_clients[i]);
 424        return 0;
 425}
 426
 427static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
 428{
 429        int i;
 430
 431        /* Reset voting clients before disabling DPM */
 432        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 433                        SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
 434        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 435                        SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
 436
 437        for (i = 0; i < 8; i++)
 438                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
 439                                ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);
 440
 441        return 0;
 442}
 443
/* Copy one arb setting to another and then switch the active set.
 * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants.
 * Only F0 and F1 are supported; any other value returns -EINVAL.
 */
static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
		uint32_t arb_src, uint32_t arb_dest)
{
	uint32_t mc_arb_dram_timing;
	uint32_t mc_arb_dram_timing2;
	uint32_t burst_time;
	uint32_t mc_cg_config;

	/* Capture DRAM timings and burst time of the source set. */
	switch (arb_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
		break;
	default:
		return -EINVAL;
	}

	/* Replay them into the destination set's registers. */
	switch (arb_dest) {
	case MC_CG_ARB_FREQ_F0:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
		break;
	case MC_CG_ARB_FREQ_F1:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
		break;
	default:
		return -EINVAL;
	}

	/* Set the low nibble of MC_CG_CONFIG, then make the destination
	 * set the active one via MC_ARB_CG.CG_ARB_REQ. */
	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
	mc_cg_config |= 0x0000000F;
	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);

	return 0;
}
 492
/* Ask the SMC firmware to restore its power-management defaults;
 * returns the message-send result. */
static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults);
}
 497
/**
* Initial switch from ARB F0->F1
*
* @param    hwmgr  the address of the powerplay hardware manager.
* @return   result of smu7_copy_and_switch_arb_sets (0 on success)
* This function is to be called from the SetPowerState table.
*/
static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
{
	return smu7_copy_and_switch_arb_sets(hwmgr,
			MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
 510
 511static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
 512{
 513        uint32_t tmp;
 514
 515        tmp = (cgs_read_ind_register(hwmgr->device,
 516                        CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
 517                        0x0000ff00) >> 8;
 518
 519        if (tmp == MC_CG_ARB_FREQ_F0)
 520                return 0;
 521
 522        return smu7_copy_and_switch_arb_sets(hwmgr,
 523                        tmp, MC_CG_ARB_FREQ_F0);
 524}
 525
 526static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
 527{
 528        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 529
 530        struct phm_ppt_v1_information *table_info =
 531                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
 532        struct phm_ppt_v1_pcie_table *pcie_table = NULL;
 533
 534        uint32_t i, max_entry;
 535        uint32_t tmp;
 536
 537        PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
 538                        data->use_pcie_power_saving_levels), "No pcie performance levels!",
 539                        return -EINVAL);
 540
 541        if (table_info != NULL)
 542                pcie_table = table_info->pcie_table;
 543
 544        if (data->use_pcie_performance_levels &&
 545                        !data->use_pcie_power_saving_levels) {
 546                data->pcie_gen_power_saving = data->pcie_gen_performance;
 547                data->pcie_lane_power_saving = data->pcie_lane_performance;
 548        } else if (!data->use_pcie_performance_levels &&
 549                        data->use_pcie_power_saving_levels) {
 550                data->pcie_gen_performance = data->pcie_gen_power_saving;
 551                data->pcie_lane_performance = data->pcie_lane_power_saving;
 552        }
 553        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
 554        phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
 555                                        tmp,
 556                                        MAX_REGULAR_DPM_NUMBER);
 557
 558        if (pcie_table != NULL) {
 559                /* max_entry is used to make sure we reserve one PCIE level
 560                 * for boot level (fix for A+A PSPP issue).
 561                 * If PCIE table from PPTable have ULV entry + 8 entries,
 562                 * then ignore the last entry.*/
 563                max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
 564                for (i = 1; i < max_entry; i++) {
 565                        phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
 566                                        get_pcie_gen_support(data->pcie_gen_cap,
 567                                                        pcie_table->entries[i].gen_speed),
 568                                        get_pcie_lane_support(data->pcie_lane_cap,
 569                                                        pcie_table->entries[i].lane_width));
 570                }
 571                data->dpm_table.pcie_speed_table.count = max_entry - 1;
 572                smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
 573        } else {
 574                /* Hardcode Pcie Table */
 575                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
 576                                get_pcie_gen_support(data->pcie_gen_cap,
 577                                                PP_Min_PCIEGen),
 578                                get_pcie_lane_support(data->pcie_lane_cap,
 579                                                PP_Max_PCIELane));
 580                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
 581                                get_pcie_gen_support(data->pcie_gen_cap,
 582                                                PP_Min_PCIEGen),
 583                                get_pcie_lane_support(data->pcie_lane_cap,
 584                                                PP_Max_PCIELane));
 585                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
 586                                get_pcie_gen_support(data->pcie_gen_cap,
 587                                                PP_Max_PCIEGen),
 588                                get_pcie_lane_support(data->pcie_lane_cap,
 589                                                PP_Max_PCIELane));
 590                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
 591                                get_pcie_gen_support(data->pcie_gen_cap,
 592                                                PP_Max_PCIEGen),
 593                                get_pcie_lane_support(data->pcie_lane_cap,
 594                                                PP_Max_PCIELane));
 595                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
 596                                get_pcie_gen_support(data->pcie_gen_cap,
 597                                                PP_Max_PCIEGen),
 598                                get_pcie_lane_support(data->pcie_lane_cap,
 599                                                PP_Max_PCIELane));
 600                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
 601                                get_pcie_gen_support(data->pcie_gen_cap,
 602                                                PP_Max_PCIEGen),
 603                                get_pcie_lane_support(data->pcie_lane_cap,
 604                                                PP_Max_PCIELane));
 605
 606                data->dpm_table.pcie_speed_table.count = 6;
 607        }
 608        /* Populate last level for boot PCIE level, but do not increment count. */
 609        if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
 610                for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
 611                        phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
 612                                get_pcie_gen_support(data->pcie_gen_cap,
 613                                                PP_Max_PCIEGen),
 614                                data->vbios_boot_state.pcie_lane_bootup_value);
 615        } else {
 616                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
 617                        data->dpm_table.pcie_speed_table.count,
 618                        get_pcie_gen_support(data->pcie_gen_cap,
 619                                        PP_Min_PCIEGen),
 620                        get_pcie_lane_support(data->pcie_lane_cap,
 621                                        PP_Max_PCIELane));
 622        }
 623        return 0;
 624}
 625
 626static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
 627{
 628        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 629
 630        memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
 631
 632        phm_reset_single_dpm_table(
 633                        &data->dpm_table.sclk_table,
 634                                smum_get_mac_definition(hwmgr,
 635                                        SMU_MAX_LEVELS_GRAPHICS),
 636                                        MAX_REGULAR_DPM_NUMBER);
 637        phm_reset_single_dpm_table(
 638                        &data->dpm_table.mclk_table,
 639                        smum_get_mac_definition(hwmgr,
 640                                SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);
 641
 642        phm_reset_single_dpm_table(
 643                        &data->dpm_table.vddc_table,
 644                                smum_get_mac_definition(hwmgr,
 645                                        SMU_MAX_LEVELS_VDDC),
 646                                        MAX_REGULAR_DPM_NUMBER);
 647        phm_reset_single_dpm_table(
 648                        &data->dpm_table.vddci_table,
 649                        smum_get_mac_definition(hwmgr,
 650                                SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);
 651
 652        phm_reset_single_dpm_table(
 653                        &data->dpm_table.mvdd_table,
 654                                smum_get_mac_definition(hwmgr,
 655                                        SMU_MAX_LEVELS_MVDD),
 656                                        MAX_REGULAR_DPM_NUMBER);
 657        return 0;
 658}
 659/*
 660 * This function is to initialize all DPM state tables
 661 * for SMU7 based on the dependency table.
 662 * Dynamic state patching function will then trim these
 663 * state tables to the allowed range based
 664 * on the power policy or external client requests,
 665 * such as UVD request, etc.
 666 */
 667
 668static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
 669{
 670        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 671        struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
 672                hwmgr->dyn_state.vddc_dependency_on_sclk;
 673        struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
 674                hwmgr->dyn_state.vddc_dependency_on_mclk;
 675        struct phm_cac_leakage_table *std_voltage_table =
 676                hwmgr->dyn_state.cac_leakage_table;
 677        uint32_t i;
 678
 679        PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
 680                "SCLK dependency table is missing. This table is mandatory", return -EINVAL);
 681        PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
 682                "SCLK dependency table has to have is missing. This table is mandatory", return -EINVAL);
 683
 684        PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
 685                "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
 686        PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
 687                "VMCLK dependency table has to have is missing. This table is mandatory", return -EINVAL);
 688
 689
 690        /* Initialize Sclk DPM table based on allow Sclk values*/
 691        data->dpm_table.sclk_table.count = 0;
 692
 693        for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
 694                if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
 695                                allowed_vdd_sclk_table->entries[i].clk) {
 696                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
 697                                allowed_vdd_sclk_table->entries[i].clk;
 698                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
 699                        data->dpm_table.sclk_table.count++;
 700                }
 701        }
 702
 703        PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
 704                "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
 705        /* Initialize Mclk DPM table based on allow Mclk values */
 706        data->dpm_table.mclk_table.count = 0;
 707        for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
 708                if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
 709                        allowed_vdd_mclk_table->entries[i].clk) {
 710                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
 711                                allowed_vdd_mclk_table->entries[i].clk;
 712                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
 713                        data->dpm_table.mclk_table.count++;
 714                }
 715        }
 716
 717        /* Initialize Vddc DPM table based on allow Vddc values.  And populate corresponding std values. */
 718        for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
 719                data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
 720                data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
 721                /* param1 is for corresponding std voltage */
 722                data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
 723        }
 724
 725        data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
 726        allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
 727
 728        if (NULL != allowed_vdd_mclk_table) {
 729                /* Initialize Vddci DPM table based on allow Mclk values */
 730                for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
 731                        data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
 732                        data->dpm_table.vddci_table.dpm_levels[i].enabled = 1;
 733                }
 734                data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
 735        }
 736
 737        allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;
 738
 739        if (NULL != allowed_vdd_mclk_table) {
 740                /*
 741                 * Initialize MVDD DPM table based on allow Mclk
 742                 * values
 743                 */
 744                for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
 745                        data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
 746                        data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
 747                }
 748                data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
 749        }
 750
 751        return 0;
 752}
 753
 754static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
 755{
 756        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 757        struct phm_ppt_v1_information *table_info =
 758                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
 759        uint32_t i;
 760
 761        struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
 762        struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
 763
 764        if (table_info == NULL)
 765                return -EINVAL;
 766
 767        dep_sclk_table = table_info->vdd_dep_on_sclk;
 768        dep_mclk_table = table_info->vdd_dep_on_mclk;
 769
 770        PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
 771                        "SCLK dependency table is missing.",
 772                        return -EINVAL);
 773        PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
 774                        "SCLK dependency table count is 0.",
 775                        return -EINVAL);
 776
 777        PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
 778                        "MCLK dependency table is missing.",
 779                        return -EINVAL);
 780        PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
 781                        "MCLK dependency table count is 0",
 782                        return -EINVAL);
 783
 784        /* Initialize Sclk DPM table based on allow Sclk values */
 785        data->dpm_table.sclk_table.count = 0;
 786        for (i = 0; i < dep_sclk_table->count; i++) {
 787                if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
 788                                                dep_sclk_table->entries[i].clk) {
 789
 790                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
 791                                        dep_sclk_table->entries[i].clk;
 792
 793                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
 794                                        (i == 0) ? true : false;
 795                        data->dpm_table.sclk_table.count++;
 796                }
 797        }
 798        if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
 799                hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk;
 800        /* Initialize Mclk DPM table based on allow Mclk values */
 801        data->dpm_table.mclk_table.count = 0;
 802        for (i = 0; i < dep_mclk_table->count; i++) {
 803                if (i == 0 || data->dpm_table.mclk_table.dpm_levels
 804                                [data->dpm_table.mclk_table.count - 1].value !=
 805                                                dep_mclk_table->entries[i].clk) {
 806                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
 807                                                        dep_mclk_table->entries[i].clk;
 808                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
 809                                                        (i == 0) ? true : false;
 810                        data->dpm_table.mclk_table.count++;
 811                }
 812        }
 813
 814        if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
 815                hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk;
 816        return 0;
 817}
 818
 819static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
 820{
 821        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 822        struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
 823        struct phm_ppt_v1_information *table_info =
 824                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
 825        uint32_t i;
 826
 827        struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
 828        struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
 829        struct phm_odn_performance_level *entries;
 830
 831        if (table_info == NULL)
 832                return -EINVAL;
 833
 834        dep_sclk_table = table_info->vdd_dep_on_sclk;
 835        dep_mclk_table = table_info->vdd_dep_on_mclk;
 836
 837        odn_table->odn_core_clock_dpm_levels.num_of_pl =
 838                                                data->golden_dpm_table.sclk_table.count;
 839        entries = odn_table->odn_core_clock_dpm_levels.entries;
 840        for (i=0; i<data->golden_dpm_table.sclk_table.count; i++) {
 841                entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
 842                entries[i].enabled = true;
 843                entries[i].vddc = dep_sclk_table->entries[i].vddc;
 844        }
 845
 846        smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table,
 847                (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));
 848
 849        odn_table->odn_memory_clock_dpm_levels.num_of_pl =
 850                                                data->golden_dpm_table.mclk_table.count;
 851        entries = odn_table->odn_memory_clock_dpm_levels.entries;
 852        for (i=0; i<data->golden_dpm_table.mclk_table.count; i++) {
 853                entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
 854                entries[i].enabled = true;
 855                entries[i].vddc = dep_mclk_table->entries[i].vddc;
 856        }
 857
 858        smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table,
 859                (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));
 860
 861        return 0;
 862}
 863
 864static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
 865{
 866        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 867        struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
 868        struct phm_ppt_v1_information *table_info =
 869                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
 870        uint32_t min_vddc = 0;
 871        uint32_t max_vddc = 0;
 872
 873        if (!table_info)
 874                return;
 875
 876        dep_sclk_table = table_info->vdd_dep_on_sclk;
 877
 878        atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);
 879
 880        if (min_vddc == 0 || min_vddc > 2000
 881                || min_vddc > dep_sclk_table->entries[0].vddc)
 882                min_vddc = dep_sclk_table->entries[0].vddc;
 883
 884        if (max_vddc == 0 || max_vddc > 2000
 885                || max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc)
 886                max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc;
 887
 888        data->odn_dpm_table.min_vddc = min_vddc;
 889        data->odn_dpm_table.max_vddc = max_vddc;
 890}
 891
/*
 * Compare the live DPM tables and voltage dependency tables against the
 * user-visible ODN copies, and set bits in
 * data->need_update_smu7_dpm_table for every SMU table that must be
 * re-uploaded.  A VDDC-only edit is widened to SCLK+MCLK updates at the
 * end, since voltages are carried inside those SMU tables.
 */
static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;

	if (table_info == NULL)
		return;

	/* Any engine clock differing from the ODN copy forces an SCLK update. */
	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
		if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
					data->dpm_table.sclk_table.dpm_levels[i].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
			break;
		}
	}

	/* Likewise for memory clocks. */
	for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
		if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
					data->dpm_table.mclk_table.dpm_levels[i].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
			break;
		}
	}

	/* A user-edited MCLK voltage implies both VDDC and MCLK updates;
	 * once found we can return early. */
	dep_table = table_info->vdd_dep_on_mclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);

	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
			return;
		}
	}

	/* Same check for SCLK voltages. */
	dep_table = table_info->vdd_dep_on_sclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
			return;
		}
	}
	/* No voltage edits found: convert a stale VDDC flag (set on an earlier
	 * pass) into full SCLK+MCLK updates instead. */
	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
	}
}
 945
 946static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 947{
 948        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 949
 950        smu7_reset_dpm_tables(hwmgr);
 951
 952        if (hwmgr->pp_table_version == PP_TABLE_V1)
 953                smu7_setup_dpm_tables_v1(hwmgr);
 954        else if (hwmgr->pp_table_version == PP_TABLE_V0)
 955                smu7_setup_dpm_tables_v0(hwmgr);
 956
 957        smu7_setup_default_pcie_table(hwmgr);
 958
 959        /* save a copy of the default DPM table */
 960        memcpy(&(data->golden_dpm_table), &(data->dpm_table),
 961                        sizeof(struct smu7_dpm_table));
 962
 963        /* initialize ODN table */
 964        if (hwmgr->od_enabled) {
 965                if (data->odn_dpm_table.max_vddc) {
 966                        smu7_check_dpm_table_updated(hwmgr);
 967                } else {
 968                        smu7_setup_voltage_range_from_vbios(hwmgr);
 969                        smu7_odn_initial_default_setting(hwmgr);
 970                }
 971        }
 972        return 0;
 973}
 974
 975static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
 976{
 977
 978        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 979                        PHM_PlatformCaps_RegulatorHot))
 980                return smum_send_msg_to_smc(hwmgr,
 981                                PPSMC_MSG_EnableVRHotGPIOInterrupt);
 982
 983        return 0;
 984}
 985
/* Allow engine-clock power management by clearing SCLK_PWRMGT_OFF. */
static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			SCLK_PWRMGT_OFF, 0);
	return 0;
}
 992
 993static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
 994{
 995        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 996
 997        if (data->ulv_supported)
 998                return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV);
 999
1000        return 0;
1001}
1002
1003static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
1004{
1005        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1006
1007        if (data->ulv_supported)
1008                return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV);
1009
1010        return 0;
1011}
1012
1013static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
1014{
1015        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1016                        PHM_PlatformCaps_SclkDeepSleep)) {
1017                if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON))
1018                        PP_ASSERT_WITH_CODE(false,
1019                                        "Attempt to enable Master Deep Sleep switch failed!",
1020                                        return -EINVAL);
1021        } else {
1022                if (smum_send_msg_to_smc(hwmgr,
1023                                PPSMC_MSG_MASTER_DeepSleep_OFF)) {
1024                        PP_ASSERT_WITH_CODE(false,
1025                                        "Attempt to disable Master Deep Sleep switch failed!",
1026                                        return -EINVAL);
1027                }
1028        }
1029
1030        return 0;
1031}
1032
1033static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
1034{
1035        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1036                        PHM_PlatformCaps_SclkDeepSleep)) {
1037                if (smum_send_msg_to_smc(hwmgr,
1038                                PPSMC_MSG_MASTER_DeepSleep_OFF)) {
1039                        PP_ASSERT_WITH_CODE(false,
1040                                        "Attempt to disable Master Deep Sleep switch failed!",
1041                                        return -EINVAL);
1042                }
1043        }
1044
1045        return 0;
1046}
1047
1048static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr)
1049{
1050        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1051        uint32_t soft_register_value = 0;
1052        uint32_t handshake_disables_offset = data->soft_regs_start
1053                                + smum_get_offsetof(hwmgr,
1054                                        SMU_SoftRegisters, HandshakeDisables);
1055
1056        soft_register_value = cgs_read_ind_register(hwmgr->device,
1057                                CGS_IND_REG__SMC, handshake_disables_offset);
1058        soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE;
1059        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1060                        handshake_disables_offset, soft_register_value);
1061        return 0;
1062}
1063
1064static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
1065{
1066        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1067        uint32_t soft_register_value = 0;
1068        uint32_t handshake_disables_offset = data->soft_regs_start
1069                                + smum_get_offsetof(hwmgr,
1070                                        SMU_SoftRegisters, HandshakeDisables);
1071
1072        soft_register_value = cgs_read_ind_register(hwmgr->device,
1073                                CGS_IND_REG__SMC, handshake_disables_offset);
1074        soft_register_value |= smum_get_mac_definition(hwmgr,
1075                                        SMU_UVD_MCLK_HANDSHAKE_DISABLE);
1076        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1077                        handshake_disables_offset, soft_register_value);
1078        return 0;
1079}
1080
1081static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1082{
1083        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1084
1085        /* enable SCLK dpm */
1086        if (!data->sclk_dpm_key_disabled) {
1087                if (hwmgr->chip_id == CHIP_VEGAM)
1088                        smu7_disable_sclk_vce_handshake(hwmgr);
1089
1090                PP_ASSERT_WITH_CODE(
1091                (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
1092                "Failed to enable SCLK DPM during DPM Start Function!",
1093                return -EINVAL);
1094        }
1095
1096        /* enable MCLK dpm */
1097        if (0 == data->mclk_dpm_key_disabled) {
1098                if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
1099                        smu7_disable_handshake_uvd(hwmgr);
1100
1101                PP_ASSERT_WITH_CODE(
1102                                (0 == smum_send_msg_to_smc(hwmgr,
1103                                                PPSMC_MSG_MCLKDPM_Enable)),
1104                                "Failed to enable MCLK DPM during DPM Start Function!",
1105                                return -EINVAL);
1106
1107                if (hwmgr->chip_family != CHIP_VEGAM)
1108                        PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
1109
1110
1111                if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
1112                        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5);
1113                        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5);
1114                        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005);
1115                        udelay(10);
1116                        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005);
1117                        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005);
1118                        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005);
1119                } else {
1120                        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
1121                        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
1122                        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
1123                        udelay(10);
1124                        if (hwmgr->chip_id == CHIP_VEGAM) {
1125                                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009);
1126                                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009);
1127                        } else {
1128                                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
1129                                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
1130                        }
1131                        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
1132                }
1133        }
1134
1135        return 0;
1136}
1137
/*
 * Bring up dynamic power management: global PM and sclk deep sleep,
 * PCIe DPM preparation, SCLK/MCLK DPM, PCIe DPM and finally the AC/DC
 * transition GPIO interrupt.  The ordering follows the SMU bring-up
 * sequence and should not be rearranged.
 */
static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/*enable general power management */

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 1);

	/* enable sclk deep sleep */

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 1);

	/* prepare for PCIE DPM */

	/* give the SMU a voltage-change timeout before link changes */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start +
			smum_get_offsetof(hwmgr, SMU_SoftRegisters,
						VoltageChangeTimeout), 0x1000);
	/* release the PCIe link-controller soft reset */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
			SWRST_COMMAND_1, RESETLC, 0x0);

	/* NOTE(review): 0x1488 is an undocumented register poked only on CI
	 * parts; bit 0 is cleared here — confirm against CI register specs */
	if (hwmgr->chip_family == AMDGPU_FAMILY_CI)
		cgs_write_register(hwmgr->device, 0x1488,
			(cgs_read_register(hwmgr->device, 0x1488) & ~0x1));

	if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
		pr_err("Failed to enable Sclk DPM and Mclk DPM!");
		return -EINVAL;
	}

	/* enable PCIE dpm */
	if (0 == data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_PCIeDPM_Enable)),
				"Failed to enable pcie DPM during DPM Start Function!",
				return -EINVAL);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition)) {
		/* best effort: a failure only logs, it does not abort start */
		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_EnableACDCGPIOInterrupt)),
				"Failed to enable AC DC GPIO Interrupt!",
				);
	}

	return 0;
}
1189
1190static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1191{
1192        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1193
1194        /* disable SCLK dpm */
1195        if (!data->sclk_dpm_key_disabled) {
1196                PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1197                                "Trying to disable SCLK DPM when DPM is disabled",
1198                                return 0);
1199                smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable);
1200        }
1201
1202        /* disable MCLK dpm */
1203        if (!data->mclk_dpm_key_disabled) {
1204                PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1205                                "Trying to disable MCLK DPM when DPM is disabled",
1206                                return 0);
1207                smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable);
1208        }
1209
1210        return 0;
1211}
1212
/*
 * Tear down dynamic power management: global PM, sclk deep sleep,
 * PCIe DPM, SCLK/MCLK DPM and finally voltage control.  Teardown order
 * mirrors the bring-up order in smu7_start_dpm() and should be kept.
 */
static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* disable general power management */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 0);
	/* disable sclk deep sleep */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 0);

	/* disable PCIE dpm */
	if (!data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_PCIeDPM_Disable) == 0),
				"Failed to disable pcie DPM during DPM Stop Function!",
				return -EINVAL);
	}

	smu7_disable_sclk_mclk_dpm(hwmgr);

	/* if DPM is already stopped there is nothing left to disable;
	 * the assert logs and we return success */
	PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
			"Trying to disable voltage DPM when DPM is disabled",
			return 0);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable);

	return 0;
}
1243
/*
 * Configure which throttle event sources feed the hardware thermal
 * protection logic.  @sources is a bitmask of PHM_AutoThrottleSource
 * bits; an empty or unrecognized mask disables thermal protection.
 */
static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
{
	bool protection;
	/* src is only assigned (and only read) when protection == true */
	enum DPM_EVENT_SRC src;

	switch (sources) {
	default:
		pr_err("Unknown throttling event sources.");
		/* fall through */
	case 0:
		protection = false;
		/* src is unused */
		break;
	case (1 << PHM_AutoThrottleSource_Thermal):
		protection = true;
		src = DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << PHM_AutoThrottleSource_External):
		protection = true;
		src = DPM_EVENT_SRC_EXTERNAL;
		break;
	case (1 << PHM_AutoThrottleSource_External) |
			(1 << PHM_AutoThrottleSource_Thermal):
		protection = true;
		src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
		break;
	}
	/* Order matters - don't enable thermal protection for the wrong source. */
	if (protection) {
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
				DPM_EVENT_SRC, src);
		/* protection is only armed when a thermal controller exists */
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
				THERMAL_PROTECTION_DIS,
				!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_ThermalController));
	} else
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
				THERMAL_PROTECTION_DIS, 1);
}
1283
1284static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1285                PHM_AutoThrottleSource source)
1286{
1287        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1288
1289        if (!(data->active_auto_throttle_sources & (1 << source))) {
1290                data->active_auto_throttle_sources |= 1 << source;
1291                smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1292        }
1293        return 0;
1294}
1295
/* Enable thermal throttling as an active DPM throttle event source. */
static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
1300
1301static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1302                PHM_AutoThrottleSource source)
1303{
1304        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1305
1306        if (data->active_auto_throttle_sources & (1 << source)) {
1307                data->active_auto_throttle_sources &= ~(1 << source);
1308                smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1309        }
1310        return 0;
1311}
1312
/* Disable thermal throttling as an active DPM throttle event source. */
static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
1317
1318static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
1319{
1320        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1321        data->pcie_performance_request = true;
1322
1323        return 0;
1324}
1325
1326static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1327{
1328        int tmp_result = 0;
1329        int result = 0;
1330
1331        if (smu7_voltage_control(hwmgr)) {
1332                tmp_result = smu7_enable_voltage_control(hwmgr);
1333                PP_ASSERT_WITH_CODE(tmp_result == 0,
1334                                "Failed to enable voltage control!",
1335                                result = tmp_result);
1336
1337                tmp_result = smu7_construct_voltage_tables(hwmgr);
1338                PP_ASSERT_WITH_CODE((0 == tmp_result),
1339                                "Failed to construct voltage tables!",
1340                                result = tmp_result);
1341        }
1342        smum_initialize_mc_reg_table(hwmgr);
1343
1344        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1345                        PHM_PlatformCaps_EngineSpreadSpectrumSupport))
1346                PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1347                                GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
1348
1349        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1350                        PHM_PlatformCaps_ThermalController))
1351                PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1352                                GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
1353
1354        tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
1355        PP_ASSERT_WITH_CODE((0 == tmp_result),
1356                        "Failed to program static screen threshold parameters!",
1357                        result = tmp_result);
1358
1359        tmp_result = smu7_enable_display_gap(hwmgr);
1360        PP_ASSERT_WITH_CODE((0 == tmp_result),
1361                        "Failed to enable display gap!", result = tmp_result);
1362
1363        tmp_result = smu7_program_voting_clients(hwmgr);
1364        PP_ASSERT_WITH_CODE((0 == tmp_result),
1365                        "Failed to program voting clients!", result = tmp_result);
1366
1367        tmp_result = smum_process_firmware_header(hwmgr);
1368        PP_ASSERT_WITH_CODE((0 == tmp_result),
1369                        "Failed to process firmware header!", result = tmp_result);
1370
1371        if (hwmgr->chip_id != CHIP_VEGAM) {
1372                tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
1373                PP_ASSERT_WITH_CODE((0 == tmp_result),
1374                                "Failed to initialize switch from ArbF0 to F1!",
1375                                result = tmp_result);
1376        }
1377
1378        result = smu7_setup_default_dpm_tables(hwmgr);
1379        PP_ASSERT_WITH_CODE(0 == result,
1380                        "Failed to setup default DPM tables!", return result);
1381
1382        tmp_result = smum_init_smc_table(hwmgr);
1383        PP_ASSERT_WITH_CODE((0 == tmp_result),
1384                        "Failed to initialize SMC table!", result = tmp_result);
1385
1386        tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
1387        PP_ASSERT_WITH_CODE((0 == tmp_result),
1388                        "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
1389
1390        smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay);
1391
1392        tmp_result = smu7_enable_sclk_control(hwmgr);
1393        PP_ASSERT_WITH_CODE((0 == tmp_result),
1394                        "Failed to enable SCLK control!", result = tmp_result);
1395
1396        tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
1397        PP_ASSERT_WITH_CODE((0 == tmp_result),
1398                        "Failed to enable voltage control!", result = tmp_result);
1399
1400        tmp_result = smu7_enable_ulv(hwmgr);
1401        PP_ASSERT_WITH_CODE((0 == tmp_result),
1402                        "Failed to enable ULV!", result = tmp_result);
1403
1404        tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
1405        PP_ASSERT_WITH_CODE((0 == tmp_result),
1406                        "Failed to enable deep sleep master switch!", result = tmp_result);
1407
1408        tmp_result = smu7_enable_didt_config(hwmgr);
1409        PP_ASSERT_WITH_CODE((tmp_result == 0),
1410                        "Failed to enable deep sleep master switch!", result = tmp_result);
1411
1412        tmp_result = smu7_start_dpm(hwmgr);
1413        PP_ASSERT_WITH_CODE((0 == tmp_result),
1414                        "Failed to start DPM!", result = tmp_result);
1415
1416        tmp_result = smu7_enable_smc_cac(hwmgr);
1417        PP_ASSERT_WITH_CODE((0 == tmp_result),
1418                        "Failed to enable SMC CAC!", result = tmp_result);
1419
1420        tmp_result = smu7_enable_power_containment(hwmgr);
1421        PP_ASSERT_WITH_CODE((0 == tmp_result),
1422                        "Failed to enable power containment!", result = tmp_result);
1423
1424        tmp_result = smu7_power_control_set_level(hwmgr);
1425        PP_ASSERT_WITH_CODE((0 == tmp_result),
1426                        "Failed to power control set level!", result = tmp_result);
1427
1428        tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
1429        PP_ASSERT_WITH_CODE((0 == tmp_result),
1430                        "Failed to enable thermal auto throttle!", result = tmp_result);
1431
1432        tmp_result = smu7_pcie_performance_request(hwmgr);
1433        PP_ASSERT_WITH_CODE((0 == tmp_result),
1434                        "pcie performance request failed!", result = tmp_result);
1435
1436        return 0;
1437}
1438
1439static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
1440{
1441        if (!hwmgr->avfs_supported)
1442                return 0;
1443
1444        if (enable) {
1445                if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
1446                                CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1447                        PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
1448                                        hwmgr, PPSMC_MSG_EnableAvfs),
1449                                        "Failed to enable AVFS!",
1450                                        return -EINVAL);
1451                }
1452        } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
1453                        CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1454                PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
1455                                hwmgr, PPSMC_MSG_DisableAvfs),
1456                                "Failed to disable AVFS!",
1457                                return -EINVAL);
1458        }
1459
1460        return 0;
1461}
1462
1463static int smu7_update_avfs(struct pp_hwmgr *hwmgr)
1464{
1465        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1466
1467        if (!hwmgr->avfs_supported)
1468                return 0;
1469
1470        if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
1471                smu7_avfs_control(hwmgr, false);
1472        } else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
1473                smu7_avfs_control(hwmgr, false);
1474                smu7_avfs_control(hwmgr, true);
1475        } else {
1476                smu7_avfs_control(hwmgr, true);
1477        }
1478
1479        return 0;
1480}
1481
/*
 * smu7_disable_dpm_tasks - run the ordered SMU7 DPM teardown sequence.
 * @hwmgr: the address of the powerplay hardware manager.
 *
 * Mirrors smu7_enable_dpm_tasks() in reverse order.  Each failing step
 * is logged and its code kept in @result, but teardown continues so
 * later steps still run.  Returns 0, or the last step's error code.
 */
int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	/* Re-assert thermal protection disable before tearing DPM down. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);

	tmp_result = smu7_disable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable power containment!", result = tmp_result);

	tmp_result = smu7_disable_smc_cac(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable SMC CAC!", result = tmp_result);

	tmp_result = smu7_disable_didt_config(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable DIDT!", result = tmp_result);

	/* Turn off SCLK spread spectrum (both the PLL and the dynamic bit). */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);

	tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable thermal auto throttle!", result = tmp_result);

	tmp_result = smu7_avfs_control(hwmgr, false);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable AVFS!", result = tmp_result);

	tmp_result = smu7_stop_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to stop DPM!", result = tmp_result);

	tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable deep sleep master switch!", result = tmp_result);

	tmp_result = smu7_disable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable ULV!", result = tmp_result);

	tmp_result = smu7_clear_voting_clients(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to clear voting clients!", result = tmp_result);

	tmp_result = smu7_reset_to_default(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to reset to default!", result = tmp_result);

	/* Hand the memory arbiter back to the boot-time F0 set. */
	tmp_result = smu7_force_switch_to_arbf0(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to force to switch arbf0!", result = tmp_result);

	return result;
}
1542
int smu7_reset_asic_tasks(struct pp_hwmgr *hwmgr)
{
	/* SMU7 has no ASIC-reset handling; keep the hook as a no-op. */
	return 0;
}
1548
/*
 * smu7_init_dpm_defaults - seed the smu7_hwmgr backend with default DPM
 * settings, then refine them from the feature mask, VBIOS voltage objects
 * and per-chip quirks.
 * @hwmgr: the address of the powerplay hardware manager.
 */
static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct amdgpu_device *adev = hwmgr->adev;

	data->dll_default_on = false;
	data->mclk_dpm0_activity_target = 0xa;
	data->vddc_vddgfx_delta = 300;
	data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
	data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
	data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
	data->voting_rights_clients[1]= SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
	data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
	data->voting_rights_clients[3]= SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
	data->voting_rights_clients[4]= SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
	data->voting_rights_clients[5]= SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
	data->voting_rights_clients[6]= SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
	data->voting_rights_clients[7]= SMU7_VOTINGRIGHTSCLIENTS_DFLT7;

	/* DPM domains can be individually disabled via the feature mask. */
	data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
	data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
	data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
	/* need to set voltage control types before EVV patching */
	data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->enable_tdc_limit_feature = true;
	data->enable_pkg_pwr_tracking_feature = true;
	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
	data->current_profile_setting.bupdate_sclk = 1;
	data->current_profile_setting.sclk_up_hyst = 0;
	data->current_profile_setting.sclk_down_hyst = 100;
	data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
	data->current_profile_setting.bupdate_mclk = 1;
	data->current_profile_setting.mclk_up_hyst = 0;
	data->current_profile_setting.mclk_down_hyst = 100;
	data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
	/* Default workload is fullscreen 3D.  ("prority" typo is the actual
	 * field name in struct pp_hwmgr.)
	 */
	hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
	hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;

	/* Per-chip VDDC phase-shed control configuration. */
	if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) {
		uint8_t tmp1, tmp2;
		uint16_t tmp3 = 0;
		atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2,
						&tmp3);
		/* extract bits 5-6 and swap them */
		tmp3 = (tmp3 >> 5) & 0x3;
		data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3;
	} else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
		data->vddc_phase_shed_control = 1;
	} else {
		data->vddc_phase_shed_control = 0;
	}

	/* Thermal trip points in millidegrees Celsius. */
	if (hwmgr->chip_id  == CHIP_HAWAII) {
		data->thermal_temp_setting.temperature_low = 94500;
		data->thermal_temp_setting.temperature_high = 95000;
		data->thermal_temp_setting.temperature_shutdown = 104000;
	} else {
		data->thermal_temp_setting.temperature_low = 99500;
		data->thermal_temp_setting.temperature_high = 100000;
		data->thermal_temp_setting.temperature_shutdown = 104000;
	}

	data->fast_watermark_threshold = 100;
	/* Pick the VDDC control method from the VBIOS voltage objects,
	 * preferring SVID2 over GPIO.
	 */
	if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
	else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
		data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;

	/* Split-rail VDDGFX is only ever driven over SVID2. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ControlVDDGFX)) {
		if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
			VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
			data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
		}
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableMVDDControl)) {
		if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
		else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
			data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
	}

	/* Drop platform caps whose control method could not be resolved. */
	if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ControlVDDGFX);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ControlVDDCI)) {
		if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
		else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
	}

	if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_EnableMVDDControl);

	if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	/* Clock stretcher needs a v1 table with a non-zero stretch amount. */
	if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK)
		&& (table_info->cac_dtp_table->usClockStretchAmount != 0))
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_ClockStretcher);

	/* PCIe ranges: "max" holds the lowest gen / 0 lanes, "min" the
	 * highest, matching how the rest of the driver interprets them.
	 */
	data->pcie_gen_performance.max = PP_PCIEGen1;
	data->pcie_gen_performance.min = PP_PCIEGen3;
	data->pcie_gen_power_saving.max = PP_PCIEGen1;
	data->pcie_gen_power_saving.min = PP_PCIEGen3;
	data->pcie_lane_performance.max = 0;
	data->pcie_lane_performance.min = 16;
	data->pcie_lane_power_saving.max = 0;
	data->pcie_lane_power_saving.min = 16;


	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			      PHM_PlatformCaps_UVDPowerGating);
	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			      PHM_PlatformCaps_VCEPowerGating);
}
1686
/**
* Get Leakage VDDC based on leakage ID.
*
* @param    hwmgr  the address of the powerplay hardware manager.
* @return   0 on success; -EINVAL if an EVV voltage read back from the
*           VBIOS is zero or out of range
*/
static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint16_t vv_id;
	uint16_t vddc = 0;
	uint16_t vddgfx = 0;
	uint16_t i, j;
	uint32_t sclk = 0;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;


	/* Walk every virtual leakage voltage ID the VBIOS may use. */
	for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;

		if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
			/* Split-rail path: resolve the leakage ID on VDDGFX. */
			if ((hwmgr->pp_table_version == PP_TABLE_V1)
			    && !phm_get_sclk_for_voltage_evv(hwmgr,
						table_info->vddgfx_lookup_table, vv_id, &sclk)) {
				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
							PHM_PlatformCaps_ClockStretcher)) {
					sclk_table = table_info->vdd_dep_on_sclk;

					/* Bump the probe clock past any level with
					 * clock stretching disabled.
					 */
					for (j = 1; j < sclk_table->count; j++) {
						if (sclk_table->entries[j].clk == sclk &&
								sclk_table->entries[j].cks_enable == 0) {
							sclk += 5000;
							break;
						}
					}
				}
				if (0 == atomctrl_get_voltage_evv_on_sclk
				    (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
				     vv_id, &vddgfx)) {
					/* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */
					PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);

					/* the voltage should not be zero nor equal to leakage ID */
					if (vddgfx != 0 && vddgfx != vv_id) {
						data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
						data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
						data->vddcgfx_leakage.count++;
					}
				} else {
					pr_info("Error retrieving EVV voltage value!\n");
				}
			}
		} else {
			/* Single-rail path: resolve the leakage ID on VDDC.
			 * For V0 tables the short-circuit skips the lookup,
			 * since table_info is not a v1 structure there.
			 */
			if ((hwmgr->pp_table_version == PP_TABLE_V0)
				|| !phm_get_sclk_for_voltage_evv(hwmgr,
					table_info->vddc_lookup_table, vv_id, &sclk)) {
				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_ClockStretcher)) {
					if (table_info == NULL)
						return -EINVAL;
					sclk_table = table_info->vdd_dep_on_sclk;

					for (j = 1; j < sclk_table->count; j++) {
						if (sclk_table->entries[j].clk == sclk &&
								sclk_table->entries[j].cks_enable == 0) {
							sclk += 5000;
							break;
						}
					}
				}

				/* reject voltages of 2 V or more — they could damage the ASIC */
				if (phm_get_voltage_evv_on_sclk(hwmgr,
							VOLTAGE_TYPE_VDDC,
							sclk, vv_id, &vddc) == 0) {
					if (vddc >= 2000 || vddc == 0)
						return -EINVAL;
				} else {
					pr_debug("failed to retrieving EVV voltage!\n");
					continue;
				}

				/* the voltage should not be zero nor equal to leakage ID */
				if (vddc != 0 && vddc != vv_id) {
					data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
					data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
					data->vddc_leakage.count++;
				}
			}
		}
	}

	return 0;
}
1782
/**
 * Change virtual leakage voltage to actual value.
 *
 * @param     hwmgr          the address of the powerplay hardware manager.
 * @param     voltage        pointer to the voltage to patch in place
 * @param     leakage_table  pointer to the leakage table to look the ID up in
 */
1790static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
1791                uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
1792{
1793        uint32_t index;
1794
1795        /* search for leakage voltage ID 0xff01 ~ 0xff08 */
1796        for (index = 0; index < leakage_table->count; index++) {
1797                /* if this voltage matches a leakage voltage ID */
1798                /* patch with actual leakage voltage */
1799                if (leakage_table->leakage_id[index] == *voltage) {
1800                        *voltage = leakage_table->actual_voltage[index];
1801                        break;
1802                }
1803        }
1804
1805        if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
1806                pr_err("Voltage value looks like a Leakage ID but it's not patched \n");
1807}
1808
/**
* Patch voltage lookup table by EVV leakages.
*
* @param     hwmgr          the address of the powerplay hardware manager.
* @param     lookup_table   pointer to the voltage lookup table to patch
* @param     leakage_table  pointer to the leakage table
* @return    always 0
*/
1817static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
1818                phm_ppt_v1_voltage_lookup_table *lookup_table,
1819                struct smu7_leakage_voltage *leakage_table)
1820{
1821        uint32_t i;
1822
1823        for (i = 0; i < lookup_table->count; i++)
1824                smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
1825                                &lookup_table->entries[i].us_vdd, leakage_table);
1826
1827        return 0;
1828}
1829
1830static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
1831                struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
1832                uint16_t *vddc)
1833{
1834        struct phm_ppt_v1_information *table_info =
1835                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
1836        smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
1837        hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
1838                        table_info->max_clock_voltage_on_dc.vddc;
1839        return 0;
1840}
1841
1842static int smu7_patch_voltage_dependency_tables_with_lookup_table(
1843                struct pp_hwmgr *hwmgr)
1844{
1845        uint8_t entry_id;
1846        uint8_t voltage_id;
1847        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1848        struct phm_ppt_v1_information *table_info =
1849                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
1850
1851        struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1852                        table_info->vdd_dep_on_sclk;
1853        struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
1854                        table_info->vdd_dep_on_mclk;
1855        struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1856                        table_info->mm_dep_table;
1857
1858        if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1859                for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1860                        voltage_id = sclk_table->entries[entry_id].vddInd;
1861                        sclk_table->entries[entry_id].vddgfx =
1862                                table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
1863                }
1864        } else {
1865                for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1866                        voltage_id = sclk_table->entries[entry_id].vddInd;
1867                        sclk_table->entries[entry_id].vddc =
1868                                table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1869                }
1870        }
1871
1872        for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
1873                voltage_id = mclk_table->entries[entry_id].vddInd;
1874                mclk_table->entries[entry_id].vddc =
1875                        table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1876        }
1877
1878        for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
1879                voltage_id = mm_table->entries[entry_id].vddcInd;
1880                mm_table->entries[entry_id].vddc =
1881                        table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1882        }
1883
1884        return 0;
1885
1886}
1887
/*
 * phm_add_voltage - insert a calculated voltage record into a lookup table,
 * or overwrite the existing (non-calculated) entry with the same voltage.
 * Returns 0 on success, -EINVAL on a NULL/empty/over-capacity table.
 */
static int phm_add_voltage(struct pp_hwmgr *hwmgr,
			phm_ppt_v1_voltage_lookup_table *look_up_table,
			phm_ppt_v1_voltage_lookup_record *record)
{
	uint32_t i;

	PP_ASSERT_WITH_CODE((NULL != look_up_table),
		"Lookup Table empty.", return -EINVAL);
	PP_ASSERT_WITH_CODE((0 != look_up_table->count),
		"Lookup Table empty.", return -EINVAL);

	/* i temporarily holds the table's capacity for the fullness check.
	 * NOTE(review): with count == capacity this check still passes and a
	 * new record would be appended at entries[count] — looks like an
	 * off-by-one; confirm the table's allocated size before changing.
	 */
	i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
	PP_ASSERT_WITH_CODE((i >= look_up_table->count),
		"Lookup Table is full.", return -EINVAL);

	/* This is to avoid entering duplicate calculated records. */
	for (i = 0; i < look_up_table->count; i++) {
		if (look_up_table->entries[i].us_vdd == record->us_vdd) {
			if (look_up_table->entries[i].us_calculated == 1)
				return 0;
			break;
		}
	}

	/* i is either the matching slot or count (append position). */
	look_up_table->entries[i].us_calculated = 1;
	look_up_table->entries[i].us_vdd = record->us_vdd;
	look_up_table->entries[i].us_cac_low = record->us_cac_low;
	look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
	look_up_table->entries[i].us_cac_high = record->us_cac_high;
	/* Only increment the count when we're appending, not replacing duplicate entry. */
	if (i == look_up_table->count)
		look_up_table->count++;

	return 0;
}
1923
1924
/*
 * smu7_calc_voltage_dependency_tables - on split-rail (VDDGFX over SVID2)
 * parts, derive the "other" rail's voltage for every SCLK/MCLK level by
 * applying the table's signed vdd_offset, and register each derived value
 * in the corresponding lookup table.  Always returns 0.
 */
static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
{
	uint8_t entry_id;
	struct phm_ppt_v1_voltage_lookup_record v_record;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);

	phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
	phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;

	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		/* SCLK: vddc = vddgfx + offset; bit 15 marks a negative
		 * offset, undone by subtracting 0xFFFF.
		 */
		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
			if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
				v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
					sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
			else
				v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
					sclk_table->entries[entry_id].vdd_offset;

			sclk_table->entries[entry_id].vddc =
				v_record.us_cac_low = v_record.us_cac_mid =
				v_record.us_cac_high = v_record.us_vdd;

			phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
		}

		/* MCLK: the mirror computation, vddgfx = vddc + offset. */
		for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
			if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
				v_record.us_vdd = mclk_table->entries[entry_id].vddc +
					mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
			else
				v_record.us_vdd = mclk_table->entries[entry_id].vddc +
					mclk_table->entries[entry_id].vdd_offset;

			mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
			phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
		}
	}
	return 0;
}
1966
1967static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
1968{
1969        uint8_t entry_id;
1970        struct phm_ppt_v1_voltage_lookup_record v_record;
1971        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1972        struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1973        phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
1974
1975        if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1976                for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
1977                        if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
1978                                v_record.us_vdd = mm_table->entries[entry_id].vddc +
1979                                        mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
1980                        else
1981                                v_record.us_vdd = mm_table->entries[entry_id].vddc +
1982                                        mm_table->entries[entry_id].vddgfx_offset;
1983
1984                        /* Add the calculated VDDGFX to the VDDGFX lookup table */
1985                        mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
1986                                v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
1987                        phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
1988                }
1989        }
1990        return 0;
1991}
1992
1993static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
1994                struct phm_ppt_v1_voltage_lookup_table *lookup_table)
1995{
1996        uint32_t table_size, i, j;
1997        struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
1998        table_size = lookup_table->count;
1999
2000        PP_ASSERT_WITH_CODE(0 != lookup_table->count,
2001                "Lookup table is empty", return -EINVAL);
2002
2003        /* Sorting voltages */
2004        for (i = 0; i < table_size - 1; i++) {
2005                for (j = i + 1; j > 0; j--) {
2006                        if (lookup_table->entries[j].us_vdd <
2007                                        lookup_table->entries[j - 1].us_vdd) {
2008                                tmp_voltage_lookup_record = lookup_table->entries[j - 1];
2009                                lookup_table->entries[j - 1] = lookup_table->entries[j];
2010                                lookup_table->entries[j] = tmp_voltage_lookup_record;
2011                        }
2012                }
2013        }
2014
2015        return 0;
2016}
2017
2018static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
2019{
2020        int result = 0;
2021        int tmp_result;
2022        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2023        struct phm_ppt_v1_information *table_info =
2024                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
2025
2026        if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2027                tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
2028                        table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
2029                if (tmp_result != 0)
2030                        result = tmp_result;
2031
2032                smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
2033                        &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
2034        } else {
2035
2036                tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
2037                                table_info->vddc_lookup_table, &(data->vddc_leakage));
2038                if (tmp_result)
2039                        result = tmp_result;
2040
2041                tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
2042                                &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
2043                if (tmp_result)
2044                        result = tmp_result;
2045        }
2046
2047        tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
2048        if (tmp_result)
2049                result = tmp_result;
2050
2051        tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
2052        if (tmp_result)
2053                result = tmp_result;
2054
2055        tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
2056        if (tmp_result)
2057                result = tmp_result;
2058
2059        tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
2060        if (tmp_result)
2061                result = tmp_result;
2062
2063        tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
2064        if (tmp_result)
2065                result = tmp_result;
2066
2067        return result;
2068}
2069
2070static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
2071{
2072        struct phm_ppt_v1_information *table_info =
2073                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
2074
2075        struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
2076                                                table_info->vdd_dep_on_sclk;
2077        struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
2078                                                table_info->vdd_dep_on_mclk;
2079
2080        PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
2081                "VDD dependency on SCLK table is missing.",
2082                return -EINVAL);
2083        PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
2084                "VDD dependency on SCLK table has to have is missing.",
2085                return -EINVAL);
2086
2087        PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
2088                "VDD dependency on MCLK table is missing",
2089                return -EINVAL);
2090        PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
2091                "VDD dependency on MCLK table has to have is missing.",
2092                return -EINVAL);
2093
2094        table_info->max_clock_voltage_on_ac.sclk =
2095                allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
2096        table_info->max_clock_voltage_on_ac.mclk =
2097                allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
2098        table_info->max_clock_voltage_on_ac.vddc =
2099                allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
2100        table_info->max_clock_voltage_on_ac.vddci =
2101                allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
2102
2103        hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
2104        hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
2105        hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
2106        hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
2107
2108        return 0;
2109}
2110
2111static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
2112{
2113        struct phm_ppt_v1_information *table_info =
2114                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
2115        struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
2116        struct phm_ppt_v1_voltage_lookup_table *lookup_table;
2117        uint32_t i;
2118        uint32_t hw_revision, sub_vendor_id, sub_sys_id;
2119        struct amdgpu_device *adev = hwmgr->adev;
2120
2121        if (table_info != NULL) {
2122                dep_mclk_table = table_info->vdd_dep_on_mclk;
2123                lookup_table = table_info->vddc_lookup_table;
2124        } else
2125                return 0;
2126
2127        hw_revision = adev->pdev->revision;
2128        sub_sys_id = adev->pdev->subsystem_device;
2129        sub_vendor_id = adev->pdev->subsystem_vendor;
2130
2131        if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
2132                        ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
2133                    (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
2134                    (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
2135                if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
2136                        return 0;
2137
2138                for (i = 0; i < lookup_table->count; i++) {
2139                        if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
2140                                dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
2141                                return 0;
2142                        }
2143                }
2144        }
2145        return 0;
2146}
2147
/*
 * Initialize thermal-related parameters: program the GNB-slow/NB-PS1 GPIO
 * routing in CNB_PWRMGT_CNTL for the VDDC PCC pin, then derive the fan
 * PWM/RPM limits and operating-temperature window from the powerplay
 * cac_dtp table, mirroring them into hwmgr->dyn_state.
 * Always returns 0.
 */
static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
{
	struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
	uint32_t temp_reg;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);


	/* Map the VDDC power-compensation GPIO pin to the matching
	 * CNB_PWRMGT_CNTL control field (one field per pin bit shift). */
	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
		temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
		switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
		case 0:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
			break;
		case 1:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
			break;
		case 2:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
			break;
		case 3:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
			break;
		case 4:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
			break;
		default:
			/* Unhandled pin assignment: leave the register as read. */
			break;
		}
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
	}

	/* No v1 powerplay table (e.g. v0 path): skip fan/temp setup. */
	if (table_info == NULL)
		return 0;

	if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
		hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
		/* Derive fan PWM limits from the controller defaults. */
		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;

		/* NOTE(review): RPM limits are expressed here in the same
		 * percent-style units as PWM (max 100) — confirm consumers. */
		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;

		/* Operating-temperature window: back the target off by 50
		 * (clamped at 0) and use it as the max limit. */
		table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
								(table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;

		table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
		table_info->cac_dtp_table->usOperatingTempStep = 1;
		table_info->cac_dtp_table->usOperatingTempHyst = 1;

		hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
			       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;

		hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
			       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;

		/* Mirror the temperature window into dyn_state for the
		 * common (v0-style) consumers. */
		hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
			       table_info->cac_dtp_table->usOperatingTempMinLimit;

		hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
			       table_info->cac_dtp_table->usOperatingTempMaxLimit;

		hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
			       table_info->cac_dtp_table->usDefaultTargetOperatingTemp;

		hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
			       table_info->cac_dtp_table->usOperatingTempStep;

		hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
			       table_info->cac_dtp_table->usTargetOperatingTemp;
		/* Advertise fuzzy fan control only when the feature mask allows it. */
		if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_ODFuzzyFanControlSupport);
	}

	return 0;
}
2234
2235/**
2236 * Change virtual leakage voltage to actual value.
2237 *
2238 * @param     hwmgr  the address of the powerplay hardware manager.
2239 * @param     pointer to changing voltage
2240 * @param     pointer to leakage table
2241 */
2242static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2243                uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
2244{
2245        uint32_t index;
2246
2247        /* search for leakage voltage ID 0xff01 ~ 0xff08 */
2248        for (index = 0; index < leakage_table->count; index++) {
2249                /* if this voltage matches a leakage voltage ID */
2250                /* patch with actual leakage voltage */
2251                if (leakage_table->leakage_id[index] == *voltage) {
2252                        *voltage = leakage_table->actual_voltage[index];
2253                        break;
2254                }
2255        }
2256
2257        if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2258                pr_err("Voltage value looks like a Leakage ID but it's not patched \n");
2259}
2260
2261
2262static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
2263                              struct phm_clock_voltage_dependency_table *tab)
2264{
2265        uint16_t i;
2266        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2267
2268        if (tab)
2269                for (i = 0; i < tab->count; i++)
2270                        smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2271                                                &data->vddc_leakage);
2272
2273        return 0;
2274}
2275
2276static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
2277                               struct phm_clock_voltage_dependency_table *tab)
2278{
2279        uint16_t i;
2280        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2281
2282        if (tab)
2283                for (i = 0; i < tab->count; i++)
2284                        smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2285                                                        &data->vddci_leakage);
2286
2287        return 0;
2288}
2289
2290static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
2291                                  struct phm_vce_clock_voltage_dependency_table *tab)
2292{
2293        uint16_t i;
2294        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2295
2296        if (tab)
2297                for (i = 0; i < tab->count; i++)
2298                        smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2299                                                        &data->vddc_leakage);
2300
2301        return 0;
2302}
2303
2304
2305static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
2306                                  struct phm_uvd_clock_voltage_dependency_table *tab)
2307{
2308        uint16_t i;
2309        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2310
2311        if (tab)
2312                for (i = 0; i < tab->count; i++)
2313                        smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2314                                                        &data->vddc_leakage);
2315
2316        return 0;
2317}
2318
2319static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
2320                                         struct phm_phase_shedding_limits_table *tab)
2321{
2322        uint16_t i;
2323        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2324
2325        if (tab)
2326                for (i = 0; i < tab->count; i++)
2327                        smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
2328                                                        &data->vddc_leakage);
2329
2330        return 0;
2331}
2332
2333static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
2334                                   struct phm_samu_clock_voltage_dependency_table *tab)
2335{
2336        uint16_t i;
2337        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2338
2339        if (tab)
2340                for (i = 0; i < tab->count; i++)
2341                        smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2342                                                        &data->vddc_leakage);
2343
2344        return 0;
2345}
2346
2347static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
2348                                  struct phm_acp_clock_voltage_dependency_table *tab)
2349{
2350        uint16_t i;
2351        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2352
2353        if (tab)
2354                for (i = 0; i < tab->count; i++)
2355                        smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2356                                        &data->vddc_leakage);
2357
2358        return 0;
2359}
2360
2361static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
2362                                  struct phm_clock_and_voltage_limits *tab)
2363{
2364        uint32_t vddc, vddci;
2365        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2366
2367        if (tab) {
2368                vddc = tab->vddc;
2369                smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
2370                                                   &data->vddc_leakage);
2371                tab->vddc = vddc;
2372                vddci = tab->vddci;
2373                smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
2374                                                   &data->vddci_leakage);
2375                tab->vddci = vddci;
2376        }
2377
2378        return 0;
2379}
2380
2381static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
2382{
2383        uint32_t i;
2384        uint32_t vddc;
2385        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2386
2387        if (tab) {
2388                for (i = 0; i < tab->count; i++) {
2389                        vddc = (uint32_t)(tab->entries[i].Vddc);
2390                        smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
2391                        tab->entries[i].Vddc = (uint16_t)vddc;
2392                }
2393        }
2394
2395        return 0;
2396}
2397
2398static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
2399{
2400        int tmp;
2401
2402        tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
2403        if (tmp)
2404                return -EINVAL;
2405
2406        tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
2407        if (tmp)
2408                return -EINVAL;
2409
2410        tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2411        if (tmp)
2412                return -EINVAL;
2413
2414        tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
2415        if (tmp)
2416                return -EINVAL;
2417
2418        tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
2419        if (tmp)
2420                return -EINVAL;
2421
2422        tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
2423        if (tmp)
2424                return -EINVAL;
2425
2426        tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
2427        if (tmp)
2428                return -EINVAL;
2429
2430        tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
2431        if (tmp)
2432                return -EINVAL;
2433
2434        tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
2435        if (tmp)
2436                return -EINVAL;
2437
2438        tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
2439        if (tmp)
2440                return -EINVAL;
2441
2442        tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
2443        if (tmp)
2444                return -EINVAL;
2445
2446        tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
2447        if (tmp)
2448                return -EINVAL;
2449
2450        return 0;
2451}
2452
2453
2454static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
2455{
2456        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2457
2458        struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
2459        struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
2460        struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
2461
2462        PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
2463                "VDDC dependency on SCLK table is missing. This table is mandatory",
2464                return -EINVAL);
2465        PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
2466                "VDDC dependency on SCLK table has to have is missing. This table is mandatory",
2467                return -EINVAL);
2468
2469        PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
2470                "VDDC dependency on MCLK table is missing. This table is mandatory",
2471                return -EINVAL);
2472        PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
2473                "VDD dependency on MCLK table has to have is missing. This table is mandatory",
2474                return -EINVAL);
2475
2476        data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
2477        data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2478
2479        hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
2480                allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
2481        hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
2482                allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
2483        hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
2484                allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2485
2486        if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
2487                data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
2488                data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
2489        }
2490
2491        if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1)
2492                hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
2493
2494        return 0;
2495}
2496
/*
 * Tear down the smu7 hwmgr backend: free the DAL power-level dependency
 * table and the backend private data, clearing both pointers so a repeated
 * fini (or a later use) sees NULL rather than dangling memory.
 * Always returns 0.
 */
static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
	kfree(hwmgr->backend);
	hwmgr->backend = NULL;

	return 0;
}
2506
2507static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr)
2508{
2509        uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id;
2510        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2511        int i;
2512
2513        if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) {
2514                for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
2515                        virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
2516                        if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci,
2517                                                                virtual_voltage_id,
2518                                                                efuse_voltage_id) == 0) {
2519                                if (vddc != 0 && vddc != virtual_voltage_id) {
2520                                        data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
2521                                        data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
2522                                        data->vddc_leakage.count++;
2523                                }
2524                                if (vddci != 0 && vddci != virtual_voltage_id) {
2525                                        data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci;
2526                                        data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id;
2527                                        data->vddci_leakage.count++;
2528                                }
2529                        }
2530                }
2531        }
2532        return 0;
2533}
2534
2535static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2536{
2537        struct smu7_hwmgr *data;
2538        int result = 0;
2539
2540        data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
2541        if (data == NULL)
2542                return -ENOMEM;
2543
2544        hwmgr->backend = data;
2545        smu7_patch_voltage_workaround(hwmgr);
2546        smu7_init_dpm_defaults(hwmgr);
2547
2548        /* Get leakage voltage based on leakage ID. */
2549        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2550                        PHM_PlatformCaps_EVV)) {
2551                result = smu7_get_evv_voltages(hwmgr);
2552                if (result) {
2553                        pr_info("Get EVV Voltage Failed.  Abort Driver loading!\n");
2554                        return -EINVAL;
2555                }
2556        } else {
2557                smu7_get_elb_voltages(hwmgr);
2558        }
2559
2560        if (hwmgr->pp_table_version == PP_TABLE_V1) {
2561                smu7_complete_dependency_tables(hwmgr);
2562                smu7_set_private_data_based_on_pptable_v1(hwmgr);
2563        } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
2564                smu7_patch_dependency_tables_with_leakage(hwmgr);
2565                smu7_set_private_data_based_on_pptable_v0(hwmgr);
2566        }
2567
2568        /* Initalize Dynamic State Adjustment Rule Settings */
2569        result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
2570
2571        if (0 == result) {
2572                struct amdgpu_device *adev = hwmgr->adev;
2573
2574                data->is_tlu_enabled = false;
2575
2576                hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
2577                                                        SMU7_MAX_HARDWARE_POWERLEVELS;
2578                hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
2579                hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
2580
2581                data->pcie_gen_cap = adev->pm.pcie_gen_mask;
2582                if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2583                        data->pcie_spc_cap = 20;
2584                data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
2585
2586                hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
2587/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
2588                hwmgr->platform_descriptor.clockStep.engineClock = 500;
2589                hwmgr->platform_descriptor.clockStep.memoryClock = 500;
2590                smu7_thermal_parameter_init(hwmgr);
2591        } else {
2592                /* Ignore return value in here, we are cleaning up a mess. */
2593                smu7_hwmgr_backend_fini(hwmgr);
2594        }
2595
2596        return 0;
2597}
2598
2599static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
2600{
2601        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2602        uint32_t level, tmp;
2603
2604        if (!data->pcie_dpm_key_disabled) {
2605                if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2606                        level = 0;
2607                        tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
2608                        while (tmp >>= 1)
2609                                level++;
2610
2611                        if (level)
2612                                smum_send_msg_to_smc_with_parameter(hwmgr,
2613                                                PPSMC_MSG_PCIeDPM_ForceLevel, level);
2614                }
2615        }
2616
2617        if (!data->sclk_dpm_key_disabled) {
2618                if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2619                        level = 0;
2620                        tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
2621                        while (tmp >>= 1)
2622                                level++;
2623
2624                        if (level)
2625                                smum_send_msg_to_smc_with_parameter(hwmgr,
2626                                                PPSMC_MSG_SCLKDPM_SetEnabledMask,
2627                                                (1 << level));
2628                }
2629        }
2630
2631        if (!data->mclk_dpm_key_disabled) {
2632                if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2633                        level = 0;
2634                        tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
2635                        while (tmp >>= 1)
2636                                level++;
2637
2638                        if (level)
2639                                smum_send_msg_to_smc_with_parameter(hwmgr,
2640                                                PPSMC_MSG_MCLKDPM_SetEnabledMask,
2641                                                (1 << level));
2642                }
2643        }
2644
2645        return 0;
2646}
2647
2648static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
2649{
2650        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2651
2652        if (hwmgr->pp_table_version == PP_TABLE_V1)
2653                phm_apply_dal_min_voltage_request(hwmgr);
2654/* TO DO  for v0 iceland and Ci*/
2655
2656        if (!data->sclk_dpm_key_disabled) {
2657                if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
2658                        smum_send_msg_to_smc_with_parameter(hwmgr,
2659                                        PPSMC_MSG_SCLKDPM_SetEnabledMask,
2660                                        data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2661        }
2662
2663        if (!data->mclk_dpm_key_disabled) {
2664                if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
2665                        smum_send_msg_to_smc_with_parameter(hwmgr,
2666                                        PPSMC_MSG_MCLKDPM_SetEnabledMask,
2667                                        data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2668        }
2669
2670        return 0;
2671}
2672
2673static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2674{
2675        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2676
2677        if (!smum_is_dpm_running(hwmgr))
2678                return -EINVAL;
2679
2680        if (!data->pcie_dpm_key_disabled) {
2681                smum_send_msg_to_smc(hwmgr,
2682                                PPSMC_MSG_PCIeDPM_UnForceLevel);
2683        }
2684
2685        return smu7_upload_dpm_level_enable_mask(hwmgr);
2686}
2687
2688static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2689{
2690        struct smu7_hwmgr *data =
2691                        (struct smu7_hwmgr *)(hwmgr->backend);
2692        uint32_t level;
2693
2694        if (!data->sclk_dpm_key_disabled)
2695                if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2696                        level = phm_get_lowest_enabled_level(hwmgr,
2697                                                              data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2698                        smum_send_msg_to_smc_with_parameter(hwmgr,
2699                                                            PPSMC_MSG_SCLKDPM_SetEnabledMask,
2700                                                            (1 << level));
2701
2702        }
2703
2704        if (!data->mclk_dpm_key_disabled) {
2705                if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2706                        level = phm_get_lowest_enabled_level(hwmgr,
2707                                                              data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2708                        smum_send_msg_to_smc_with_parameter(hwmgr,
2709                                                            PPSMC_MSG_MCLKDPM_SetEnabledMask,
2710                                                            (1 << level));
2711                }
2712        }
2713
2714        if (!data->pcie_dpm_key_disabled) {
2715                if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2716                        level = phm_get_lowest_enabled_level(hwmgr,
2717                                                              data->dpm_level_enable_mask.pcie_dpm_enable_mask);
2718                        smum_send_msg_to_smc_with_parameter(hwmgr,
2719                                                            PPSMC_MSG_PCIeDPM_ForceLevel,
2720                                                            (level));
2721                }
2722        }
2723
2724        return 0;
2725}
2726
/* Pick the SCLK/MCLK/PCIe level masks used by the profiling forced levels
 * (PROFILE_STANDARD / MIN_SCLK / MIN_MCLK / PEAK) from the golden DPM
 * tables, and cache the chosen clocks in hwmgr->pstate_sclk/pstate_mclk.
 * Returns -EINVAL when the golden mclk table is empty.
 */
static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
				uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask)
{
	uint32_t percentage;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
	int32_t tmp_mclk;
	int32_t tmp_sclk;
	int32_t count;

	if (golden_dpm_table->mclk_table.count < 1)
		return -EINVAL;

	/* Ratio of top golden sclk to top golden mclk; used below to derive
	 * a matching sclk from the selected mclk. */
	percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
			golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;

	if (golden_dpm_table->mclk_table.count == 1) {
		/* Only one mclk level: use it with a fixed 70% sclk ratio. */
		percentage = 70;
		tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
		*mclk_mask = golden_dpm_table->mclk_table.count - 1;
	} else {
		/* Otherwise profile on the second-highest mclk level. */
		tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
		*mclk_mask = golden_dpm_table->mclk_table.count - 2;
	}

	tmp_sclk = tmp_mclk * percentage / 100;

	if (hwmgr->pp_table_version == PP_TABLE_V0) {
		/* Snap tmp_sclk down to the highest vddc/sclk dependency
		 * entry that does not exceed it. */
		for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
			count >= 0; count--) {
			if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
				tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
				*sclk_mask = count;
				break;
			}
		}
		/* No entry matched, or the caller wants the minimum sclk. */
		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
			*sclk_mask = 0;
			tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
		}

		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			*sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
	} else if (hwmgr->pp_table_version == PP_TABLE_V1) {
		struct phm_ppt_v1_information *table_info =
				(struct phm_ppt_v1_information *)(hwmgr->pptable);

		/* Same selection as the v0 path, on the v1 dependency table. */
		for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
			if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
				tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
				*sclk_mask = count;
				break;
			}
		}
		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
			*sclk_mask = 0;
			tmp_sclk =  table_info->vdd_dep_on_sclk->entries[0].clk;
		}

		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
	}

	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
		*mclk_mask = 0;
	else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
		*mclk_mask = golden_dpm_table->mclk_table.count - 1;

	/* PCIe always profiles at its highest speed level. */
	*pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
	hwmgr->pstate_sclk = tmp_sclk;
	hwmgr->pstate_mclk = tmp_mclk;

	return 0;
}
2801
2802static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
2803                                enum amd_dpm_forced_level level)
2804{
2805        int ret = 0;
2806        uint32_t sclk_mask = 0;
2807        uint32_t mclk_mask = 0;
2808        uint32_t pcie_mask = 0;
2809
2810        if (hwmgr->pstate_sclk == 0)
2811                smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
2812
2813        switch (level) {
2814        case AMD_DPM_FORCED_LEVEL_HIGH:
2815                ret = smu7_force_dpm_highest(hwmgr);
2816                break;
2817        case AMD_DPM_FORCED_LEVEL_LOW:
2818                ret = smu7_force_dpm_lowest(hwmgr);
2819                break;
2820        case AMD_DPM_FORCED_LEVEL_AUTO:
2821                ret = smu7_unforce_dpm_levels(hwmgr);
2822                break;
2823        case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
2824        case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
2825        case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
2826        case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
2827                ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
2828                if (ret)
2829                        return ret;
2830                smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
2831                smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
2832                smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
2833                break;
2834        case AMD_DPM_FORCED_LEVEL_MANUAL:
2835        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
2836        default:
2837                break;
2838        }
2839
2840        if (!ret) {
2841                if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2842                        smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
2843                else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2844                        smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
2845        }
2846        return ret;
2847}
2848
/* Report the size of the smu7 hardware power-state structure so the core
 * powerplay code can allocate pp_power_state storage for this asic family.
 */
static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct smu7_power_state);
}
2853
2854static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
2855                                 uint32_t vblank_time_us)
2856{
2857        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2858        uint32_t switch_limit_us;
2859
2860        switch (hwmgr->chip_id) {
2861        case CHIP_POLARIS10:
2862        case CHIP_POLARIS11:
2863        case CHIP_POLARIS12:
2864                if (hwmgr->is_kicker)
2865                        switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
2866                else
2867                        switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
2868                break;
2869        case CHIP_VEGAM:
2870                switch_limit_us = 30;
2871                break;
2872        default:
2873                switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
2874                break;
2875        }
2876
2877        if (vblank_time_us < switch_limit_us)
2878                return true;
2879        else
2880                return false;
2881}
2882
/* Adjust a requested power state in place before it is programmed:
 * clamp clocks to the AC/DC limits, honor display minimum clocks and the
 * stable-pstate cap, and decide whether mclk switching must be disabled
 * (multi-display out of sync, frame lock, or vblank too short), in which
 * case both performance levels are pinned to the same memory clock.
 * current_ps is unused here; only request_ps is modified.
 */
static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				struct pp_power_state *request_ps,
			const struct pp_power_state *current_ps)
{
	struct amdgpu_device *adev = hwmgr->adev;
	struct smu7_power_state *smu7_ps =
				cast_phw_smu7_power_state(&request_ps->hardware);
	uint32_t sclk;
	uint32_t mclk;
	struct PP_Clocks minimum_clocks = {0};
	bool disable_mclk_switching;
	bool disable_mclk_switching_for_frame_lock;
	const struct phm_clock_and_voltage_limits *max_limits;
	uint32_t i;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	int32_t count;
	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;

	data->battery_state = (PP_StateUILabel_Battery ==
			request_ps->classification.ui_label);

	/* The code below indexes levels [0] (low) and [1] (high) directly. */
	PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
				 "VI should always have 2 performance levels",
				);

	max_limits = adev->pm.ac_power ?
			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
			&(hwmgr->dyn_state.max_clock_voltage_on_dc);

	/* Cap clock DPM tables at DC MAX if it is in DC. */
	if (!adev->pm.ac_power) {
		for (i = 0; i < smu7_ps->performance_level_count; i++) {
			if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
				smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
			if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
				smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
		}
	}

	minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
	minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;

	/* Stable pstate: pin sclk near 75% of the AC max (snapped down to a
	 * vdd_dep_on_sclk table entry) and mclk to the AC max. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_StablePState)) {
		max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
		stable_pstate_sclk = (max_limits->sclk * 75) / 100;

		for (count = table_info->vdd_dep_on_sclk->count - 1;
				count >= 0; count--) {
			if (stable_pstate_sclk >=
					table_info->vdd_dep_on_sclk->entries[count].clk) {
				stable_pstate_sclk =
						table_info->vdd_dep_on_sclk->entries[count].clk;
				break;
			}
		}

		/* 75% is below the lowest entry: fall back to entry 0. */
		if (count < 0)
			stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;

		stable_pstate_mclk = max_limits->mclk;

		minimum_clocks.engineClock = stable_pstate_sclk;
		minimum_clocks.memoryClock = stable_pstate_mclk;
	}

	disable_mclk_switching_for_frame_lock = phm_cap_enabled(
				    hwmgr->platform_descriptor.platformCaps,
				    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);


	/* Mclk switching stays enabled with no displays; otherwise it is
	 * disabled for unsynced multi-display, frame lock, or a vblank
	 * window too short to hide the switch. */
	if (hwmgr->display_config->num_display == 0)
		disable_mclk_switching = false;
	else
		disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
					  !hwmgr->display_config->multi_monitor_in_sync) ||
			disable_mclk_switching_for_frame_lock ||
			smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);

	sclk = smu7_ps->performance_levels[0].engine_clock;
	mclk = smu7_ps->performance_levels[0].memory_clock;

	/* With switching disabled, the low level must run at the high mclk. */
	if (disable_mclk_switching)
		mclk = smu7_ps->performance_levels
		[smu7_ps->performance_level_count - 1].memory_clock;

	/* Raise the low level to the display minimums, capped at max. */
	if (sclk < minimum_clocks.engineClock)
		sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
				max_limits->sclk : minimum_clocks.engineClock;

	if (mclk < minimum_clocks.memoryClock)
		mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
				max_limits->mclk : minimum_clocks.memoryClock;

	smu7_ps->performance_levels[0].engine_clock = sclk;
	smu7_ps->performance_levels[0].memory_clock = mclk;

	/* Keep the high level's sclk at or above the low level's. */
	smu7_ps->performance_levels[1].engine_clock =
		(smu7_ps->performance_levels[1].engine_clock >=
				smu7_ps->performance_levels[0].engine_clock) ?
						smu7_ps->performance_levels[1].engine_clock :
						smu7_ps->performance_levels[0].engine_clock;

	if (disable_mclk_switching) {
		/* Pin both levels to one mclk so no switch ever happens. */
		if (mclk < smu7_ps->performance_levels[1].memory_clock)
			mclk = smu7_ps->performance_levels[1].memory_clock;

		smu7_ps->performance_levels[0].memory_clock = mclk;
		smu7_ps->performance_levels[1].memory_clock = mclk;
	} else {
		if (smu7_ps->performance_levels[1].memory_clock <
				smu7_ps->performance_levels[0].memory_clock)
			smu7_ps->performance_levels[1].memory_clock =
					smu7_ps->performance_levels[0].memory_clock;
	}

	/* Stable pstate overrides everything with fixed clocks/PCIe caps. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_StablePState)) {
		for (i = 0; i < smu7_ps->performance_level_count; i++) {
			smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
			smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
			smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
			/* NOTE(review): pcie_lane is assigned from
			 * pcie_gen_performance.max, not a lane field —
			 * looks suspicious; confirm against the struct. */
			smu7_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
		}
	}
	return 0;
}
3012
3013
3014static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3015{
3016        struct pp_power_state  *ps;
3017        struct smu7_power_state  *smu7_ps;
3018
3019        if (hwmgr == NULL)
3020                return -EINVAL;
3021
3022        ps = hwmgr->request_ps;
3023
3024        if (ps == NULL)
3025                return -EINVAL;
3026
3027        smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3028
3029        if (low)
3030                return smu7_ps->performance_levels[0].memory_clock;
3031        else
3032                return smu7_ps->performance_levels
3033                                [smu7_ps->performance_level_count-1].memory_clock;
3034}
3035
3036static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3037{
3038        struct pp_power_state  *ps;
3039        struct smu7_power_state  *smu7_ps;
3040
3041        if (hwmgr == NULL)
3042                return -EINVAL;
3043
3044        ps = hwmgr->request_ps;
3045
3046        if (ps == NULL)
3047                return -EINVAL;
3048
3049        smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3050
3051        if (low)
3052                return smu7_ps->performance_levels[0].engine_clock;
3053        else
3054                return smu7_ps->performance_levels
3055                                [smu7_ps->performance_level_count-1].engine_clock;
3056}
3057
/* Patch the boot power state with the VBIOS boot-up clocks, voltages and
 * PCIe settings read from the ATOM FirmwareInfo table. Caches the values
 * in data->vbios_boot_state and writes level 0 of @hw_ps.
 * Returns 0 even when the table is absent (test environments).
 */
static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *hw_ps)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
	uint16_t size;
	uint8_t frev, crev;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);

	/* First retrieve the Boot clocks and VDDC from the firmware info table.
	 * We assume here that fw_info is unchanged if this call fails.
	 */
	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index,
			&size, &frev, &crev);
	if (!fw_info)
		/* During a test, there is no firmware info table. */
		return 0;

	/* Patch the state. */
	/* ATOM tables are little-endian; convert each field on read. */
	data->vbios_boot_state.sclk_bootup_value =
			le32_to_cpu(fw_info->ulDefaultEngineClock);
	data->vbios_boot_state.mclk_bootup_value =
			le32_to_cpu(fw_info->ulDefaultMemoryClock);
	data->vbios_boot_state.mvdd_bootup_value =
			le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
	data->vbios_boot_state.vddc_bootup_value =
			le16_to_cpu(fw_info->usBootUpVDDCVoltage);
	data->vbios_boot_state.vddci_bootup_value =
			le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
	/* PCIe boot settings are read back from the hardware, not the table. */
	data->vbios_boot_state.pcie_gen_bootup_value =
			smu7_get_current_pcie_speed(hwmgr);

	data->vbios_boot_state.pcie_lane_bootup_value =
			(uint16_t)smu7_get_current_pcie_lane_number(hwmgr);

	/* set boot power state */
	ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
	ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
	ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;

	return 0;
}
3102
3103static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
3104{
3105        int result;
3106        unsigned long ret = 0;
3107
3108        if (hwmgr->pp_table_version == PP_TABLE_V0) {
3109                result = pp_tables_get_num_of_entries(hwmgr, &ret);
3110                return result ? 0 : ret;
3111        } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
3112                result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
3113                return result;
3114        }
3115        return 0;
3116}
3117
/* Per-entry callback for v1 (Tonga-format) powerplay tables: translates
 * one ATOM_Tonga_State into a two-level smu7 power state — a low level
 * from the *IndexLow fields and a high level from the *IndexHigh fields —
 * plus the classification/display flags on @power_state.
 */
static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
		void *state, struct pp_power_state *power_state,
		void *pp_table, uint32_t classification_flag)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_power_state  *smu7_power_state =
			(struct smu7_power_state *)(&(power_state->hardware));
	struct smu7_performance_level *performance_level;
	ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
	ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
			(ATOM_Tonga_POWERPLAYTABLE *)pp_table;
	/* Sclk dependency table revision differs between Tonga (rev 0) and
	 * Polaris (rev 1); start from the generic header and cast below. */
	PPTable_Generic_SubTable_Header *sclk_dep_table =
			(PPTable_Generic_SubTable_Header *)
			(((unsigned long)powerplay_table) +
				le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));

	ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
			(ATOM_Tonga_MCLK_Dependency_Table *)
			(((unsigned long)powerplay_table) +
				le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));

	/* The following fields are not initialized here: id orderedList allStatesList */
	power_state->classification.ui_label =
			(le16_to_cpu(state_entry->usClassification) &
			ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
			ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
	power_state->classification.flags = classification_flag;
	/* NOTE: There is a classification2 flag in BIOS that is not being used right now */

	power_state->classification.temporary_state = false;
	power_state->classification.to_be_deleted = false;

	power_state->validation.disallowOnDC =
			(0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
					ATOM_Tonga_DISALLOW_ON_DC));

	power_state->pcie.lanes = 0;

	power_state->display.disableFrameModulation = false;
	power_state->display.limitRefreshrate = false;
	power_state->display.enableVariBright =
			(0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
					ATOM_Tonga_ENABLE_VARIBRIGHT));

	power_state->validation.supportedPowerLevels = 0;
	power_state->uvd_clocks.VCLK = 0;
	power_state->uvd_clocks.DCLK = 0;
	power_state->temperatures.min = 0;
	power_state->temperatures.max = 0;

	/* Low performance level (post-increment reserves slot 0). */
	performance_level = &(smu7_power_state->performance_levels
			[smu7_power_state->performance_level_count++]);

	PP_ASSERT_WITH_CODE(
			(smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
			"Performance levels exceeds SMC limit!",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(
			(smu7_power_state->performance_level_count <=
					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
			"Performance levels exceeds Driver limit!",
			return -EINVAL);

	/* Performance levels are arranged from low to high. */
	/* NOTE(review): ulMclk/ulSclk are read without le32_to_cpu, unlike
	 * the table offsets above — verify on big-endian hosts. */
	performance_level->memory_clock = mclk_dep_table->entries
			[state_entry->ucMemoryClockIndexLow].ulMclk;
	if (sclk_dep_table->ucRevId == 0)
		performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
			[state_entry->ucEngineClockIndexLow].ulSclk;
	else if (sclk_dep_table->ucRevId == 1)
		performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
			[state_entry->ucEngineClockIndexLow].ulSclk;
	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
			state_entry->ucPCIEGenLow);
	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
			state_entry->ucPCIELaneLow);

	/* High performance level (slot 1). */
	performance_level = &(smu7_power_state->performance_levels
			[smu7_power_state->performance_level_count++]);
	performance_level->memory_clock = mclk_dep_table->entries
			[state_entry->ucMemoryClockIndexHigh].ulMclk;

	if (sclk_dep_table->ucRevId == 0)
		performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
			[state_entry->ucEngineClockIndexHigh].ulSclk;
	else if (sclk_dep_table->ucRevId == 1)
		performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
			[state_entry->ucEngineClockIndexHigh].ulSclk;

	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
			state_entry->ucPCIEGenHigh);
	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
			state_entry->ucPCIELaneHigh);

	return 0;
}
3215
/* Load one v1 powerplay table entry into @state via the v1 callback, then
 * post-process it: sanity-check single-entry MCLK/VDDCI tables against the
 * VBIOS boot values, mark DC compatibility, record the ACPI PCIe gen, and
 * widen the driver's PCIe performance/power-saving min-max ranges from the
 * state's levels. Always returns 0.
 */
static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
		unsigned long entry_index, struct pp_power_state *state)
{
	int result;
	struct smu7_power_state *ps;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
			table_info->vdd_dep_on_mclk;

	state->hardware.magic = PHM_VIslands_Magic;

	ps = (struct smu7_power_state *)(&state->hardware);

	result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
			smu7_get_pp_table_entry_callback_func_v1);

	/* This is the earliest time we have all the dependency table and the VBIOS boot state
	 * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state
	 * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state
	 */
	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
		if (dep_mclk_table->entries[0].clk !=
				data->vbios_boot_state.mclk_bootup_value)
			pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot MCLK level");
		if (dep_mclk_table->entries[0].vddci !=
				data->vbios_boot_state.vddci_bootup_value)
			pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot VDDCI level");
	}

	/* set DC compatible flag if this state supports DC */
	if (!state->validation.disallowOnDC)
		ps->dc_compatible = true;

	/* Remember the PCIe gen of the ACPI state for later use. */
	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;

	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;

	/* Widen the cached PCIe gen/lane ranges from this state's levels,
	 * bucketed by whether it is a performance or battery state. */
	if (!result) {
		uint32_t i;

		switch (state->classification.ui_label) {
		case PP_StateUILabel_Performance:
			data->use_pcie_performance_levels = true;
			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_performance.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_performance.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_performance.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.max =
							ps->performance_levels[i].pcie_lane;
				if (data->pcie_lane_performance.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		case PP_StateUILabel_Battery:
			data->use_pcie_power_saving_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_power_saving.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_power_saving.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_power_saving.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_power_saving.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		default:
			break;
		}
	}
	return 0;
}
3317
3318static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
3319                                        struct pp_hw_power_state *power_state,
3320                                        unsigned int index, const void *clock_info)
3321{
3322        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3323        struct smu7_power_state  *ps = cast_phw_smu7_power_state(power_state);
3324        const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
3325        struct smu7_performance_level *performance_level;
3326        uint32_t engine_clock, memory_clock;
3327        uint16_t pcie_gen_from_bios;
3328
3329        engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
3330        memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;
3331
3332        if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
3333                data->highest_mclk = memory_clock;
3334
3335        PP_ASSERT_WITH_CODE(
3336                        (ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
3337                        "Performance levels exceeds SMC limit!",
3338                        return -EINVAL);
3339
3340        PP_ASSERT_WITH_CODE(
3341                        (ps->performance_level_count <
3342                                        hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3343                        "Performance levels exceeds Driver limit, Skip!",
3344                        return 0);
3345
3346        performance_level = &(ps->performance_levels
3347                        [ps->performance_level_count++]);
3348
3349        /* Performance levels are arranged from low to high. */
3350        performance_level->memory_clock = memory_clock;
3351        performance_level->engine_clock = engine_clock;
3352
3353        pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
3354
3355        performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
3356        performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);
3357
3358        return 0;
3359}
3360
3361static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
3362                unsigned long entry_index, struct pp_power_state *state)
3363{
3364        int result;
3365        struct smu7_power_state *ps;
3366        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3367        struct phm_clock_voltage_dependency_table *dep_mclk_table =
3368                        hwmgr->dyn_state.vddci_dependency_on_mclk;
3369
3370        memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));
3371
3372        state->hardware.magic = PHM_VIslands_Magic;
3373
3374        ps = (struct smu7_power_state *)(&state->hardware);
3375
3376        result = pp_tables_get_entry(hwmgr, entry_index, state,
3377                        smu7_get_pp_table_entry_callback_func_v0);
3378
3379        /*
3380         * This is the earliest time we have all the dependency table
3381         * and the VBIOS boot state as
3382         * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot
3383         * state if there is only one VDDCI/MCLK level, check if it's
3384         * the same as VBIOS boot state
3385         */
3386        if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3387                if (dep_mclk_table->entries[0].clk !=
3388                                data->vbios_boot_state.mclk_bootup_value)
3389                        pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
3390                                        "does not match VBIOS boot MCLK level");
3391                if (dep_mclk_table->entries[0].v !=
3392                                data->vbios_boot_state.vddci_bootup_value)
3393                        pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
3394                                        "does not match VBIOS boot VDDCI level");
3395        }
3396
3397        /* set DC compatible flag if this state supports DC */
3398        if (!state->validation.disallowOnDC)
3399                ps->dc_compatible = true;
3400
3401        if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3402                data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3403
3404        ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3405        ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3406
3407        if (!result) {
3408                uint32_t i;
3409
3410                switch (state->classification.ui_label) {
3411                case PP_StateUILabel_Performance:
3412                        data->use_pcie_performance_levels = true;
3413
3414                        for (i = 0; i < ps->performance_level_count; i++) {
3415                                if (data->pcie_gen_performance.max <
3416                                                ps->performance_levels[i].pcie_gen)
3417                                        data->pcie_gen_performance.max =
3418                                                        ps->performance_levels[i].pcie_gen;
3419
3420                                if (data->pcie_gen_performance.min >
3421                                                ps->performance_levels[i].pcie_gen)
3422                                        data->pcie_gen_performance.min =
3423                                                        ps->performance_levels[i].pcie_gen;
3424
3425                                if (data->pcie_lane_performance.max <
3426                                                ps->performance_levels[i].pcie_lane)
3427                                        data->pcie_lane_performance.max =
3428                                                        ps->performance_levels[i].pcie_lane;
3429
3430                                if (data->pcie_lane_performance.min >
3431                                                ps->performance_levels[i].pcie_lane)
3432                                        data->pcie_lane_performance.min =
3433                                                        ps->performance_levels[i].pcie_lane;
3434                        }
3435                        break;
3436                case PP_StateUILabel_Battery:
3437                        data->use_pcie_power_saving_levels = true;
3438
3439                        for (i = 0; i < ps->performance_level_count; i++) {
3440                                if (data->pcie_gen_power_saving.max <
3441                                                ps->performance_levels[i].pcie_gen)
3442                                        data->pcie_gen_power_saving.max =
3443                                                        ps->performance_levels[i].pcie_gen;
3444
3445                                if (data->pcie_gen_power_saving.min >
3446                                                ps->performance_levels[i].pcie_gen)
3447                                        data->pcie_gen_power_saving.min =
3448                                                        ps->performance_levels[i].pcie_gen;
3449
3450                                if (data->pcie_lane_power_saving.max <
3451                                                ps->performance_levels[i].pcie_lane)
3452                                        data->pcie_lane_power_saving.max =
3453                                                        ps->performance_levels[i].pcie_lane;
3454
3455                                if (data->pcie_lane_power_saving.min >
3456                                                ps->performance_levels[i].pcie_lane)
3457                                        data->pcie_lane_power_saving.min =
3458                                                        ps->performance_levels[i].pcie_lane;
3459                        }
3460                        break;
3461                default:
3462                        break;
3463                }
3464        }
3465        return 0;
3466}
3467
3468static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3469                unsigned long entry_index, struct pp_power_state *state)
3470{
3471        if (hwmgr->pp_table_version == PP_TABLE_V0)
3472                return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
3473        else if (hwmgr->pp_table_version == PP_TABLE_V1)
3474                return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);
3475
3476        return 0;
3477}
3478
/*
 * smu7_get_gpu_power - read the current GPU package power from the SMC.
 * @hwmgr: hardware manager handle
 * @query: output; receives the raw power value read back from the SMC
 *
 * Tries the direct PPSMC_MSG_GetCurrPkgPwr query first (where supported),
 * then falls back to the PM status-log sampling interface.  Returns 0 on
 * success (the stored value may still be 0 if no sample was produced),
 * -EINVAL if @query is NULL.
 */
static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
{
	struct amdgpu_device *adev = hwmgr->adev;
	int i;
	u32 tmp = 0;

	if (!query)
		return -EINVAL;

	/*
	 * PPSMC_MSG_GetCurrPkgPwr is not supported on:
	 *  - Hawaii
	 *  - Bonaire
	 *  - Fiji
	 *  - Tonga
	 */
	if ((adev->asic_type != CHIP_HAWAII) &&
	    (adev->asic_type != CHIP_BONAIRE) &&
	    (adev->asic_type != CHIP_FIJI) &&
	    (adev->asic_type != CHIP_TONGA)) {
		/* Fast path: the SMC answers in the message-argument register. */
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
		tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
		*query = tmp;

		/* A zero reply carries no data; fall through to sampling below. */
		if (tmp != 0)
			return 0;
	}

	/* Fallback: start PM status logging and poll for a sampled value. */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
							ixSMU_PM_STATUS_95, 0);

	for (i = 0; i < 10; i++) {
		/* Give the SMC time to accumulate a sample (up to 10 x 500 ms). */
		msleep(500);
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
		tmp = cgs_read_ind_register(hwmgr->device,
						CGS_IND_REG__SMC,
						ixSMU_PM_STATUS_95);
		if (tmp != 0)
			break;
	}
	*query = tmp;

	return 0;
}
3524
/*
 * smu7_read_sensor - read one hwmon/debugfs sensor value.
 * @hwmgr: hardware manager handle
 * @idx:   AMDGPU_PP_SENSOR_* selector
 * @value: output buffer for the reading
 * @size:  in: capacity of @value in bytes; out: bytes written (4 here)
 *
 * Returns 0 on success, -EINVAL for an unknown sensor or a too-small buffer.
 */
static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
			    void *value, int *size)
{
	uint32_t sclk, mclk, activity_percent;
	uint32_t offset, val_vid;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* size must be at least 4 bytes for all sensors */
	if (*size < 4)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		/* Current engine clock, reported by the SMC in the reply register. */
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
		sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
		*((uint32_t *)value) = sclk;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		/* Current memory clock, same reply-register mechanism. */
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
		mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
		*((uint32_t *)value) = mclk;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
	case AMDGPU_PP_SENSOR_MEM_LOAD:
		/* Average activity lives in the SMC soft-register area. */
		offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
								SMU_SoftRegisters,
								(idx == AMDGPU_PP_SENSOR_GPU_LOAD) ?
								AverageGraphicsActivity:
								AverageMemoryActivity);

		activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
		/* Raw value appears to be 8.8 fixed point: round, scale, clamp to 100. */
		activity_percent += 0x80;
		activity_percent >>= 8;
		*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr);
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		/* 1 = UVD powered, 0 = power-gated. */
		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		/* 1 = VCE powered, 0 = power-gated. */
		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
	case AMDGPU_PP_SENSOR_VDDGFX:
		/* Pick the SVI2 plane carrying VDDGFX; vr_config low byte == 2
		 * selects plane 2 — presumably the merged-rail layout; confirm
		 * against the voltage-regulator configuration docs. */
		if ((data->vr_config & 0xff) == 0x2)
			val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
					CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID);
		else
			val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
					CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID);

		*((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid);
		return 0;
	default:
		return -EINVAL;
	}
}
3591
/*
 * Compare the new power state's top SCLK/MCLK against the current DPM
 * tables and set the need_update_smu7_dpm_table flags describing what
 * must be re-uploaded to the SMU (overdrive bump vs. plain update).
 */
static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct smu7_power_state *smu7_ps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	/* Highest performance level carries the state's target clocks. */
	uint32_t sclk = smu7_ps->performance_levels
			[smu7_ps->performance_level_count - 1].engine_clock;
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	uint32_t mclk = smu7_ps->performance_levels
			[smu7_ps->performance_level_count - 1].memory_clock;
	/* NOTE(review): min_clocks stays zero-filled — the deep-sleep check
	 * below effectively compares against 0; see the TODO further down. */
	struct PP_Clocks min_clocks = {0};
	uint32_t i;

	/* Look for an exact SCLK match in the current DPM table. */
	for (i = 0; i < sclk_table->count; i++) {
		if (sclk == sclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= sclk_table->count) {
		/* Not in the table: an overdrive SCLK — raise the top level. */
		if (sclk > sclk_table->dpm_levels[i-1].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
			sclk_table->dpm_levels[i-1].value = sclk;
		}
	} else {
	/* TODO: Check SCLK in DAL's minimum clocks
	 * in case DeepSleep divider update is required.
	 */
		if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
			(min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
				data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
			data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
	}

	/* Same exact-match search for the memory clock. */
	for (i = 0; i < mclk_table->count; i++) {
		if (mclk == mclk_table->dpm_levels[i].value)
			break;
	}

	/* Unlike SCLK there is no deep-sleep path for MCLK — only OD bump. */
	if (i >= mclk_table->count) {
		if (mclk > mclk_table->dpm_levels[i-1].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
			mclk_table->dpm_levels[i-1].value = mclk;
		}
	}

	/* A display count change also forces an MCLK table refresh. */
	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;

	return 0;
}
3645
3646static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
3647                const struct smu7_power_state *smu7_ps)
3648{
3649        uint32_t i;
3650        uint32_t sclk, max_sclk = 0;
3651        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3652        struct smu7_dpm_table *dpm_table = &data->dpm_table;
3653
3654        for (i = 0; i < smu7_ps->performance_level_count; i++) {
3655                sclk = smu7_ps->performance_levels[i].engine_clock;
3656                if (max_sclk < sclk)
3657                        max_sclk = sclk;
3658        }
3659
3660        for (i = 0; i < dpm_table->sclk_table.count; i++) {
3661                if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
3662                        return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
3663                                        dpm_table->pcie_speed_table.dpm_levels
3664                                        [dpm_table->pcie_speed_table.count - 1].value :
3665                                        dpm_table->pcie_speed_table.dpm_levels[i].value);
3666        }
3667
3668        return 0;
3669}
3670
/*
 * Before switching power state, ask the platform (via ACPI PSPP) to raise
 * the PCIe link speed if the new state needs a faster link than the
 * current one.  On a denied request the code falls back one generation at
 * a time, recording the achieved gen in data->force_pcie_gen.  A pending
 * *downshift* is only noted here (pspp_notify_required) and requested
 * after the state change completes.
 */
static int smu7_request_link_speed_change_before_state_change(
		struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct smu7_power_state *smu7_nps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	const struct smu7_power_state *polaris10_cps =
			cast_const_phw_smu7_power_state(states->pcurrent_state);

	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
	uint16_t current_link_speed;

	/* A forced gen from a previous fallback overrides the current state. */
	if (data->force_pcie_gen == PP_PCIEGenInvalid)
		current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps);
	else
		current_link_speed = data->force_pcie_gen;

	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->pspp_notify_required = false;

	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
#ifdef CONFIG_ACPI
		case PP_PCIEGen3:
			/* Try Gen3 first; on refusal fall back toward Gen2. */
			if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN3, false))
				break;
			data->force_pcie_gen = PP_PCIEGen2;
			if (current_link_speed == PP_PCIEGen2)
				break;
			/* fall through */
		case PP_PCIEGen2:
			if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
				break;
#endif
			/* fall through */
		default:
			/* All requests denied (or no ACPI): keep whatever the link runs at now. */
			data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
			break;
		}
	} else {
		/* Downshift is deferred until after the state change. */
		if (target_link_speed < current_link_speed)
			data->pspp_notify_required = true;
	}

	return 0;
}
3719
3720static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3721{
3722        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3723
3724        if (0 == data->need_update_smu7_dpm_table)
3725                return 0;
3726
3727        if ((0 == data->sclk_dpm_key_disabled) &&
3728                (data->need_update_smu7_dpm_table &
3729                        (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
3730                PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3731                                "Trying to freeze SCLK DPM when DPM is disabled",
3732                                );
3733                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3734                                PPSMC_MSG_SCLKDPM_FreezeLevel),
3735                                "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
3736                                return -EINVAL);
3737        }
3738
3739        if ((0 == data->mclk_dpm_key_disabled) &&
3740                (data->need_update_smu7_dpm_table &
3741                 DPMTABLE_OD_UPDATE_MCLK)) {
3742                PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3743                                "Trying to freeze MCLK DPM when DPM is disabled",
3744                                );
3745                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3746                                PPSMC_MSG_MCLKDPM_FreezeLevel),
3747                                "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
3748                                return -EINVAL);
3749        }
3750
3751        return 0;
3752}
3753
3754static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
3755                struct pp_hwmgr *hwmgr, const void *input)
3756{
3757        int result = 0;
3758        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3759        struct smu7_dpm_table *dpm_table = &data->dpm_table;
3760        uint32_t count;
3761        struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
3762        struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
3763        struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
3764
3765        if (0 == data->need_update_smu7_dpm_table)
3766                return 0;
3767
3768        if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
3769                for (count = 0; count < dpm_table->sclk_table.count; count++) {
3770                        dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled;
3771                        dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock;
3772                }
3773        }
3774
3775        if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
3776                for (count = 0; count < dpm_table->mclk_table.count; count++) {
3777                        dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled;
3778                        dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock;
3779                }
3780        }
3781
3782        if (data->need_update_smu7_dpm_table &
3783                        (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
3784                result = smum_populate_all_graphic_levels(hwmgr);
3785                PP_ASSERT_WITH_CODE((0 == result),
3786                                "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
3787                                return result);
3788        }
3789
3790        if (data->need_update_smu7_dpm_table &
3791                        (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3792                /*populate MCLK dpm table to SMU7 */
3793                result = smum_populate_all_memory_levels(hwmgr);
3794                PP_ASSERT_WITH_CODE((0 == result),
3795                                "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
3796                                return result);
3797        }
3798
3799        return result;
3800}
3801
3802static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3803                          struct smu7_single_dpm_table *dpm_table,
3804                        uint32_t low_limit, uint32_t high_limit)
3805{
3806        uint32_t i;
3807
3808        for (i = 0; i < dpm_table->count; i++) {
3809        /*skip the trim if od is enabled*/
3810                if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit
3811                        || dpm_table->dpm_levels[i].value > high_limit))
3812                        dpm_table->dpm_levels[i].enabled = false;
3813                else
3814                        dpm_table->dpm_levels[i].enabled = true;
3815        }
3816
3817        return 0;
3818}
3819
3820static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
3821                const struct smu7_power_state *smu7_ps)
3822{
3823        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3824        uint32_t high_limit_count;
3825
3826        PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
3827                        "power state did not have any performance level",
3828                        return -EINVAL);
3829
3830        high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;
3831
3832        smu7_trim_single_dpm_states(hwmgr,
3833                        &(data->dpm_table.sclk_table),
3834                        smu7_ps->performance_levels[0].engine_clock,
3835                        smu7_ps->performance_levels[high_limit_count].engine_clock);
3836
3837        smu7_trim_single_dpm_states(hwmgr,
3838                        &(data->dpm_table.mclk_table),
3839                        smu7_ps->performance_levels[0].memory_clock,
3840                        smu7_ps->performance_levels[high_limit_count].memory_clock);
3841
3842        return 0;
3843}
3844
3845static int smu7_generate_dpm_level_enable_mask(
3846                struct pp_hwmgr *hwmgr, const void *input)
3847{
3848        int result = 0;
3849        const struct phm_set_power_state_input *states =
3850                        (const struct phm_set_power_state_input *)input;
3851        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3852        const struct smu7_power_state *smu7_ps =
3853                        cast_const_phw_smu7_power_state(states->pnew_state);
3854
3855
3856        result = smu7_trim_dpm_states(hwmgr, smu7_ps);
3857        if (result)
3858                return result;
3859
3860        data->dpm_level_enable_mask.sclk_dpm_enable_mask =
3861                        phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
3862        data->dpm_level_enable_mask.mclk_dpm_enable_mask =
3863                        phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
3864        data->dpm_level_enable_mask.pcie_dpm_enable_mask =
3865                        phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
3866
3867        return 0;
3868}
3869
3870static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3871{
3872        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3873
3874        if (0 == data->need_update_smu7_dpm_table)
3875                return 0;
3876
3877        if ((0 == data->sclk_dpm_key_disabled) &&
3878                (data->need_update_smu7_dpm_table &
3879                (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
3880
3881                PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3882                                "Trying to Unfreeze SCLK DPM when DPM is disabled",
3883                                );
3884                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3885                                PPSMC_MSG_SCLKDPM_UnfreezeLevel),
3886                        "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
3887                        return -EINVAL);
3888        }
3889
3890        if ((0 == data->mclk_dpm_key_disabled) &&
3891                (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
3892
3893                PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3894                                "Trying to Unfreeze MCLK DPM when DPM is disabled",
3895                                );
3896                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3897                                PPSMC_MSG_MCLKDPM_UnfreezeLevel),
3898                    "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
3899                    return -EINVAL);
3900        }
3901
3902        data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
3903
3904        return 0;
3905}
3906
3907static int smu7_notify_link_speed_change_after_state_change(
3908                struct pp_hwmgr *hwmgr, const void *input)
3909{
3910        const struct phm_set_power_state_input *states =
3911                        (const struct phm_set_power_state_input *)input;
3912        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3913        const struct smu7_power_state *smu7_ps =
3914                        cast_const_phw_smu7_power_state(states->pnew_state);
3915        uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
3916        uint8_t  request;
3917
3918        if (data->pspp_notify_required) {
3919                if (target_link_speed == PP_PCIEGen3)
3920                        request = PCIE_PERF_REQ_GEN3;
3921                else if (target_link_speed == PP_PCIEGen2)
3922                        request = PCIE_PERF_REQ_GEN2;
3923                else
3924                        request = PCIE_PERF_REQ_GEN1;
3925
3926                if (request == PCIE_PERF_REQ_GEN1 &&
3927                                smu7_get_current_pcie_speed(hwmgr) > 0)
3928                        return 0;
3929
3930#ifdef CONFIG_ACPI
3931                if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) {
3932                        if (PP_PCIEGen2 == target_link_speed)
3933                                pr_info("PSPP request to switch to Gen2 from Gen3 Failed!");
3934                        else
3935                                pr_info("PSPP request to switch to Gen1 from Gen2 Failed!");
3936                }
3937#endif
3938        }
3939
3940        return 0;
3941}
3942
3943static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
3944{
3945        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3946
3947        if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
3948                if (hwmgr->chip_id == CHIP_VEGAM)
3949                        smum_send_msg_to_smc_with_parameter(hwmgr,
3950                                        (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2);
3951                else
3952                        smum_send_msg_to_smc_with_parameter(hwmgr,
3953                                        (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
3954        }
3955        return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ?  0 : -EINVAL;
3956}
3957
/*
 * smu7_set_power_state_tasks - program the hardware for a new power state.
 *
 * Runs the fixed sequence required for a state switch: match DPM clocks
 * to the requested state, optionally request a PCIe link-speed change,
 * freeze SCLK/MCLK DPM, upload the new DPM levels and AVFS voltages,
 * regenerate the level enable mask, update the SCLK threshold, notify
 * the SMC about display state, then unfreeze DPM and apply the mask.
 *
 * Each step reports failure through PP_ASSERT_WITH_CODE but the sequence
 * deliberately continues; 'result' ends up holding the code of the LAST
 * failing step (0 if all succeeded).
 */
static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	int tmp_result, result = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to find DPM states clocks in DPM table!",
			result = tmp_result);

	/* PCIe speed is lowered (if needed) BEFORE the state change... */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			smu7_request_link_speed_change_before_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to request link speed change before state change!",
				result = tmp_result);
	}

	/* DPM stays frozen while levels are repopulated; see unfreeze below. */
	tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to freeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to populate and upload SCLK MCLK DPM levels!",
			result = tmp_result);

	tmp_result = smu7_update_avfs(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update avfs voltages!",
			result = tmp_result);

	tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to generate DPM level enabled mask!",
			result = tmp_result);

	tmp_result = smum_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update SCLK threshold!",
			result = tmp_result);

	tmp_result = smu7_notify_smc_display(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to notify smc display settings!",
			result = tmp_result);

	tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to unfreeze SCLK MCLK DPM!",
			result = tmp_result);

	tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to upload DPM level enabled mask!",
			result = tmp_result);

	/* ...and raised (if allowed) AFTER the state change completed. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			smu7_notify_link_speed_change_after_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to notify link speed change after state change!",
				result = tmp_result);
	}
	data->apply_optimized_settings = false;
	return result;
}
4027
4028static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
4029{
4030        hwmgr->thermal_controller.
4031        advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
4032
4033        return smum_send_msg_to_smc_with_parameter(hwmgr,
4034                        PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
4035}
4036
4037static int
4038smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
4039{
4040        PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
4041
4042        return (smum_send_msg_to_smc(hwmgr, msg) == 0) ?  0 : -1;
4043}
4044
4045static int
4046smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
4047{
4048        if (hwmgr->display_config->num_display > 1 &&
4049                        !hwmgr->display_config->multi_monitor_in_sync)
4050                smu7_notify_smc_display_change(hwmgr, false);
4051
4052        return 0;
4053}
4054
4055/**
4056* Programs the display gap
4057*
4058* @param    hwmgr  the address of the powerplay hardware manager.
4059* @return   always OK
4060*/
4061static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
4062{
4063        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4064        uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
4065        uint32_t display_gap2;
4066        uint32_t pre_vbi_time_in_us;
4067        uint32_t frame_time_in_us;
4068        uint32_t ref_clock, refresh_rate;
4069
4070        display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
4071        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
4072
4073        ref_clock =  amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
4074        refresh_rate = hwmgr->display_config->vrefresh;
4075
4076        if (0 == refresh_rate)
4077                refresh_rate = 60;
4078
4079        frame_time_in_us = 1000000 / refresh_rate;
4080
4081        pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time;
4082
4083        data->frame_time_x2 = frame_time_in_us * 2 / 100;
4084
4085        if (data->frame_time_x2 < 280) {
4086                pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2);
4087                data->frame_time_x2 = 280;
4088        }
4089
4090        display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
4091
4092        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
4093
4094        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4095                        data->soft_regs_start + smum_get_offsetof(hwmgr,
4096                                                        SMU_SoftRegisters,
4097                                                        PreVBlankGap), 0x64);
4098
4099        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4100                        data->soft_regs_start + smum_get_offsetof(hwmgr,
4101                                                        SMU_SoftRegisters,
4102                                                        VBlankTimeout),
4103                                        (frame_time_in_us - pre_vbi_time_in_us));
4104
4105        return 0;
4106}
4107
/* Display-configuration-changed hook: reprogram the display gap
 * registers for the new configuration. */
static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	return smu7_program_display_gap(hwmgr);
}
4112
4113/**
4114*  Set maximum target operating fan output RPM
4115*
4116* @param    hwmgr:  the address of the powerplay hardware manager.
4117* @param    usMaxFanRpm:  max operating fan RPM value.
4118* @return   The response that came from the SMC.
4119*/
4120static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
4121{
4122        hwmgr->thermal_controller.
4123        advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
4124
4125        return smum_send_msg_to_smc_with_parameter(hwmgr,
4126                        PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
4127}
4128
/* IRQ source callbacks: all interrupts registered below funnel into
 * the common powerplay handler phm_irq_process. */
static const struct amdgpu_irq_src_funcs smu7_irq_funcs = {
	.process = phm_irq_process,
};
4132
/*
 * Register thermal interrupt handlers: the two CG thermal-threshold
 * crossings (low-to-high and high-to-low) and the CTF GPIO_19 line all
 * share one amdgpu_irq_src backed by smu7_irq_funcs.
 *
 * Returns 0 on success, -ENOMEM if the source cannot be allocated.
 *
 * NOTE(review): the amdgpu_irq_add_id() return values are ignored, and
 * 'source' is never freed here — presumably its lifetime is owned by
 * the IRQ framework once registered; confirm before changing.
 */
static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);

	if (!source)
		return -ENOMEM;

	source->funcs = &smu7_irq_funcs;

	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			AMDGPU_IRQ_CLIENTID_LEGACY,
			VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH,
			source);
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			AMDGPU_IRQ_CLIENTID_LEGACY,
			VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW,
			source);

	/* Register CTF(GPIO_19) interrupt */
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			AMDGPU_IRQ_CLIENTID_LEGACY,
			VISLANDS30_IV_SRCID_GPIO_19,
			source);

	return 0;
}
4160
4161static bool
4162smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4163{
4164        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4165        bool is_update_required = false;
4166
4167        if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
4168                is_update_required = true;
4169
4170        if (data->display_timing.vrefresh != hwmgr->display_config->vrefresh)
4171                is_update_required = true;
4172
4173        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
4174                if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr &&
4175                        (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
4176                        hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
4177                        is_update_required = true;
4178        }
4179        return is_update_required;
4180}
4181
4182static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
4183                                                           const struct smu7_performance_level *pl2)
4184{
4185        return ((pl1->memory_clock == pl2->memory_clock) &&
4186                  (pl1->engine_clock == pl2->engine_clock) &&
4187                  (pl1->pcie_gen == pl2->pcie_gen) &&
4188                  (pl1->pcie_lane == pl2->pcie_lane));
4189}
4190
4191static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
4192                const struct pp_hw_power_state *pstate1,
4193                const struct pp_hw_power_state *pstate2, bool *equal)
4194{
4195        const struct smu7_power_state *psa;
4196        const struct smu7_power_state *psb;
4197        int i;
4198        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4199
4200        if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4201                return -EINVAL;
4202
4203        psa = cast_const_phw_smu7_power_state(pstate1);
4204        psb = cast_const_phw_smu7_power_state(pstate2);
4205        /* If the two states don't even have the same number of performance levels they cannot be the same state. */
4206        if (psa->performance_level_count != psb->performance_level_count) {
4207                *equal = false;
4208                return 0;
4209        }
4210
4211        for (i = 0; i < psa->performance_level_count; i++) {
4212                if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4213                        /* If we have found even one performance level pair that is different the states are different. */
4214                        *equal = false;
4215                        return 0;
4216                }
4217        }
4218
4219        /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
4220        *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4221        *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4222        *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4223        /* For OD call, set value based on flag */
4224        *equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK |
4225                                                        DPMTABLE_OD_UPDATE_MCLK |
4226                                                        DPMTABLE_OD_UPDATE_VDDC));
4227
4228        return 0;
4229}
4230
4231static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
4232{
4233        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4234
4235        uint32_t vbios_version;
4236        uint32_t tmp;
4237
4238        /* Read MC indirect register offset 0x9F bits [3:0] to see
4239         * if VBIOS has already loaded a full version of MC ucode
4240         * or not.
4241         */
4242
4243        smu7_get_mc_microcode_version(hwmgr);
4244        vbios_version = hwmgr->microcode_version_info.MC & 0xf;
4245
4246        data->need_long_memory_training = false;
4247
4248        cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
4249                                                        ixMC_IO_DEBUG_UP_13);
4250        tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
4251
4252        if (tmp & (1 << 23)) {
4253                data->mem_latency_high = MEM_LATENCY_HIGH;
4254                data->mem_latency_low = MEM_LATENCY_LOW;
4255                if ((hwmgr->chip_id == CHIP_POLARIS10) ||
4256                    (hwmgr->chip_id == CHIP_POLARIS11) ||
4257                    (hwmgr->chip_id == CHIP_POLARIS12))
4258                        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC);
4259        } else {
4260                data->mem_latency_high = 330;
4261                data->mem_latency_low = 330;
4262                if ((hwmgr->chip_id == CHIP_POLARIS10) ||
4263                    (hwmgr->chip_id == CHIP_POLARIS11) ||
4264                    (hwmgr->chip_id == CHIP_POLARIS12))
4265                        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC);
4266        }
4267
4268        return 0;
4269}
4270
4271static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
4272{
4273        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4274
4275        data->clock_registers.vCG_SPLL_FUNC_CNTL         =
4276                cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
4277        data->clock_registers.vCG_SPLL_FUNC_CNTL_2       =
4278                cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
4279        data->clock_registers.vCG_SPLL_FUNC_CNTL_3       =
4280                cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
4281        data->clock_registers.vCG_SPLL_FUNC_CNTL_4       =
4282                cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
4283        data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM   =
4284                cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
4285        data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
4286                cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
4287        data->clock_registers.vDLL_CNTL                  =
4288                cgs_read_register(hwmgr->device, mmDLL_CNTL);
4289        data->clock_registers.vMCLK_PWRMGT_CNTL          =
4290                cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
4291        data->clock_registers.vMPLL_AD_FUNC_CNTL         =
4292                cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
4293        data->clock_registers.vMPLL_DQ_FUNC_CNTL         =
4294                cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
4295        data->clock_registers.vMPLL_FUNC_CNTL            =
4296                cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
4297        data->clock_registers.vMPLL_FUNC_CNTL_1          =
4298                cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
4299        data->clock_registers.vMPLL_FUNC_CNTL_2          =
4300                cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
4301        data->clock_registers.vMPLL_SS1                  =
4302                cgs_read_register(