/* linux/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c */
   1/*
   2 * Copyright 2015 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23#include "pp_debug.h"
  24#include <linux/delay.h>
  25#include <linux/fb.h>
  26#include <linux/module.h>
  27#include <linux/pci.h>
  28#include <linux/slab.h>
  29#include <asm/div64.h>
  30#include <drm/amdgpu_drm.h>
  31#include "ppatomctrl.h"
  32#include "atombios.h"
  33#include "pptable_v1_0.h"
  34#include "pppcielanes.h"
  35#include "amd_pcie_helpers.h"
  36#include "hardwaremanager.h"
  37#include "process_pptables_v1_0.h"
  38#include "cgs_common.h"
  39
  40#include "smu7_common.h"
  41
  42#include "hwmgr.h"
  43#include "smu7_hwmgr.h"
  44#include "smu_ucode_xfer_vi.h"
  45#include "smu7_powertune.h"
  46#include "smu7_dyn_defaults.h"
  47#include "smu7_thermal.h"
  48#include "smu7_clockpowergating.h"
  49#include "processpptables.h"
  50#include "pp_thermal.h"
  51#include "smu7_baco.h"
  52
  53#include "ivsrcid/ivsrcid_vislands30.h"
  54
  55#define MC_CG_ARB_FREQ_F0           0x0a
  56#define MC_CG_ARB_FREQ_F1           0x0b
  57#define MC_CG_ARB_FREQ_F2           0x0c
  58#define MC_CG_ARB_FREQ_F3           0x0d
  59
  60#define MC_CG_SEQ_DRAMCONF_S0       0x05
  61#define MC_CG_SEQ_DRAMCONF_S1       0x06
  62#define MC_CG_SEQ_YCLK_SUSPEND      0x04
  63#define MC_CG_SEQ_YCLK_RESUME       0x0a
  64
  65#define SMC_CG_IND_START            0xc0030000
  66#define SMC_CG_IND_END              0xc0040000
  67
  68#define MEM_FREQ_LOW_LATENCY        25000
  69#define MEM_FREQ_HIGH_LATENCY       80000
  70
  71#define MEM_LATENCY_HIGH            45
  72#define MEM_LATENCY_LOW             35
  73#define MEM_LATENCY_ERR             0xFFFF
  74
  75#define MC_SEQ_MISC0_GDDR5_SHIFT 28
  76#define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
  77#define MC_SEQ_MISC0_GDDR5_VALUE 5
  78
  79#define PCIE_BUS_CLK                10000
  80#define TCLK                        (PCIE_BUS_CLK / 10)
  81
/* Built-in power-profile presets, indexed by the standard power-profile
 * mode (3D full screen, power saving, video, VR, compute, custom).
 * Each entry appears to hold two {update-flag, up_hyst, down_hyst,
 * activity} groups — one for SCLK, one for MCLK; a leading 0 means
 * "do not touch that clock's tuning".  NOTE(review): field order assumed
 * from struct profile_mode_setting — confirm against hwmgr.h. */
static struct profile_mode_setting smu7_profiling[7] =
					{{0, 0, 0, 0, 0, 0, 0, 0},
					 {1, 0, 100, 30, 1, 0, 100, 10},
					 {1, 10, 0, 30, 0, 0, 0, 0},
					 {0, 0, 0, 0, 1, 10, 16, 31},
					 {1, 0, 11, 50, 1, 0, 100, 10},
					 {1, 0, 5, 30, 0, 0, 0, 0},
					 {0, 0, 0, 0, 0, 0, 0, 0},
					};
  91
  92#define PPSMC_MSG_SetVBITimeout_VEGAM    ((uint16_t) 0x310)
  93
  94#define ixPWR_SVI2_PLANE1_LOAD                     0xC0200280
  95#define PWR_SVI2_PLANE1_LOAD__PSI1_MASK                    0x00000020L
  96#define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK                 0x00000040L
  97#define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT                  0x00000005
  98#define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT               0x00000006
  99
/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field.
 * Encodings match the hardware field; selects which thermal event source
 * (analog sensor, external pin, digital sensor, or a combination)
 * triggers DPM events. */
enum DPM_EVENT_SRC {
	DPM_EVENT_SRC_ANALOG = 0,
	DPM_EVENT_SRC_EXTERNAL = 1,
	DPM_EVENT_SRC_DIGITAL = 2,
	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
};
 108
 109static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
 110static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
 111                enum pp_clock_type type, uint32_t mask);
 112
/**
 * Downcast a generic pp_hw_power_state to the SMU7-specific state.
 *
 * Checks the magic value stamped into the state; on mismatch the
 * PP_ASSERT_WITH_CODE macro reports "Invalid Powerstate Type!" and
 * NULL is returned instead of a bogus cast.
 */
static struct smu7_power_state *cast_phw_smu7_power_state(
				  struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				 return NULL);

	return (struct smu7_power_state *)hw_ps;
}
 122
/**
 * Const variant of cast_phw_smu7_power_state(): downcast a generic
 * pp_hw_power_state to the SMU7-specific state without allowing
 * modification.  Returns NULL if the magic value does not match.
 */
static const struct smu7_power_state *cast_const_phw_smu7_power_state(
				 const struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				 return NULL);

	return (const struct smu7_power_state *)hw_ps;
}
 132
/**
 * Find the MC microcode version and store it in the HwMgr struct
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
{
	/* Select debug index 0x9F, which exposes the MC firmware version
	 * through the MC_SEQ IO debug data register (index/data pair). */
	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);

	hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);

	return 0;
}
 147
 148static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
 149{
 150        uint32_t speedCntl = 0;
 151
 152        /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
 153        speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
 154                        ixPCIE_LC_SPEED_CNTL);
 155        return((uint16_t)PHM_GET_FIELD(speedCntl,
 156                        PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
 157}
 158
 159static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
 160{
 161        uint32_t link_width;
 162
 163        /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
 164        link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
 165                        PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
 166
 167        PP_ASSERT_WITH_CODE((7 >= link_width),
 168                        "Invalid PCIe lane width!", return 0);
 169
 170        return decode_pcie_lane_width(link_width);
 171}
 172
/**
* Enable voltage control
*
* @param    hwmgr  the address of the powerplay hardware manager.
* @return   always 0
*/
static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
	/* VEGAM: clear the PSI1/PSI0_EN power-state-indicator bits in the
	 * SVI2 plane-1 load register before handing voltage to the SMC. */
	if (hwmgr->chip_id == CHIP_VEGAM) {
		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
				CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0);
		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
				CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0);
	}

	/* Only ask the SMC to take over voltage control if the feature
	 * mask allows it; the message's return value is ignored. */
	if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable, NULL);

	return 0;
}
 193
 194/**
 195* Checks if we want to support voltage control
 196*
 197* @param    hwmgr  the address of the powerplay hardware manager.
 198*/
 199static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
 200{
 201        const struct smu7_hwmgr *data =
 202                        (const struct smu7_hwmgr *)(hwmgr->backend);
 203
 204        return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
 205}
 206
/**
* Enable voltage control
*
* @param    hwmgr  the address of the powerplay hardware manager.
* @return   always 0
*/
static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
{
	/* enable voltage control: set GENERAL_PWRMGT.VOLT_PWRMGT_EN */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);

	return 0;
}
 221
 222static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
 223                struct phm_clock_voltage_dependency_table *voltage_dependency_table
 224                )
 225{
 226        uint32_t i;
 227
 228        PP_ASSERT_WITH_CODE((NULL != voltage_table),
 229                        "Voltage Dependency Table empty.", return -EINVAL;);
 230
 231        voltage_table->mask_low = 0;
 232        voltage_table->phase_delay = 0;
 233        voltage_table->count = voltage_dependency_table->count;
 234
 235        for (i = 0; i < voltage_dependency_table->count; i++) {
 236                voltage_table->entries[i].value =
 237                        voltage_dependency_table->entries[i].v;
 238                voltage_table->entries[i].smio_low = 0;
 239        }
 240
 241        return 0;
 242}
 243
 244
 245/**
 246* Create Voltage Tables.
 247*
 248* @param    hwmgr  the address of the powerplay hardware manager.
 249* @return   always 0
 250*/
 251static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
 252{
 253        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 254        struct phm_ppt_v1_information *table_info =
 255                        (struct phm_ppt_v1_information *)hwmgr->pptable;
 256        int result = 0;
 257        uint32_t tmp;
 258
 259        if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
 260                result = atomctrl_get_voltage_table_v3(hwmgr,
 261                                VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
 262                                &(data->mvdd_voltage_table));
 263                PP_ASSERT_WITH_CODE((0 == result),
 264                                "Failed to retrieve MVDD table.",
 265                                return result);
 266        } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
 267                if (hwmgr->pp_table_version == PP_TABLE_V1)
 268                        result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
 269                                        table_info->vdd_dep_on_mclk);
 270                else if (hwmgr->pp_table_version == PP_TABLE_V0)
 271                        result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
 272                                        hwmgr->dyn_state.mvdd_dependency_on_mclk);
 273
 274                PP_ASSERT_WITH_CODE((0 == result),
 275                                "Failed to retrieve SVI2 MVDD table from dependency table.",
 276                                return result;);
 277        }
 278
 279        if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
 280                result = atomctrl_get_voltage_table_v3(hwmgr,
 281                                VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
 282                                &(data->vddci_voltage_table));
 283                PP_ASSERT_WITH_CODE((0 == result),
 284                                "Failed to retrieve VDDCI table.",
 285                                return result);
 286        } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
 287                if (hwmgr->pp_table_version == PP_TABLE_V1)
 288                        result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
 289                                        table_info->vdd_dep_on_mclk);
 290                else if (hwmgr->pp_table_version == PP_TABLE_V0)
 291                        result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
 292                                        hwmgr->dyn_state.vddci_dependency_on_mclk);
 293                PP_ASSERT_WITH_CODE((0 == result),
 294                                "Failed to retrieve SVI2 VDDCI table from dependency table.",
 295                                return result);
 296        }
 297
 298        if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
 299                /* VDDGFX has only SVI2 voltage control */
 300                result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
 301                                        table_info->vddgfx_lookup_table);
 302                PP_ASSERT_WITH_CODE((0 == result),
 303                        "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
 304        }
 305
 306
 307        if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
 308                result = atomctrl_get_voltage_table_v3(hwmgr,
 309                                        VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
 310                                        &data->vddc_voltage_table);
 311                PP_ASSERT_WITH_CODE((0 == result),
 312                        "Failed to retrieve VDDC table.", return result;);
 313        } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
 314
 315                if (hwmgr->pp_table_version == PP_TABLE_V0)
 316                        result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
 317                                        hwmgr->dyn_state.vddc_dependency_on_mclk);
 318                else if (hwmgr->pp_table_version == PP_TABLE_V1)
 319                        result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
 320                                table_info->vddc_lookup_table);
 321
 322                PP_ASSERT_WITH_CODE((0 == result),
 323                        "Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
 324        }
 325
 326        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
 327        PP_ASSERT_WITH_CODE(
 328                        (data->vddc_voltage_table.count <= tmp),
 329                "Too many voltage values for VDDC. Trimming to fit state table.",
 330                        phm_trim_voltage_table_to_fit_state_table(tmp,
 331                                                &(data->vddc_voltage_table)));
 332
 333        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
 334        PP_ASSERT_WITH_CODE(
 335                        (data->vddgfx_voltage_table.count <= tmp),
 336                "Too many voltage values for VDDC. Trimming to fit state table.",
 337                        phm_trim_voltage_table_to_fit_state_table(tmp,
 338                                                &(data->vddgfx_voltage_table)));
 339
 340        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
 341        PP_ASSERT_WITH_CODE(
 342                        (data->vddci_voltage_table.count <= tmp),
 343                "Too many voltage values for VDDCI. Trimming to fit state table.",
 344                        phm_trim_voltage_table_to_fit_state_table(tmp,
 345                                        &(data->vddci_voltage_table)));
 346
 347        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
 348        PP_ASSERT_WITH_CODE(
 349                        (data->mvdd_voltage_table.count <= tmp),
 350                "Too many voltage values for MVDD. Trimming to fit state table.",
 351                        phm_trim_voltage_table_to_fit_state_table(tmp,
 352                                                &(data->mvdd_voltage_table)));
 353
 354        return 0;
 355}
 356
/**
* Programs static screen detection parameters
*
* @param    hwmgr  the address of the powerplay hardware manager.
* @return   always 0
*/
static int smu7_program_static_screen_threshold_parameters(
							struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* Set static screen threshold unit */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
			data->static_screen_threshold_unit);
	/* Set static screen threshold */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
			data->static_screen_threshold);

	return 0;
}
 379
 380/**
 381* Setup display gap for glitch free memory clock switching.
 382*
 383* @param    hwmgr  the address of the powerplay hardware manager.
 384* @return   always  0
 385*/
 386static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
 387{
 388        uint32_t display_gap =
 389                        cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
 390                                        ixCG_DISPLAY_GAP_CNTL);
 391
 392        display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
 393                        DISP_GAP, DISPLAY_GAP_IGNORE);
 394
 395        display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
 396                        DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
 397
 398        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
 399                        ixCG_DISPLAY_GAP_CNTL, display_gap);
 400
 401        return 0;
 402}
 403
 404/**
 405* Programs activity state transition voting clients
 406*
 407* @param    hwmgr  the address of the powerplay hardware manager.
 408* @return   always  0
 409*/
 410static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
 411{
 412        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 413        int i;
 414
 415        /* Clear reset for voting clients before enabling DPM */
 416        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 417                        SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
 418        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 419                        SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
 420
 421        for (i = 0; i < 8; i++)
 422                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
 423                                        ixCG_FREQ_TRAN_VOTING_0 + i * 4,
 424                                        data->voting_rights_clients[i]);
 425        return 0;
 426}
 427
 428static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
 429{
 430        int i;
 431
 432        /* Reset voting clients before disabling DPM */
 433        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 434                        SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
 435        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 436                        SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
 437
 438        for (i = 0; i < 8; i++)
 439                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
 440                                ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);
 441
 442        return 0;
 443}
 444
/* Copy one arb setting to another and then switch the active set.
 * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants.
 * Only the F0 and F1 register copies are handled; any other value
 * returns -EINVAL.
 */
static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
		uint32_t arb_src, uint32_t arb_dest)
{
	uint32_t mc_arb_dram_timing;
	uint32_t mc_arb_dram_timing2;
	uint32_t burst_time;
	uint32_t mc_cg_config;

	/* Read DRAM timing registers and burst time from the source set. */
	switch (arb_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
		break;
	default:
		return -EINVAL;
	}

	/* Write the captured values into the destination set. */
	switch (arb_dest) {
	case MC_CG_ARB_FREQ_F0:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
		break;
	case MC_CG_ARB_FREQ_F1:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
		break;
	default:
		return -EINVAL;
	}

	/* Set the low 4 bits of MC_CG_CONFIG (meaning per MC spec —
	 * NOTE(review): magic 0x0000000F, purpose not evident here),
	 * then request the arbiter to make arb_dest the active set. */
	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
	mc_cg_config |= 0x0000000F;
	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);

	return 0;
}
 493
/* Ask the SMC firmware to restore its power-management defaults;
 * returns the result of the SMC message. */
static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults, NULL);
}
 498
/**
* Initial switch from ARB F0->F1
*
* @param    hwmgr  the address of the powerplay hardware manager.
* @return   always 0
* This function is to be called from the SetPowerState table.
*/
static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
{
	return smu7_copy_and_switch_arb_sets(hwmgr,
			MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
 511
 512static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
 513{
 514        uint32_t tmp;
 515
 516        tmp = (cgs_read_ind_register(hwmgr->device,
 517                        CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
 518                        0x0000ff00) >> 8;
 519
 520        if (tmp == MC_CG_ARB_FREQ_F0)
 521                return 0;
 522
 523        return smu7_copy_and_switch_arb_sets(hwmgr,
 524                        tmp, MC_CG_ARB_FREQ_F0);
 525}
 526
 527static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
 528{
 529        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 530
 531        struct phm_ppt_v1_information *table_info =
 532                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
 533        struct phm_ppt_v1_pcie_table *pcie_table = NULL;
 534
 535        uint32_t i, max_entry;
 536        uint32_t tmp;
 537
 538        PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
 539                        data->use_pcie_power_saving_levels), "No pcie performance levels!",
 540                        return -EINVAL);
 541
 542        if (table_info != NULL)
 543                pcie_table = table_info->pcie_table;
 544
 545        if (data->use_pcie_performance_levels &&
 546                        !data->use_pcie_power_saving_levels) {
 547                data->pcie_gen_power_saving = data->pcie_gen_performance;
 548                data->pcie_lane_power_saving = data->pcie_lane_performance;
 549        } else if (!data->use_pcie_performance_levels &&
 550                        data->use_pcie_power_saving_levels) {
 551                data->pcie_gen_performance = data->pcie_gen_power_saving;
 552                data->pcie_lane_performance = data->pcie_lane_power_saving;
 553        }
 554        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
 555        phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
 556                                        tmp,
 557                                        MAX_REGULAR_DPM_NUMBER);
 558
 559        if (pcie_table != NULL) {
 560                /* max_entry is used to make sure we reserve one PCIE level
 561                 * for boot level (fix for A+A PSPP issue).
 562                 * If PCIE table from PPTable have ULV entry + 8 entries,
 563                 * then ignore the last entry.*/
 564                max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
 565                for (i = 1; i < max_entry; i++) {
 566                        phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
 567                                        get_pcie_gen_support(data->pcie_gen_cap,
 568                                                        pcie_table->entries[i].gen_speed),
 569                                        get_pcie_lane_support(data->pcie_lane_cap,
 570                                                        pcie_table->entries[i].lane_width));
 571                }
 572                data->dpm_table.pcie_speed_table.count = max_entry - 1;
 573                smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
 574        } else {
 575                /* Hardcode Pcie Table */
 576                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
 577                                get_pcie_gen_support(data->pcie_gen_cap,
 578                                                PP_Min_PCIEGen),
 579                                get_pcie_lane_support(data->pcie_lane_cap,
 580                                                PP_Max_PCIELane));
 581                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
 582                                get_pcie_gen_support(data->pcie_gen_cap,
 583                                                PP_Min_PCIEGen),
 584                                get_pcie_lane_support(data->pcie_lane_cap,
 585                                                PP_Max_PCIELane));
 586                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
 587                                get_pcie_gen_support(data->pcie_gen_cap,
 588                                                PP_Max_PCIEGen),
 589                                get_pcie_lane_support(data->pcie_lane_cap,
 590                                                PP_Max_PCIELane));
 591                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
 592                                get_pcie_gen_support(data->pcie_gen_cap,
 593                                                PP_Max_PCIEGen),
 594                                get_pcie_lane_support(data->pcie_lane_cap,
 595                                                PP_Max_PCIELane));
 596                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
 597                                get_pcie_gen_support(data->pcie_gen_cap,
 598                                                PP_Max_PCIEGen),
 599                                get_pcie_lane_support(data->pcie_lane_cap,
 600                                                PP_Max_PCIELane));
 601                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
 602                                get_pcie_gen_support(data->pcie_gen_cap,
 603                                                PP_Max_PCIEGen),
 604                                get_pcie_lane_support(data->pcie_lane_cap,
 605                                                PP_Max_PCIELane));
 606
 607                data->dpm_table.pcie_speed_table.count = 6;
 608        }
 609        /* Populate last level for boot PCIE level, but do not increment count. */
 610        if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
 611                for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
 612                        phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
 613                                get_pcie_gen_support(data->pcie_gen_cap,
 614                                                PP_Max_PCIEGen),
 615                                data->vbios_boot_state.pcie_lane_bootup_value);
 616        } else {
 617                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
 618                        data->dpm_table.pcie_speed_table.count,
 619                        get_pcie_gen_support(data->pcie_gen_cap,
 620                                        PP_Min_PCIEGen),
 621                        get_pcie_lane_support(data->pcie_lane_cap,
 622                                        PP_Max_PCIELane));
 623        }
 624        return 0;
 625}
 626
 627static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
 628{
 629        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 630
 631        memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
 632
 633        phm_reset_single_dpm_table(
 634                        &data->dpm_table.sclk_table,
 635                                smum_get_mac_definition(hwmgr,
 636                                        SMU_MAX_LEVELS_GRAPHICS),
 637                                        MAX_REGULAR_DPM_NUMBER);
 638        phm_reset_single_dpm_table(
 639                        &data->dpm_table.mclk_table,
 640                        smum_get_mac_definition(hwmgr,
 641                                SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);
 642
 643        phm_reset_single_dpm_table(
 644                        &data->dpm_table.vddc_table,
 645                                smum_get_mac_definition(hwmgr,
 646                                        SMU_MAX_LEVELS_VDDC),
 647                                        MAX_REGULAR_DPM_NUMBER);
 648        phm_reset_single_dpm_table(
 649                        &data->dpm_table.vddci_table,
 650                        smum_get_mac_definition(hwmgr,
 651                                SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);
 652
 653        phm_reset_single_dpm_table(
 654                        &data->dpm_table.mvdd_table,
 655                                smum_get_mac_definition(hwmgr,
 656                                        SMU_MAX_LEVELS_MVDD),
 657                                        MAX_REGULAR_DPM_NUMBER);
 658        return 0;
 659}
 660/*
 661 * This function is to initialize all DPM state tables
 662 * for SMU7 based on the dependency table.
 663 * Dynamic state patching function will then trim these
 664 * state tables to the allowed range based
 665 * on the power policy or external client requests,
 666 * such as UVD request, etc.
 667 */
 668
 669static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
 670{
 671        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 672        struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
 673                hwmgr->dyn_state.vddc_dependency_on_sclk;
 674        struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
 675                hwmgr->dyn_state.vddc_dependency_on_mclk;
 676        struct phm_cac_leakage_table *std_voltage_table =
 677                hwmgr->dyn_state.cac_leakage_table;
 678        uint32_t i;
 679
 680        PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
 681                "SCLK dependency table is missing. This table is mandatory", return -EINVAL);
 682        PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
 683                "SCLK dependency table has to have is missing. This table is mandatory", return -EINVAL);
 684
 685        PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
 686                "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
 687        PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
 688                "VMCLK dependency table has to have is missing. This table is mandatory", return -EINVAL);
 689
 690
 691        /* Initialize Sclk DPM table based on allow Sclk values*/
 692        data->dpm_table.sclk_table.count = 0;
 693
 694        for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
 695                if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
 696                                allowed_vdd_sclk_table->entries[i].clk) {
 697                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
 698                                allowed_vdd_sclk_table->entries[i].clk;
 699                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
 700                        data->dpm_table.sclk_table.count++;
 701                }
 702        }
 703
 704        PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
 705                "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
 706        /* Initialize Mclk DPM table based on allow Mclk values */
 707        data->dpm_table.mclk_table.count = 0;
 708        for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
 709                if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
 710                        allowed_vdd_mclk_table->entries[i].clk) {
 711                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
 712                                allowed_vdd_mclk_table->entries[i].clk;
 713                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
 714                        data->dpm_table.mclk_table.count++;
 715                }
 716        }
 717
 718        /* Initialize Vddc DPM table based on allow Vddc values.  And populate corresponding std values. */
 719        for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
 720                data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
 721                data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
 722                /* param1 is for corresponding std voltage */
 723                data->dpm_table.vddc_table.dpm_levels[i].enabled = true;
 724        }
 725
 726        data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
 727        allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
 728
 729        if (NULL != allowed_vdd_mclk_table) {
 730                /* Initialize Vddci DPM table based on allow Mclk values */
 731                for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
 732                        data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
 733                        data->dpm_table.vddci_table.dpm_levels[i].enabled = true;
 734                }
 735                data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
 736        }
 737
 738        allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;
 739
 740        if (NULL != allowed_vdd_mclk_table) {
 741                /*
 742                 * Initialize MVDD DPM table based on allow Mclk
 743                 * values
 744                 */
 745                for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
 746                        data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
 747                        data->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
 748                }
 749                data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
 750        }
 751
 752        return 0;
 753}
 754
 755static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
 756{
 757        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 758        struct phm_ppt_v1_information *table_info =
 759                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
 760        uint32_t i;
 761
 762        struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
 763        struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
 764
 765        if (table_info == NULL)
 766                return -EINVAL;
 767
 768        dep_sclk_table = table_info->vdd_dep_on_sclk;
 769        dep_mclk_table = table_info->vdd_dep_on_mclk;
 770
 771        PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
 772                        "SCLK dependency table is missing.",
 773                        return -EINVAL);
 774        PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
 775                        "SCLK dependency table count is 0.",
 776                        return -EINVAL);
 777
 778        PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
 779                        "MCLK dependency table is missing.",
 780                        return -EINVAL);
 781        PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
 782                        "MCLK dependency table count is 0",
 783                        return -EINVAL);
 784
 785        /* Initialize Sclk DPM table based on allow Sclk values */
 786        data->dpm_table.sclk_table.count = 0;
 787        for (i = 0; i < dep_sclk_table->count; i++) {
 788                if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
 789                                                dep_sclk_table->entries[i].clk) {
 790
 791                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
 792                                        dep_sclk_table->entries[i].clk;
 793
 794                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
 795                                        (i == 0) ? true : false;
 796                        data->dpm_table.sclk_table.count++;
 797                }
 798        }
 799        if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
 800                hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk;
 801        /* Initialize Mclk DPM table based on allow Mclk values */
 802        data->dpm_table.mclk_table.count = 0;
 803        for (i = 0; i < dep_mclk_table->count; i++) {
 804                if (i == 0 || data->dpm_table.mclk_table.dpm_levels
 805                                [data->dpm_table.mclk_table.count - 1].value !=
 806                                                dep_mclk_table->entries[i].clk) {
 807                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
 808                                                        dep_mclk_table->entries[i].clk;
 809                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
 810                                                        (i == 0) ? true : false;
 811                        data->dpm_table.mclk_table.count++;
 812                }
 813        }
 814
 815        if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
 816                hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk;
 817        return 0;
 818}
 819
 820static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
 821{
 822        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 823        struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
 824        struct phm_ppt_v1_information *table_info =
 825                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
 826        uint32_t i;
 827
 828        struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
 829        struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
 830        struct phm_odn_performance_level *entries;
 831
 832        if (table_info == NULL)
 833                return -EINVAL;
 834
 835        dep_sclk_table = table_info->vdd_dep_on_sclk;
 836        dep_mclk_table = table_info->vdd_dep_on_mclk;
 837
 838        odn_table->odn_core_clock_dpm_levels.num_of_pl =
 839                                                data->golden_dpm_table.sclk_table.count;
 840        entries = odn_table->odn_core_clock_dpm_levels.entries;
 841        for (i=0; i<data->golden_dpm_table.sclk_table.count; i++) {
 842                entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
 843                entries[i].enabled = true;
 844                entries[i].vddc = dep_sclk_table->entries[i].vddc;
 845        }
 846
 847        smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table,
 848                (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));
 849
 850        odn_table->odn_memory_clock_dpm_levels.num_of_pl =
 851                                                data->golden_dpm_table.mclk_table.count;
 852        entries = odn_table->odn_memory_clock_dpm_levels.entries;
 853        for (i=0; i<data->golden_dpm_table.mclk_table.count; i++) {
 854                entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
 855                entries[i].enabled = true;
 856                entries[i].vddc = dep_mclk_table->entries[i].vddc;
 857        }
 858
 859        smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table,
 860                (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));
 861
 862        return 0;
 863}
 864
 865static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
 866{
 867        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 868        struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
 869        struct phm_ppt_v1_information *table_info =
 870                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
 871        uint32_t min_vddc = 0;
 872        uint32_t max_vddc = 0;
 873
 874        if (!table_info)
 875                return;
 876
 877        dep_sclk_table = table_info->vdd_dep_on_sclk;
 878
 879        atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);
 880
 881        if (min_vddc == 0 || min_vddc > 2000
 882                || min_vddc > dep_sclk_table->entries[0].vddc)
 883                min_vddc = dep_sclk_table->entries[0].vddc;
 884
 885        if (max_vddc == 0 || max_vddc > 2000
 886                || max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc)
 887                max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc;
 888
 889        data->odn_dpm_table.min_vddc = min_vddc;
 890        data->odn_dpm_table.max_vddc = max_vddc;
 891}
 892
/*
 * Compare the live DPM tables and vdd-dependency tables against the user's
 * overdrive (ODN) copies and set the matching DPMTABLE_OD_UPDATE_* flags in
 * need_update_smu7_dpm_table, so only the changed tables get re-uploaded.
 */
static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;

	if (table_info == NULL)
		return;

	/* any ODN engine clock differing from the live table forces an SCLK update */
	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
		if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
					data->dpm_table.sclk_table.dpm_levels[i].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
			break;
		}
	}

	/* same check for the memory clocks */
	for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
		if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
					data->dpm_table.mclk_table.dpm_levels[i].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
			break;
		}
	}

	/* a changed MCLK voltage needs both a VDDC and an MCLK re-upload;
	 * returning here skips the VDDC flag rewrite at the bottom */
	dep_table = table_info->vdd_dep_on_mclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);

	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
			return;
		}
	}

	/* likewise, a changed SCLK voltage needs VDDC plus SCLK */
	dep_table = table_info->vdd_dep_on_sclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
			return;
		}
	}
	/* no voltage change this time: a stale VDDC flag from an earlier pass is
	 * downgraded to a plain SCLK+MCLK re-upload */
	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
	}
}
 946
 947static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 948{
 949        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 950
 951        smu7_reset_dpm_tables(hwmgr);
 952
 953        if (hwmgr->pp_table_version == PP_TABLE_V1)
 954                smu7_setup_dpm_tables_v1(hwmgr);
 955        else if (hwmgr->pp_table_version == PP_TABLE_V0)
 956                smu7_setup_dpm_tables_v0(hwmgr);
 957
 958        smu7_setup_default_pcie_table(hwmgr);
 959
 960        /* save a copy of the default DPM table */
 961        memcpy(&(data->golden_dpm_table), &(data->dpm_table),
 962                        sizeof(struct smu7_dpm_table));
 963
 964        /* initialize ODN table */
 965        if (hwmgr->od_enabled) {
 966                if (data->odn_dpm_table.max_vddc) {
 967                        smu7_check_dpm_table_updated(hwmgr);
 968                } else {
 969                        smu7_setup_voltage_range_from_vbios(hwmgr);
 970                        smu7_odn_initial_default_setting(hwmgr);
 971                }
 972        }
 973        return 0;
 974}
 975
 976static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
 977{
 978
 979        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 980                        PHM_PlatformCaps_RegulatorHot))
 981                return smum_send_msg_to_smc(hwmgr,
 982                                PPSMC_MSG_EnableVRHotGPIOInterrupt,
 983                                NULL);
 984
 985        return 0;
 986}
 987
/* Allow SCLK power management by clearing the SCLK_PWRMGT_OFF override bit. */
static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			SCLK_PWRMGT_OFF, 0);
	return 0;
}
 994
 995static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
 996{
 997        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 998
 999        if (data->ulv_supported)
1000                return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV, NULL);
1001
1002        return 0;
1003}
1004
1005static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
1006{
1007        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1008
1009        if (data->ulv_supported)
1010                return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV, NULL);
1011
1012        return 0;
1013}
1014
1015static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
1016{
1017        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1018                        PHM_PlatformCaps_SclkDeepSleep)) {
1019                if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON, NULL))
1020                        PP_ASSERT_WITH_CODE(false,
1021                                        "Attempt to enable Master Deep Sleep switch failed!",
1022                                        return -EINVAL);
1023        } else {
1024                if (smum_send_msg_to_smc(hwmgr,
1025                                PPSMC_MSG_MASTER_DeepSleep_OFF,
1026                                NULL)) {
1027                        PP_ASSERT_WITH_CODE(false,
1028                                        "Attempt to disable Master Deep Sleep switch failed!",
1029                                        return -EINVAL);
1030                }
1031        }
1032
1033        return 0;
1034}
1035
1036static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
1037{
1038        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1039                        PHM_PlatformCaps_SclkDeepSleep)) {
1040                if (smum_send_msg_to_smc(hwmgr,
1041                                PPSMC_MSG_MASTER_DeepSleep_OFF,
1042                                NULL)) {
1043                        PP_ASSERT_WITH_CODE(false,
1044                                        "Attempt to disable Master Deep Sleep switch failed!",
1045                                        return -EINVAL);
1046                }
1047        }
1048
1049        return 0;
1050}
1051
1052static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr)
1053{
1054        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1055        uint32_t soft_register_value = 0;
1056        uint32_t handshake_disables_offset = data->soft_regs_start
1057                                + smum_get_offsetof(hwmgr,
1058                                        SMU_SoftRegisters, HandshakeDisables);
1059
1060        soft_register_value = cgs_read_ind_register(hwmgr->device,
1061                                CGS_IND_REG__SMC, handshake_disables_offset);
1062        soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE;
1063        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1064                        handshake_disables_offset, soft_register_value);
1065        return 0;
1066}
1067
1068static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
1069{
1070        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1071        uint32_t soft_register_value = 0;
1072        uint32_t handshake_disables_offset = data->soft_regs_start
1073                                + smum_get_offsetof(hwmgr,
1074                                        SMU_SoftRegisters, HandshakeDisables);
1075
1076        soft_register_value = cgs_read_ind_register(hwmgr->device,
1077                                CGS_IND_REG__SMC, handshake_disables_offset);
1078        soft_register_value |= smum_get_mac_definition(hwmgr,
1079                                        SMU_UVD_MCLK_HANDSHAKE_DISABLE);
1080        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1081                        handshake_disables_offset, soft_register_value);
1082        return 0;
1083}
1084
1085static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1086{
1087        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1088
1089        /* enable SCLK dpm */
1090        if (!data->sclk_dpm_key_disabled) {
1091                if (hwmgr->chip_id == CHIP_VEGAM)
1092                        smu7_disable_sclk_vce_handshake(hwmgr);
1093
1094                PP_ASSERT_WITH_CODE(
1095                (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable, NULL)),
1096                "Failed to enable SCLK DPM during DPM Start Function!",
1097                return -EINVAL);
1098        }
1099
1100        /* enable MCLK dpm */
1101        if (0 == data->mclk_dpm_key_disabled) {
1102                if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
1103                        smu7_disable_handshake_uvd(hwmgr);
1104
1105                PP_ASSERT_WITH_CODE(
1106                                (0 == smum_send_msg_to_smc(hwmgr,
1107                                                PPSMC_MSG_MCLKDPM_Enable,
1108                                                NULL)),
1109                                "Failed to enable MCLK DPM during DPM Start Function!",
1110                                return -EINVAL);
1111
1112                if (hwmgr->chip_family != CHIP_VEGAM)
1113                        PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
1114
1115
1116                if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
1117                        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5);
1118                        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5);
1119                        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005);
1120                        udelay(10);
1121                        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005);
1122                        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005);
1123                        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005);
1124                } else {
1125                        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
1126                        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
1127                        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
1128                        udelay(10);
1129                        if (hwmgr->chip_id == CHIP_VEGAM) {
1130                                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009);
1131                                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009);
1132                        } else {
1133                                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
1134                                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
1135                        }
1136                        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
1137                }
1138        }
1139
1140        return 0;
1141}
1142
/*
 * Bring up DPM: turn on global power management and sclk dynamic PM, prime
 * the PCIe-DPM handoff registers, then enable SCLK/MCLK DPM, PCIe DPM and
 * (when supported) the AC/DC transition interrupt.
 */
static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* enable general power management */

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 1);

	/* enable sclk deep sleep */

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 1);

	/* prepare for PCIE DPM: give the SMU a voltage-change timeout and
	 * release the PCIe link-control reset */

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start +
			smum_get_offsetof(hwmgr, SMU_SoftRegisters,
						VoltageChangeTimeout), 0x1000);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
			SWRST_COMMAND_1, RESETLC, 0x0);

	/* NOTE(review): 0x1488 is an unnamed register on CI-family parts and
	 * bit 0 is cleared here; its meaning is not visible from this file. */
	if (hwmgr->chip_family == AMDGPU_FAMILY_CI)
		cgs_write_register(hwmgr->device, 0x1488,
			(cgs_read_register(hwmgr->device, 0x1488) & ~0x1));

	if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
		pr_err("Failed to enable Sclk DPM and Mclk DPM!");
		return -EINVAL;
	}

	/* enable PCIE dpm */
	if (0 == data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_PCIeDPM_Enable,
						NULL)),
				"Failed to enable pcie DPM during DPM Start Function!",
				return -EINVAL);
	}

	/* best-effort: a failure here only logs and does not abort the start */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition)) {
		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_EnableACDCGPIOInterrupt,
				NULL)),
				"Failed to enable AC DC GPIO Interrupt!",
				);
	}

	return 0;
}
1196
/*
 * Disable SCLK and MCLK DPM via SMC messages. Each domain is skipped when
 * its "key disabled" flag is set, and the function bails out (returning
 * success) if the SMU's DPM engine is not running, since the messages would
 * be meaningless then.
 */
static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* disable SCLK dpm */
	if (!data->sclk_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to disable SCLK DPM when DPM is disabled",
				return 0);
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable, NULL);
	}

	/* disable MCLK dpm */
	if (!data->mclk_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to disable MCLK DPM when DPM is disabled",
				return 0);
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable, NULL);
	}

	return 0;
}
1219
/*
 * Tear down DPM: disable global power management and sclk dynamic PM, then
 * PCIe DPM, SCLK/MCLK DPM, and finally dynamic voltage control — roughly
 * the reverse of smu7_start_dpm().
 */
static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* disable general power management */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 0);
	/* disable sclk deep sleep */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 0);

	/* disable PCIE dpm */
	if (!data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_PCIeDPM_Disable,
						NULL) == 0),
				"Failed to disable pcie DPM during DPM Stop Function!",
				return -EINVAL);
	}

	smu7_disable_sclk_mclk_dpm(hwmgr);

	/* voltage control can only be shut off while the SMU DPM engine runs */
	PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
			"Trying to disable voltage DPM when DPM is disabled",
			return 0);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable, NULL);

	return 0;
}
1251
1252static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
1253{
1254        bool protection;
1255        enum DPM_EVENT_SRC src;
1256
1257        switch (sources) {
1258        default:
1259                pr_err("Unknown throttling event sources.");
1260                fallthrough;
1261        case 0:
1262                protection = false;
1263                /* src is unused */
1264                break;
1265        case (1 << PHM_AutoThrottleSource_Thermal):
1266                protection = true;
1267                src = DPM_EVENT_SRC_DIGITAL;
1268                break;
1269        case (1 << PHM_AutoThrottleSource_External):
1270                protection = true;
1271                src = DPM_EVENT_SRC_EXTERNAL;
1272                break;
1273        case (1 << PHM_AutoThrottleSource_External) |
1274                        (1 << PHM_AutoThrottleSource_Thermal):
1275                protection = true;
1276                src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
1277                break;
1278        }
1279        /* Order matters - don't enable thermal protection for the wrong source. */
1280        if (protection) {
1281                PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
1282                                DPM_EVENT_SRC, src);
1283                PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1284                                THERMAL_PROTECTION_DIS,
1285                                !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1286                                                PHM_PlatformCaps_ThermalController));
1287        } else
1288                PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1289                                THERMAL_PROTECTION_DIS, 1);
1290}
1291
1292static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1293                PHM_AutoThrottleSource source)
1294{
1295        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1296
1297        if (!(data->active_auto_throttle_sources & (1 << source))) {
1298                data->active_auto_throttle_sources |= 1 << source;
1299                smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1300        }
1301        return 0;
1302}
1303
/* Enable throttling driven by the thermal event source. */
static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
1308
1309static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1310                PHM_AutoThrottleSource source)
1311{
1312        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1313
1314        if (data->active_auto_throttle_sources & (1 << source)) {
1315                data->active_auto_throttle_sources &= ~(1 << source);
1316                smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1317        }
1318        return 0;
1319}
1320
/* Disable throttling driven by the thermal event source. */
static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
1325
/*
 * Mark that a PCIe performance request is pending. Only sets a flag — no
 * hardware access here; presumably consumed by a later PCIe request path
 * (verify against callers elsewhere in the driver).
 */
static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	data->pcie_performance_request = true;

	return 0;
}
1333
1334static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1335{
1336        int tmp_result = 0;
1337        int result = 0;
1338
1339        if (smu7_voltage_control(hwmgr)) {
1340                tmp_result = smu7_enable_voltage_control(hwmgr);
1341                PP_ASSERT_WITH_CODE(tmp_result == 0,
1342                                "Failed to enable voltage control!",
1343                                result = tmp_result);
1344
1345                tmp_result = smu7_construct_voltage_tables(hwmgr);
1346                PP_ASSERT_WITH_CODE((0 == tmp_result),
1347                                "Failed to construct voltage tables!",
1348                                result = tmp_result);
1349        }
1350        smum_initialize_mc_reg_table(hwmgr);
1351
1352        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1353                        PHM_PlatformCaps_EngineSpreadSpectrumSupport))
1354                PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1355                                GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
1356
1357        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1358                        PHM_PlatformCaps_ThermalController))
1359                PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1360                                GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
1361
1362        tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
1363        PP_ASSERT_WITH_CODE((0 == tmp_result),
1364                        "Failed to program static screen threshold parameters!",
1365                        result = tmp_result);
1366
1367        tmp_result = smu7_enable_display_gap(hwmgr);
1368        PP_ASSERT_WITH_CODE((0 == tmp_result),
1369                        "Failed to enable display gap!", result = tmp_result);
1370
1371        tmp_result = smu7_program_voting_clients(hwmgr);
1372        PP_ASSERT_WITH_CODE((0 == tmp_result),
1373                        "Failed to program voting clients!", result = tmp_result);
1374
1375        tmp_result = smum_process_firmware_header(hwmgr);
1376        PP_ASSERT_WITH_CODE((0 == tmp_result),
1377                        "Failed to process firmware header!", result = tmp_result);
1378
1379        if (hwmgr->chip_id != CHIP_VEGAM) {
1380                tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
1381                PP_ASSERT_WITH_CODE((0 == tmp_result),
1382                                "Failed to initialize switch from ArbF0 to F1!",
1383                                result = tmp_result);
1384        }
1385
1386        result = smu7_setup_default_dpm_tables(hwmgr);
1387        PP_ASSERT_WITH_CODE(0 == result,
1388                        "Failed to setup default DPM tables!", return result);
1389
1390        tmp_result = smum_init_smc_table(hwmgr);
1391        PP_ASSERT_WITH_CODE((0 == tmp_result),
1392                        "Failed to initialize SMC table!", result = tmp_result);
1393
1394        tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
1395        PP_ASSERT_WITH_CODE((0 == tmp_result),
1396                        "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
1397
1398        smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL);
1399
1400        tmp_result = smu7_enable_sclk_control(hwmgr);
1401        PP_ASSERT_WITH_CODE((0 == tmp_result),
1402                        "Failed to enable SCLK control!", result = tmp_result);
1403
1404        tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
1405        PP_ASSERT_WITH_CODE((0 == tmp_result),
1406                        "Failed to enable voltage control!", result = tmp_result);
1407
1408        tmp_result = smu7_enable_ulv(hwmgr);
1409        PP_ASSERT_WITH_CODE((0 == tmp_result),
1410                        "Failed to enable ULV!", result = tmp_result);
1411
1412        tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
1413        PP_ASSERT_WITH_CODE((0 == tmp_result),
1414                        "Failed to enable deep sleep master switch!", result = tmp_result);
1415
1416        tmp_result = smu7_enable_didt_config(hwmgr);
1417        PP_ASSERT_WITH_CODE((tmp_result == 0),
1418                        "Failed to enable deep sleep master switch!", result = tmp_result);
1419
1420        tmp_result = smu7_start_dpm(hwmgr);
1421        PP_ASSERT_WITH_CODE((0 == tmp_result),
1422                        "Failed to start DPM!", result = tmp_result);
1423
1424        tmp_result = smu7_enable_smc_cac(hwmgr);
1425        PP_ASSERT_WITH_CODE((0 == tmp_result),
1426                        "Failed to enable SMC CAC!", result = tmp_result);
1427
1428        tmp_result = smu7_enable_power_containment(hwmgr);
1429        PP_ASSERT_WITH_CODE((0 == tmp_result),
1430                        "Failed to enable power containment!", result = tmp_result);
1431
1432        tmp_result = smu7_power_control_set_level(hwmgr);
1433        PP_ASSERT_WITH_CODE((0 == tmp_result),
1434                        "Failed to power control set level!", result = tmp_result);
1435
1436        tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
1437        PP_ASSERT_WITH_CODE((0 == tmp_result),
1438                        "Failed to enable thermal auto throttle!", result = tmp_result);
1439
1440        tmp_result = smu7_pcie_performance_request(hwmgr);
1441        PP_ASSERT_WITH_CODE((0 == tmp_result),
1442                        "pcie performance request failed!", result = tmp_result);
1443
1444        return 0;
1445}
1446
/*
 * Enable or disable AVFS on the SMC, sending the message only when the
 * current FEATURE_STATUS.AVS_ON state differs from the requested state.
 * Returns 0 on success (or when AVFS is unsupported), -EINVAL if the SMC
 * message fails.
 */
static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
{
	/* Nothing to do if this SMU firmware has no AVFS support. */
	if (!hwmgr->avfs_supported)
		return 0;

	if (enable) {
		/* Only send EnableAvfs when AVFS is currently off. */
		if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
				CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
			PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
					hwmgr, PPSMC_MSG_EnableAvfs, NULL),
					"Failed to enable AVFS!",
					return -EINVAL);
		}
	} else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
			CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
		/* Only send DisableAvfs when AVFS is currently on. */
		PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
				hwmgr, PPSMC_MSG_DisableAvfs, NULL),
				"Failed to disable AVFS!",
				return -EINVAL);
	}

	return 0;
}
1470
1471static int smu7_update_avfs(struct pp_hwmgr *hwmgr)
1472{
1473        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1474
1475        if (!hwmgr->avfs_supported)
1476                return 0;
1477
1478        if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
1479                smu7_avfs_control(hwmgr, false);
1480        } else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
1481                smu7_avfs_control(hwmgr, false);
1482                smu7_avfs_control(hwmgr, true);
1483        } else {
1484                smu7_avfs_control(hwmgr, true);
1485        }
1486
1487        return 0;
1488}
1489
/*
 * smu7_disable_dpm_tasks - tear down DPM in (roughly) the reverse order of
 * smu7_enable_dpm_tasks. Each step logs its own failure and latches the
 * error into 'result'; the sequence always runs to completion.
 */
static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	/* Re-assert thermal protection disable before shutting features down. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);

	tmp_result = smu7_disable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable power containment!", result = tmp_result);

	tmp_result = smu7_disable_smc_cac(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable SMC CAC!", result = tmp_result);

	tmp_result = smu7_disable_didt_config(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable DIDT!", result = tmp_result);

	/* Turn off spread spectrum (both the SPLL source and the dynamic enable). */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);

	tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable thermal auto throttle!", result = tmp_result);

	tmp_result = smu7_avfs_control(hwmgr, false);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable AVFS!", result = tmp_result);

	tmp_result = smu7_stop_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to stop DPM!", result = tmp_result);

	tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable deep sleep master switch!", result = tmp_result);

	tmp_result = smu7_disable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable ULV!", result = tmp_result);

	tmp_result = smu7_clear_voting_clients(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to clear voting clients!", result = tmp_result);

	tmp_result = smu7_reset_to_default(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to reset to default!", result = tmp_result);

	tmp_result = smu7_force_switch_to_arbf0(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to force to switch arbf0!", result = tmp_result);

	/* Unlike the enable path, the first latched error is returned. */
	return result;
}
1550
1551static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
1552{
1553        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1554        struct phm_ppt_v1_information *table_info =
1555                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
1556        struct amdgpu_device *adev = hwmgr->adev;
1557
1558        data->dll_default_on = false;
1559        data->mclk_dpm0_activity_target = 0xa;
1560        data->vddc_vddgfx_delta = 300;
1561        data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
1562        data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
1563        data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
1564        data->voting_rights_clients[1]= SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
1565        data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
1566        data->voting_rights_clients[3]= SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
1567        data->voting_rights_clients[4]= SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
1568        data->voting_rights_clients[5]= SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
1569        data->voting_rights_clients[6]= SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
1570        data->voting_rights_clients[7]= SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
1571
1572        data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
1573        data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
1574        data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
1575        /* need to set voltage control types before EVV patching */
1576        data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
1577        data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
1578        data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
1579        data->enable_tdc_limit_feature = true;
1580        data->enable_pkg_pwr_tracking_feature = true;
1581        data->force_pcie_gen = PP_PCIEGenInvalid;
1582        data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
1583        data->current_profile_setting.bupdate_sclk = 1;
1584        data->current_profile_setting.sclk_up_hyst = 0;
1585        data->current_profile_setting.sclk_down_hyst = 100;
1586        data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
1587        data->current_profile_setting.bupdate_mclk = 1;
1588        data->current_profile_setting.mclk_up_hyst = 0;
1589        data->current_profile_setting.mclk_down_hyst = 100;
1590        data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
1591        hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
1592        hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1593        hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1594
1595        if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) {
1596                uint8_t tmp1, tmp2;
1597                uint16_t tmp3 = 0;
1598                atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2,
1599                                                &tmp3);
1600                tmp3 = (tmp3 >> 5) & 0x3;
1601                data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3;
1602        } else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
1603                data->vddc_phase_shed_control = 1;
1604        } else {
1605                data->vddc_phase_shed_control = 0;
1606        }
1607
1608        if (hwmgr->chip_id  == CHIP_HAWAII) {
1609                data->thermal_temp_setting.temperature_low = 94500;
1610                data->thermal_temp_setting.temperature_high = 95000;
1611                data->thermal_temp_setting.temperature_shutdown = 104000;
1612        } else {
1613                data->thermal_temp_setting.temperature_low = 99500;
1614                data->thermal_temp_setting.temperature_high = 100000;
1615                data->thermal_temp_setting.temperature_shutdown = 104000;
1616        }
1617
1618        data->fast_watermark_threshold = 100;
1619        if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1620                        VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
1621                data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1622        else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1623                        VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
1624                data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1625
1626        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1627                        PHM_PlatformCaps_ControlVDDGFX)) {
1628                if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1629                        VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
1630                        data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1631                }
1632        }
1633
1634        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1635                        PHM_PlatformCaps_EnableMVDDControl)) {
1636                if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1637                                VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
1638                        data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1639                else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1640                                VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
1641                        data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1642        }
1643
1644        if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control)
1645                phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1646                        PHM_PlatformCaps_ControlVDDGFX);
1647
1648        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1649                        PHM_PlatformCaps_ControlVDDCI)) {
1650                if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1651                                VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
1652                        data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1653                else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1654                                VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
1655                        data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1656        }
1657
1658        if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
1659                phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1660                                PHM_PlatformCaps_EnableMVDDControl);
1661
1662        if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
1663                phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1664                                PHM_PlatformCaps_ControlVDDCI);
1665
1666        if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK)
1667                && (table_info->cac_dtp_table->usClockStretchAmount != 0))
1668                phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1669                                        PHM_PlatformCaps_ClockStretcher);
1670
1671        data->pcie_gen_performance.max = PP_PCIEGen1;
1672        data->pcie_gen_performance.min = PP_PCIEGen3;
1673        data->pcie_gen_power_saving.max = PP_PCIEGen1;
1674        data->pcie_gen_power_saving.min = PP_PCIEGen3;
1675        data->pcie_lane_performance.max = 0;
1676        data->pcie_lane_performance.min = 16;
1677        data->pcie_lane_power_saving.max = 0;
1678        data->pcie_lane_power_saving.min = 16;
1679
1680
1681        if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
1682                phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1683                              PHM_PlatformCaps_UVDPowerGating);
1684        if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
1685                phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1686                              PHM_PlatformCaps_VCEPowerGating);
1687}
1688
/**
 * smu7_get_evv_voltages - read the EVV (leakage) voltage for each virtual
 * voltage ID and record it in the backend's leakage tables.
 *
 * @hwmgr: the address of the powerplay hardware manager
 * Return: always 0 on the normal path; -EINVAL on an out-of-range voltage
 * or a missing dependency table.
 */
static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint16_t vv_id;
	uint16_t vddc = 0;
	uint16_t vddgfx = 0;
	uint16_t i, j;
	uint32_t sclk = 0;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;


	/* Walk every virtual voltage ID the VBIOS may have assigned. */
	for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;

		if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
			/* VDDGFX path: only meaningful for v1 pptables. */
			if ((hwmgr->pp_table_version == PP_TABLE_V1)
			    && !phm_get_sclk_for_voltage_evv(hwmgr,
						table_info->vddgfx_lookup_table, vv_id, &sclk)) {
				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
							PHM_PlatformCaps_ClockStretcher)) {
					sclk_table = table_info->vdd_dep_on_sclk;

					/*
					 * Bump sclk past a level with clock
					 * stretching disabled so EVV is read
					 * at a valid operating point.
					 */
					for (j = 1; j < sclk_table->count; j++) {
						if (sclk_table->entries[j].clk == sclk &&
								sclk_table->entries[j].cks_enable == 0) {
							sclk += 5000;
							break;
						}
					}
				}
				if (0 == atomctrl_get_voltage_evv_on_sclk
				    (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
				     vv_id, &vddgfx)) {
					/* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */
					PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);

					/* the voltage should not be zero nor equal to leakage ID */
					if (vddgfx != 0 && vddgfx != vv_id) {
						data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
						data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
						data->vddcgfx_leakage.count++;
					}
				} else {
					pr_info("Error retrieving EVV voltage value!\n");
				}
			}
		} else {
			/*
			 * VDDC path. For v0 tables the || short-circuits, so
			 * table_info->vddc_lookup_table is never dereferenced.
			 */
			if ((hwmgr->pp_table_version == PP_TABLE_V0)
				|| !phm_get_sclk_for_voltage_evv(hwmgr,
					table_info->vddc_lookup_table, vv_id, &sclk)) {
				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_ClockStretcher)) {
					/*
					 * NOTE(review): this NULL check can
					 * only matter on the V0 branch — on
					 * V1 table_info was already used
					 * above. Confirm a V0 pptable can
					 * reach here with ClockStretcher set.
					 */
					if (table_info == NULL)
						return -EINVAL;
					sclk_table = table_info->vdd_dep_on_sclk;

					/* Same CKS-disabled sclk bump as the VDDGFX path. */
					for (j = 1; j < sclk_table->count; j++) {
						if (sclk_table->entries[j].clk == sclk &&
								sclk_table->entries[j].cks_enable == 0) {
							sclk += 5000;
							break;
						}
					}
				}

				if (phm_get_voltage_evv_on_sclk(hwmgr,
							VOLTAGE_TYPE_VDDC,
							sclk, vv_id, &vddc) == 0) {
					/* >= 2V (or 0) would be unsafe/invalid for the ASIC. */
					if (vddc >= 2000 || vddc == 0)
						return -EINVAL;
				} else {
					pr_debug("failed to retrieving EVV voltage!\n");
					continue;
				}

				/* the voltage should not be zero nor equal to leakage ID */
				if (vddc != 0 && vddc != vv_id) {
					data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
					data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
					data->vddc_leakage.count++;
				}
			}
		}
	}

	return 0;
}
1784
1785/**
1786 * Change virtual leakage voltage to actual value.
1787 *
1788 * @param     hwmgr  the address of the powerplay hardware manager.
1789 * @param     pointer to changing voltage
1790 * @param     pointer to leakage table
1791 */
1792static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
1793                uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
1794{
1795        uint32_t index;
1796
1797        /* search for leakage voltage ID 0xff01 ~ 0xff08 */
1798        for (index = 0; index < leakage_table->count; index++) {
1799                /* if this voltage matches a leakage voltage ID */
1800                /* patch with actual leakage voltage */
1801                if (leakage_table->leakage_id[index] == *voltage) {
1802                        *voltage = leakage_table->actual_voltage[index];
1803                        break;
1804                }
1805        }
1806
1807        if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
1808                pr_err("Voltage value looks like a Leakage ID but it's not patched \n");
1809}
1810
1811/**
1812* Patch voltage lookup table by EVV leakages.
1813*
1814* @param     hwmgr  the address of the powerplay hardware manager.
1815* @param     pointer to voltage lookup table
1816* @param     pointer to leakage table
1817* @return     always 0
1818*/
1819static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
1820                phm_ppt_v1_voltage_lookup_table *lookup_table,
1821                struct smu7_leakage_voltage *leakage_table)
1822{
1823        uint32_t i;
1824
1825        for (i = 0; i < lookup_table->count; i++)
1826                smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
1827                                &lookup_table->entries[i].us_vdd, leakage_table);
1828
1829        return 0;
1830}
1831
1832static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
1833                struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
1834                uint16_t *vddc)
1835{
1836        struct phm_ppt_v1_information *table_info =
1837                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
1838        smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
1839        hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
1840                        table_info->max_clock_voltage_on_dc.vddc;
1841        return 0;
1842}
1843
/*
 * Resolve the vddInd/vddcInd indices stored in the sclk, mclk and MM
 * dependency tables into concrete voltages from the lookup tables.
 * Always returns 0.
 */
static int smu7_patch_voltage_dependency_tables_with_lookup_table(
		struct pp_hwmgr *hwmgr)
{
	uint8_t entry_id;
	uint8_t voltage_id;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
			table_info->vdd_dep_on_sclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
			table_info->vdd_dep_on_mclk;
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
			table_info->mm_dep_table;

	/* With split-rail SVI2, sclk levels are driven by VDDGFX, else VDDC. */
	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
			voltage_id = sclk_table->entries[entry_id].vddInd;
			sclk_table->entries[entry_id].vddgfx =
				table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
		}
	} else {
		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
			voltage_id = sclk_table->entries[entry_id].vddInd;
			sclk_table->entries[entry_id].vddc =
				table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
		}
	}

	/* mclk and multimedia levels always resolve against VDDC. */
	for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
		voltage_id = mclk_table->entries[entry_id].vddInd;
		mclk_table->entries[entry_id].vddc =
			table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
	}

	for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
		voltage_id = mm_table->entries[entry_id].vddcInd;
		mm_table->entries[entry_id].vddc =
			table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
	}

	return 0;

}
1889
/*
 * Insert a calculated voltage record into a lookup table, replacing an
 * existing non-calculated entry with the same us_vdd rather than
 * duplicating it. Returns 0 on success, -EINVAL on an empty/full table.
 */
static int phm_add_voltage(struct pp_hwmgr *hwmgr,
			phm_ppt_v1_voltage_lookup_table *look_up_table,
			phm_ppt_v1_voltage_lookup_record *record)
{
	uint32_t i;

	PP_ASSERT_WITH_CODE((NULL != look_up_table),
		"Lookup Table empty.", return -EINVAL);
	PP_ASSERT_WITH_CODE((0 != look_up_table->count),
		"Lookup Table empty.", return -EINVAL);

	/*
	 * Capacity check. NOTE(review): when count == capacity this still
	 * passes, and a non-duplicate append below would write at index
	 * 'count' — confirm SMU_MAX_LEVELS_VDDGFX leaves headroom, or the
	 * check should be strict (>).
	 */
	i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
	PP_ASSERT_WITH_CODE((i >= look_up_table->count),
		"Lookup Table is full.", return -EINVAL);

	/* This is to avoid entering duplicate calculated records. */
	for (i = 0; i < look_up_table->count; i++) {
		if (look_up_table->entries[i].us_vdd == record->us_vdd) {
			/* A calculated entry already exists: nothing to do. */
			if (look_up_table->entries[i].us_calculated == 1)
				return 0;
			break;
		}
	}

	/* i is either the matched slot or look_up_table->count (append). */
	look_up_table->entries[i].us_calculated = 1;
	look_up_table->entries[i].us_vdd = record->us_vdd;
	look_up_table->entries[i].us_cac_low = record->us_cac_low;
	look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
	look_up_table->entries[i].us_cac_high = record->us_cac_high;
	/* Only increment the count when we're appending, not replacing duplicate entry. */
	if (i == look_up_table->count)
		look_up_table->count++;

	return 0;
}
1925
1926
/*
 * For split-rail (SVI2 VDDGFX) parts, derive the missing rail's voltage in
 * the sclk and mclk dependency tables from the stored offset and feed each
 * derived value into the corresponding lookup table. Always returns 0.
 */
static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
{
	uint8_t entry_id;
	struct phm_ppt_v1_voltage_lookup_record v_record;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);

	phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
	phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;

	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
			/*
			 * vdd_offset is a signed 16-bit value: bit 15 set
			 * means negative. NOTE(review): two's-complement
			 * conversion would subtract 0x10000; the 0xFFFF here
			 * looks off by one — confirm the VBIOS encoding.
			 */
			if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
				v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
					sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
			else
				v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
					sclk_table->entries[entry_id].vdd_offset;

			sclk_table->entries[entry_id].vddc =
				v_record.us_cac_low = v_record.us_cac_mid =
				v_record.us_cac_high = v_record.us_vdd;

			phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
		}

		for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
			/* Same signed-offset handling for the mclk table. */
			if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
				v_record.us_vdd = mclk_table->entries[entry_id].vddc +
					mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
			else
				v_record.us_vdd = mclk_table->entries[entry_id].vddc +
					mclk_table->entries[entry_id].vdd_offset;

			mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
			phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
		}
	}
	return 0;
}
1968
/*
 * Same derivation as smu7_calc_voltage_dependency_tables, but for the
 * multimedia (MM) dependency table: compute VDDGFX from VDDC plus the
 * signed vddgfx_offset and register it in the VDDGFX lookup table.
 * Always returns 0.
 */
static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
{
	uint8_t entry_id;
	struct phm_ppt_v1_voltage_lookup_record v_record;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;

	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
			/*
			 * vddgfx_offset is signed 16-bit (bit 15 = negative);
			 * see the matching NOTE(review) about 0xFFFF in
			 * smu7_calc_voltage_dependency_tables.
			 */
			if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
				v_record.us_vdd = mm_table->entries[entry_id].vddc +
					mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
			else
				v_record.us_vdd = mm_table->entries[entry_id].vddc +
					mm_table->entries[entry_id].vddgfx_offset;

			/* Add the calculated VDDGFX to the VDDGFX lookup table */
			mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
			phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
		}
	}
	return 0;
}
1994
1995static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
1996                struct phm_ppt_v1_voltage_lookup_table *lookup_table)
1997{
1998        uint32_t table_size, i, j;
1999        table_size = lookup_table->count;
2000
2001        PP_ASSERT_WITH_CODE(0 != lookup_table->count,
2002                "Lookup table is empty", return -EINVAL);
2003
2004        /* Sorting voltages */
2005        for (i = 0; i < table_size - 1; i++) {
2006                for (j = i + 1; j > 0; j--) {
2007                        if (lookup_table->entries[j].us_vdd <
2008                                        lookup_table->entries[j - 1].us_vdd) {
2009                                swap(lookup_table->entries[j - 1],
2010                                     lookup_table->entries[j]);
2011                        }
2012                }
2013        }
2014
2015        return 0;
2016}
2017
/*
 * smu7_complete_dependency_tables - finalize the v1 pptable dependency
 * tables: patch virtual leakage IDs with actual voltages, compute derived
 * dependency tables, then sort the lookup tables.
 *
 * Each step's error is recorded but does not abort the sequence; the last
 * non-zero status is returned so later steps still run on a partial
 * failure.
 */
static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	int tmp_result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	/* Leakage patching targets VDDGFX when GFX rail is on SVID2,
	 * otherwise plain VDDC.
	 */
	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
			table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
		if (tmp_result != 0)
			result = tmp_result;

		smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
			&table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
	} else {

		tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
				table_info->vddc_lookup_table, &(data->vddc_leakage));
		if (tmp_result)
			result = tmp_result;

		tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
				&(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
		if (tmp_result)
			result = tmp_result;
	}

	/* Propagate patched lookup values into the clock dependency tables
	 * before deriving the calculated tables from them.
	 */
	tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
	if (tmp_result)
		result = tmp_result;

	tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
	if (tmp_result)
		result = tmp_result;

	tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
	if (tmp_result)
		result = tmp_result;

	/* Sort last: the calc steps above may have appended new entries. */
	tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
	if (tmp_result)
		result = tmp_result;

	tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
	if (tmp_result)
		result = tmp_result;

	return result;
}
2069
2070static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
2071{
2072        struct phm_ppt_v1_information *table_info =
2073                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
2074
2075        struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
2076                                                table_info->vdd_dep_on_sclk;
2077        struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
2078                                                table_info->vdd_dep_on_mclk;
2079
2080        PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
2081                "VDD dependency on SCLK table is missing.",
2082                return -EINVAL);
2083        PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
2084                "VDD dependency on SCLK table has to have is missing.",
2085                return -EINVAL);
2086
2087        PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
2088                "VDD dependency on MCLK table is missing",
2089                return -EINVAL);
2090        PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
2091                "VDD dependency on MCLK table has to have is missing.",
2092                return -EINVAL);
2093
2094        table_info->max_clock_voltage_on_ac.sclk =
2095                allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
2096        table_info->max_clock_voltage_on_ac.mclk =
2097                allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
2098        table_info->max_clock_voltage_on_ac.vddc =
2099                allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
2100        table_info->max_clock_voltage_on_ac.vddci =
2101                allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
2102
2103        hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
2104        hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
2105        hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
2106        hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
2107
2108        return 0;
2109}
2110
/*
 * smu7_patch_voltage_workaround - board-specific MCLK voltage fixup.
 *
 * On specific Polaris10 boards (rev 0xC7 with the listed subsystem
 * vendor/device IDs) the top MCLK state must run at >= 1000 mV; if the
 * current lookup entry is below that, retarget the state's voltage index
 * to the first real (non-leakage) entry at or above 1000.
 *
 * Always returns 0; absence of a v1 pptable is treated as "nothing to do".
 */
static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v1_information *table_info =
	               (struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
	struct phm_ppt_v1_voltage_lookup_table *lookup_table;
	uint32_t i;
	uint32_t hw_revision, sub_vendor_id, sub_sys_id;
	struct amdgpu_device *adev = hwmgr->adev;

	if (table_info != NULL) {
		dep_mclk_table = table_info->vdd_dep_on_mclk;
		lookup_table = table_info->vddc_lookup_table;
	} else
		return 0;

	hw_revision = adev->pdev->revision;
	sub_sys_id = adev->pdev->subsystem_device;
	sub_vendor_id = adev->pdev->subsystem_vendor;

	/* Affected boards: 0x1002 (AMD), 0x1043 (ASUS), 0x1682 (XFX). */
	if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
			((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
		    (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
		    (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
		/* Already at or above 1000 mV: nothing to patch. */
		if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
			return 0;

		/* Values >= 0xff01 are virtual leakage IDs, not voltages;
		 * pick the first genuine voltage >= 1000.
		 */
		for (i = 0; i < lookup_table->count; i++) {
			if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
				dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
				return 0;
			}
		}
	}
	return 0;
}
2147
/*
 * smu7_thermal_parameter_init - program the VDDC PCC GPIO routing and seed
 * the advanced fan-control / operating-temperature parameters from the
 * powerplay table.
 *
 * Returns 0 unconditionally (missing v1 table_info simply skips the
 * fan/thermal parameter setup).
 */
static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
{
	struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
	uint32_t temp_reg;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);


	/* If the VBIOS assigns a VDDC PCC GPIO, map its pin bit-shift onto
	 * the corresponding CNB_PWRMGT_CNTL field via read-modify-write.
	 */
	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
		temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
		switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
		case 0:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
			break;
		case 1:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
			break;
		case 2:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
			break;
		case 3:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
			break;
		case 4:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
			break;
		default:
			/* Unknown pin assignment: leave register untouched fields as-is. */
			break;
		}
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
	}

	if (table_info == NULL)
		return 0;

	/* Only populate fan/thermal limits when the table provides a target
	 * operating temperature and a fan control mode is selected.
	 */
	if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
		hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;

		/* NOTE(review): the stored target temp appears to carry a
		 * +50 offset; subtract it, clamping at 0 — confirm against
		 * the VBIOS/pptable spec.
		 */
		table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
								(table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;

		table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
		table_info->cac_dtp_table->usOperatingTempStep = 1;
		table_info->cac_dtp_table->usOperatingTempHyst = 1;

		hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
		               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;

		hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
		               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;

		/* Mirror the (adjusted) table limits into the legacy
		 * dyn_state copy used elsewhere in the driver.
		 */
		hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
		               table_info->cac_dtp_table->usOperatingTempMinLimit;

		hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
		               table_info->cac_dtp_table->usOperatingTempMaxLimit;

		hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
		               table_info->cac_dtp_table->usDefaultTargetOperatingTemp;

		hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
		               table_info->cac_dtp_table->usOperatingTempStep;

		hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
		               table_info->cac_dtp_table->usTargetOperatingTemp;
		if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_ODFuzzyFanControlSupport);
	}

	return 0;
}
2234
2235/**
2236 * Change virtual leakage voltage to actual value.
2237 *
2238 * @param     hwmgr  the address of the powerplay hardware manager.
2239 * @param     pointer to changing voltage
2240 * @param     pointer to leakage table
2241 */
2242static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2243                uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
2244{
2245        uint32_t index;
2246
2247        /* search for leakage voltage ID 0xff01 ~ 0xff08 */
2248        for (index = 0; index < leakage_table->count; index++) {
2249                /* if this voltage matches a leakage voltage ID */
2250                /* patch with actual leakage voltage */
2251                if (leakage_table->leakage_id[index] == *voltage) {
2252                        *voltage = leakage_table->actual_voltage[index];
2253                        break;
2254                }
2255        }
2256
2257        if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2258                pr_err("Voltage value looks like a Leakage ID but it's not patched \n");
2259}
2260
2261
2262static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
2263                              struct phm_clock_voltage_dependency_table *tab)
2264{
2265        uint16_t i;
2266        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2267
2268        if (tab)
2269                for (i = 0; i < tab->count; i++)
2270                        smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2271                                                &data->vddc_leakage);
2272
2273        return 0;
2274}
2275
2276static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
2277                               struct phm_clock_voltage_dependency_table *tab)
2278{
2279        uint16_t i;
2280        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2281
2282        if (tab)
2283                for (i = 0; i < tab->count; i++)
2284                        smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2285                                                        &data->vddci_leakage);
2286
2287        return 0;
2288}
2289
2290static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
2291                                  struct phm_vce_clock_voltage_dependency_table *tab)
2292{
2293        uint16_t i;
2294        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2295
2296        if (tab)
2297                for (i = 0; i < tab->count; i++)
2298                        smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2299                                                        &data->vddc_leakage);
2300
2301        return 0;
2302}
2303
2304
2305static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
2306                                  struct phm_uvd_clock_voltage_dependency_table *tab)
2307{
2308        uint16_t i;
2309        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2310
2311        if (tab)
2312                for (i = 0; i < tab->count; i++)
2313                        smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2314                                                        &data->vddc_leakage);
2315
2316        return 0;
2317}
2318
2319static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
2320                                         struct phm_phase_shedding_limits_table *tab)
2321{
2322        uint16_t i;
2323        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2324
2325        if (tab)
2326                for (i = 0; i < tab->count; i++)
2327                        smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
2328                                                        &data->vddc_leakage);
2329
2330        return 0;
2331}
2332
2333static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
2334                                   struct phm_samu_clock_voltage_dependency_table *tab)
2335{
2336        uint16_t i;
2337        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2338
2339        if (tab)
2340                for (i = 0; i < tab->count; i++)
2341                        smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2342                                                        &data->vddc_leakage);
2343
2344        return 0;
2345}
2346
2347static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
2348                                  struct phm_acp_clock_voltage_dependency_table *tab)
2349{
2350        uint16_t i;
2351        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2352
2353        if (tab)
2354                for (i = 0; i < tab->count; i++)
2355                        smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2356                                        &data->vddc_leakage);
2357
2358        return 0;
2359}
2360
2361static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
2362                                  struct phm_clock_and_voltage_limits *tab)
2363{
2364        uint32_t vddc, vddci;
2365        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2366
2367        if (tab) {
2368                vddc = tab->vddc;
2369                smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
2370                                                   &data->vddc_leakage);
2371                tab->vddc = vddc;
2372                vddci = tab->vddci;
2373                smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
2374                                                   &data->vddci_leakage);
2375                tab->vddci = vddci;
2376        }
2377
2378        return 0;
2379}
2380
2381static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
2382{
2383        uint32_t i;
2384        uint32_t vddc;
2385        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2386
2387        if (tab) {
2388                for (i = 0; i < tab->count; i++) {
2389                        vddc = (uint32_t)(tab->entries[i].Vddc);
2390                        smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
2391                        tab->entries[i].Vddc = (uint16_t)vddc;
2392                }
2393        }
2394
2395        return 0;
2396}
2397
/*
 * smu7_patch_dependency_tables_with_leakage - run every v0 dependency
 * table through its leakage-patching helper.
 *
 * Any single failure aborts the sequence with -EINVAL (unlike the v1
 * path in smu7_complete_dependency_tables(), which records and
 * continues).
 */
static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
{
	int tmp;

	tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
	if (tmp)
		return -EINVAL;

	tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
	if (tmp)
		return -EINVAL;

	tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
	if (tmp)
		return -EINVAL;

	tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
	if (tmp)
		return -EINVAL;

	tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
	if (tmp)
		return -EINVAL;

	tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
	if (tmp)
		return -EINVAL;

	tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
	if (tmp)
		return -EINVAL;

	tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
	if (tmp)
		return -EINVAL;

	tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
	if (tmp)
		return -EINVAL;

	tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
	if (tmp)
		return -EINVAL;

	tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
	if (tmp)
		return -EINVAL;

	tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
	if (tmp)
		return -EINVAL;

	return 0;
}
2452
2453
2454static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
2455{
2456        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2457
2458        struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
2459        struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
2460        struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
2461
2462        PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
2463                "VDDC dependency on SCLK table is missing. This table is mandatory",
2464                return -EINVAL);
2465        PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
2466                "VDDC dependency on SCLK table has to have is missing. This table is mandatory",
2467                return -EINVAL);
2468
2469        PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
2470                "VDDC dependency on MCLK table is missing. This table is mandatory",
2471                return -EINVAL);
2472        PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
2473                "VDD dependency on MCLK table has to have is missing. This table is mandatory",
2474                return -EINVAL);
2475
2476        data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
2477        data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2478
2479        hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
2480                allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
2481        hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
2482                allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
2483        hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
2484                allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2485
2486        if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
2487                data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
2488                data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
2489        }
2490
2491        if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1)
2492                hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
2493
2494        return 0;
2495}
2496
2497static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
2498{
2499        kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2500        hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
2501        kfree(hwmgr->backend);
2502        hwmgr->backend = NULL;
2503
2504        return 0;
2505}
2506
/*
 * smu7_get_elb_voltages - populate the VDDC/VDDCI leakage translation
 * tables from efuse-based leakage data.
 *
 * For each virtual leakage ID (ATOM_VIRTUAL_VOLTAGE_ID0 + i), query the
 * actual voltages via ATOM; a returned voltage of 0 or one equal to the
 * virtual ID itself means "no translation" and is skipped.  The loop is
 * bounded by SMU7_MAX_LEAKAGE_COUNT, so the count fields cannot overrun
 * their arrays.
 *
 * Always returns 0; a failed efuse read simply leaves the tables empty.
 */
static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr)
{
	uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	int i;

	if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) {
		for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci,
								virtual_voltage_id,
								efuse_voltage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
					data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
					data->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci;
					data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id;
					data->vddci_leakage.count++;
				}
			}
		}
	}
	return 0;
}
2534
2535static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2536{
2537        struct smu7_hwmgr *data;
2538        int result = 0;
2539
2540        data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
2541        if (data == NULL)
2542                return -ENOMEM;
2543
2544        hwmgr->backend = data;
2545        smu7_patch_voltage_workaround(hwmgr);
2546        smu7_init_dpm_defaults(hwmgr);
2547
2548        /* Get leakage voltage based on leakage ID. */
2549        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2550                        PHM_PlatformCaps_EVV)) {
2551                result = smu7_get_evv_voltages(hwmgr);
2552                if (result) {
2553                        pr_info("Get EVV Voltage Failed.  Abort Driver loading!\n");
2554                        return -EINVAL;
2555                }
2556        } else {
2557                smu7_get_elb_voltages(hwmgr);
2558        }
2559
2560        if (hwmgr->pp_table_version == PP_TABLE_V1) {
2561                smu7_complete_dependency_tables(hwmgr);
2562                smu7_set_private_data_based_on_pptable_v1(hwmgr);
2563        } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
2564                smu7_patch_dependency_tables_with_leakage(hwmgr);
2565                smu7_set_private_data_based_on_pptable_v0(hwmgr);
2566        }
2567
2568        /* Initalize Dynamic State Adjustment Rule Settings */
2569        result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
2570
2571        if (0 == result) {
2572                struct amdgpu_device *adev = hwmgr->adev;
2573
2574                data->is_tlu_enabled = false;
2575
2576                hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
2577                                                        SMU7_MAX_HARDWARE_POWERLEVELS;
2578                hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
2579                hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
2580
2581                data->pcie_gen_cap = adev->pm.pcie_gen_mask;
2582                if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2583                        data->pcie_spc_cap = 20;
2584                data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
2585
2586                hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
2587/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
2588                hwmgr->platform_descriptor.clockStep.engineClock = 500;
2589                hwmgr->platform_descriptor.clockStep.memoryClock = 500;
2590                smu7_thermal_parameter_init(hwmgr);
2591        } else {
2592                /* Ignore return value in here, we are cleaning up a mess. */
2593                smu7_hwmgr_backend_fini(hwmgr);
2594        }
2595
2596        return 0;
2597}
2598
/*
 * smu7_force_dpm_highest - force PCIe, SCLK and MCLK DPM to their highest
 * enabled level.
 *
 * For each domain the highest level is the index of the most-significant
 * set bit of its enable mask (computed by the shift loop below).  PCIe is
 * forced to that level directly; SCLK/MCLK are restricted by sending an
 * enabled-mask containing only that level's bit.  A level of 0 means the
 * lowest level is the only one enabled, so no message is needed.
 */
static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t level, tmp;

	if (!data->pcie_dpm_key_disabled) {
		if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			/* level = index of highest set bit in the mask */
			level = 0;
			tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
			while (tmp >>= 1)
				level++;

			if (level)
				smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_PCIeDPM_ForceLevel, level,
						NULL);
		}
	}

	if (!data->sclk_dpm_key_disabled) {
		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			/* level = index of highest set bit in the mask */
			level = 0;
			tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
			while (tmp >>= 1)
				level++;

			if (level)
				smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SCLKDPM_SetEnabledMask,
						(1 << level),
						NULL);
		}
	}

	if (!data->mclk_dpm_key_disabled) {
		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			/* level = index of highest set bit in the mask */
			level = 0;
			tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
			while (tmp >>= 1)
				level++;

			if (level)
				smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_MCLKDPM_SetEnabledMask,
						(1 << level),
						NULL);
		}
	}

	return 0;
}
2650
2651static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
2652{
2653        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2654
2655        if (hwmgr->pp_table_version == PP_TABLE_V1)
2656                phm_apply_dal_min_voltage_request(hwmgr);
2657/* TO DO  for v0 iceland and Ci*/
2658
2659        if (!data->sclk_dpm_key_disabled) {
2660                if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
2661                        smum_send_msg_to_smc_with_parameter(hwmgr,
2662                                        PPSMC_MSG_SCLKDPM_SetEnabledMask,
2663                                        data->dpm_level_enable_mask.sclk_dpm_enable_mask,
2664                                        NULL);
2665        }
2666
2667        if (!data->mclk_dpm_key_disabled) {
2668                if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
2669                        smum_send_msg_to_smc_with_parameter(hwmgr,
2670                                        PPSMC_MSG_MCLKDPM_SetEnabledMask,
2671                                        data->dpm_level_enable_mask.mclk_dpm_enable_mask,
2672                                        NULL);
2673        }
2674
2675        return 0;
2676}
2677
2678static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2679{
2680        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2681
2682        if (!smum_is_dpm_running(hwmgr))
2683                return -EINVAL;
2684
2685        if (!data->pcie_dpm_key_disabled) {
2686                smum_send_msg_to_smc(hwmgr,
2687                                PPSMC_MSG_PCIeDPM_UnForceLevel,
2688                                NULL);
2689        }
2690
2691        return smu7_upload_dpm_level_enable_mask(hwmgr);
2692}
2693
2694static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2695{
2696        struct smu7_hwmgr *data =
2697                        (struct smu7_hwmgr *)(hwmgr->backend);
2698        uint32_t level;
2699
2700        if (!data->sclk_dpm_key_disabled)
2701                if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2702                        level = phm_get_lowest_enabled_level(hwmgr,
2703                                                              data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2704                        smum_send_msg_to_smc_with_parameter(hwmgr,
2705                                                            PPSMC_MSG_SCLKDPM_SetEnabledMask,
2706                                                            (1 << level),
2707                                                            NULL);
2708
2709        }
2710
2711        if (!data->mclk_dpm_key_disabled) {
2712                if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2713                        level = phm_get_lowest_enabled_level(hwmgr,
2714                                                              data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2715                        smum_send_msg_to_smc_with_parameter(hwmgr,
2716                                                            PPSMC_MSG_MCLKDPM_SetEnabledMask,
2717                                                            (1 << level),
2718                                                            NULL);
2719                }
2720        }
2721
2722        if (!data->pcie_dpm_key_disabled) {
2723                if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2724                        level = phm_get_lowest_enabled_level(hwmgr,
2725                                                              data->dpm_level_enable_mask.pcie_dpm_enable_mask);
2726                        smum_send_msg_to_smc_with_parameter(hwmgr,
2727                                                            PPSMC_MSG_PCIeDPM_ForceLevel,
2728                                                            (level),
2729                                                            NULL);
2730                }
2731        }
2732
2733        return 0;
2734}
2735
/*
 * Compute the DPM level masks used for profiling modes.
 *
 * Picks a reference MCLK (second-highest golden level, or the only level
 * when the table has just one entry), derives a target SCLK from the
 * golden sclk:mclk ratio, then maps that SCLK onto the closest entry of
 * the voltage-dependency table for the active pptable version.
 *
 * Outputs: *sclk_mask / *mclk_mask / *pcie_mask (level indices), plus
 * hwmgr->pstate_sclk / pstate_mclk caching the chosen clocks.
 * Returns -EINVAL when the golden MCLK table is empty.
 */
static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
				uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask)
{
	uint32_t percentage;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
	int32_t tmp_mclk;
	int32_t tmp_sclk;
	int32_t count;

	if (golden_dpm_table->mclk_table.count < 1)
		return -EINVAL;

	/* Ratio of highest golden SCLK to highest golden MCLK, in percent. */
	percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
			golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;

	if (golden_dpm_table->mclk_table.count == 1) {
		/* Single-level MCLK table: fall back to a fixed 70% ratio. */
		percentage = 70;
		tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
		*mclk_mask = golden_dpm_table->mclk_table.count - 1;
	} else {
		tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
		*mclk_mask = golden_dpm_table->mclk_table.count - 2;
	}

	tmp_sclk = tmp_mclk * percentage / 100;

	if (hwmgr->pp_table_version == PP_TABLE_V0) {
		/* Scan from the top for the highest entry not above tmp_sclk. */
		for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
			count >= 0; count--) {
			if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
				tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
				*sclk_mask = count;
				break;
			}
		}
		/* No match (count underflowed) or MIN_SCLK requested: lowest level. */
		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
			*sclk_mask = 0;
			tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
		}

		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			*sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
	} else if (hwmgr->pp_table_version == PP_TABLE_V1) {
		struct phm_ppt_v1_information *table_info =
				(struct phm_ppt_v1_information *)(hwmgr->pptable);

		/* Same top-down scan, against the v1 dependency table. */
		for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
			if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
				tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
				*sclk_mask = count;
				break;
			}
		}
		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
			*sclk_mask = 0;
			tmp_sclk =  table_info->vdd_dep_on_sclk->entries[0].clk;
		}

		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
	}

	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
		*mclk_mask = 0;
	else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
		*mclk_mask = golden_dpm_table->mclk_table.count - 1;

	/* PCIe always profiles at its highest speed level. */
	*pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
	hwmgr->pstate_sclk = tmp_sclk;
	hwmgr->pstate_mclk = tmp_mclk;

	return 0;
}
2810
/*
 * Apply a requested forced DPM level (high / low / auto / profiling modes).
 * Profiling levels pin SCLK, MCLK and PCIe to single levels computed by
 * smu7_get_profiling_clk(); manual and profile-exit are no-ops here.
 * Returns 0 on success or the error from the underlying helper.
 */
static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
				enum amd_dpm_forced_level level)
{
	int ret = 0;
	uint32_t sclk_mask = 0;
	uint32_t mclk_mask = 0;
	uint32_t pcie_mask = 0;

	/* Lazily populate hwmgr->pstate_sclk/pstate_mclk on first call. */
	if (hwmgr->pstate_sclk == 0)
		smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		ret = smu7_force_dpm_highest(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		ret = smu7_force_dpm_lowest(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		ret = smu7_unforce_dpm_levels(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		/* Force each domain to the single level chosen for profiling. */
		ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
		if (ret)
			return ret;
		smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
		smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
		smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}

	if (!ret) {
		/*
		 * Run the fan at 100% while entering PEAK profiling, and
		 * restore the default fan behavior when leaving PEAK.
		 * hwmgr->dpm_level still holds the previous level here.
		 */
		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
		else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
	}
	return ret;
}
2857
/* Size of the hardware power-state structure the core should allocate. */
static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct smu7_power_state);
}
2862
2863static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
2864                                 uint32_t vblank_time_us)
2865{
2866        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2867        uint32_t switch_limit_us;
2868
2869        switch (hwmgr->chip_id) {
2870        case CHIP_POLARIS10:
2871        case CHIP_POLARIS11:
2872        case CHIP_POLARIS12:
2873                if (hwmgr->is_kicker)
2874                        switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
2875                else
2876                        switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
2877                break;
2878        case CHIP_VEGAM:
2879                switch_limit_us = 30;
2880                break;
2881        default:
2882                switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
2883                break;
2884        }
2885
2886        if (vblank_time_us < switch_limit_us)
2887                return true;
2888        else
2889                return false;
2890}
2891
2892static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2893                                struct pp_power_state *request_ps,
2894                        const struct pp_power_state *current_ps)
2895{
2896        struct amdgpu_device *adev = hwmgr->adev;
2897        struct smu7_power_state *smu7_ps =
2898                                cast_phw_smu7_power_state(&request_ps->hardware);
2899        uint32_t sclk;
2900        uint32_t mclk;
2901        struct PP_Clocks minimum_clocks = {0};
2902        bool disable_mclk_switching;
2903        bool disable_mclk_switching_for_frame_lock;
2904        const struct phm_clock_and_voltage_limits *max_limits;
2905        uint32_t i;
2906        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2907        struct phm_ppt_v1_information *table_info =
2908                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
2909        int32_t count;
2910        int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
2911
2912        data->battery_state = (PP_StateUILabel_Battery ==
2913                        request_ps->classification.ui_label);
2914
2915        PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
2916                                 "VI should always have 2 performance levels",
2917                                );
2918
2919        max_limits = adev->pm.ac_power ?
2920                        &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
2921                        &(hwmgr->dyn_state.max_clock_voltage_on_dc);
2922
2923        /* Cap clock DPM tables at DC MAX if it is in DC. */
2924        if (!adev->pm.ac_power) {
2925                for (i = 0; i < smu7_ps->performance_level_count; i++) {
2926                        if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
2927                                smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
2928                        if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
2929                                smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
2930                }
2931        }
2932
2933        minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
2934        minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
2935
2936        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2937                        PHM_PlatformCaps_StablePState)) {
2938                max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
2939                stable_pstate_sclk = (max_limits->sclk * 75) / 100;
2940
2941                for (count = table_info->vdd_dep_on_sclk->count - 1;
2942                                count >= 0; count--) {
2943                        if (stable_pstate_sclk >=
2944                                        table_info->vdd_dep_on_sclk->entries[count].clk) {
2945                                stable_pstate_sclk =
2946                                                table_info->vdd_dep_on_sclk->entries[count].clk;
2947                                break;
2948                        }
2949                }
2950
2951                if (count < 0)
2952                        stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
2953
2954                stable_pstate_mclk = max_limits->mclk;
2955
2956                minimum_clocks.engineClock = stable_pstate_sclk;
2957                minimum_clocks.memoryClock = stable_pstate_mclk;
2958        }
2959
2960        disable_mclk_switching_for_frame_lock = phm_cap_enabled(
2961                                    hwmgr->platform_descriptor.platformCaps,
2962                                    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
2963
2964
2965        if (hwmgr->display_config->num_display == 0)
2966                disable_mclk_switching = false;
2967        else
2968                disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
2969                                          !hwmgr->display_config->multi_monitor_in_sync) ||
2970                        disable_mclk_switching_for_frame_lock ||
2971                        smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);
2972
2973        sclk = smu7_ps->performance_levels[0].engine_clock;
2974        mclk = smu7_ps->performance_levels[0].memory_clock;
2975
2976        if (disable_mclk_switching)
2977                mclk = smu7_ps->performance_levels
2978                [smu7_ps->performance_level_count - 1].memory_clock;
2979
2980        if (sclk < minimum_clocks.engineClock)
2981                sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
2982                                max_limits->sclk : minimum_clocks.engineClock;
2983
2984        if (mclk < minimum_clocks.memoryClock)
2985                mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
2986                                max_limits->mclk : minimum_clocks.memoryClock;
2987
2988        smu7_ps->performance_levels[0].engine_clock = sclk;
2989        smu7_ps->performance_levels[0].memory_clock = mclk;
2990
2991        smu7_ps->performance_levels[1].engine_clock =
2992                (smu7_ps->performance_levels[1].engine_clock >=
2993                                smu7_ps->performance_levels[0].engine_clock) ?
2994                                                smu7_ps->performance_levels[1].engine_clock :
2995                                                smu7_ps->performance_levels[0].engine_clock;
2996
2997        if (disable_mclk_switching) {
2998                if (mclk < smu7_ps->performance_levels[1].memory_clock)
2999                        mclk = smu7_ps->performance_levels[1].memory_clock;
3000
3001                smu7_ps->performance_levels[0].memory_clock = mclk;
3002                smu7_ps->performance_levels[1].memory_clock = mclk;
3003        } else {
3004                if (smu7_ps->performance_levels[1].memory_clock <
3005                                smu7_ps->performance_levels[0].memory_clock)
3006                        smu7_ps->performance_levels[1].memory_clock =
3007                                        smu7_ps->performance_levels[0].memory_clock;
3008        }
3009
3010        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3011                        PHM_PlatformCaps_StablePState)) {
3012                for (i = 0; i < smu7_ps->performance_level_count; i++) {
3013                        smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
3014                        smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
3015                        smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
3016                        smu7_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
3017                }
3018        }
3019        return 0;
3020}
3021
3022
3023static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3024{
3025        struct pp_power_state  *ps;
3026        struct smu7_power_state  *smu7_ps;
3027
3028        if (hwmgr == NULL)
3029                return -EINVAL;
3030
3031        ps = hwmgr->request_ps;
3032
3033        if (ps == NULL)
3034                return -EINVAL;
3035
3036        smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3037
3038        if (low)
3039                return smu7_ps->performance_levels[0].memory_clock;
3040        else
3041                return smu7_ps->performance_levels
3042                                [smu7_ps->performance_level_count-1].memory_clock;
3043}
3044
3045static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3046{
3047        struct pp_power_state  *ps;
3048        struct smu7_power_state  *smu7_ps;
3049
3050        if (hwmgr == NULL)
3051                return -EINVAL;
3052
3053        ps = hwmgr->request_ps;
3054
3055        if (ps == NULL)
3056                return -EINVAL;
3057
3058        smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3059
3060        if (low)
3061                return smu7_ps->performance_levels[0].engine_clock;
3062        else
3063                return smu7_ps->performance_levels
3064                                [smu7_ps->performance_level_count-1].engine_clock;
3065}
3066
/*
 * Patch the boot power state with the VBIOS boot-up clocks and voltages
 * read from the ATOM FirmwareInfo data table, plus the current PCIe
 * gen/lane configuration read from hardware.
 * Returns 0 even when the table is absent (test environments).
 */
static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *hw_ps)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
	uint16_t size;
	uint8_t frev, crev;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);

	/* First retrieve the Boot clocks and VDDC from the firmware info table.
	 * We assume here that fw_info is unchanged if this call fails.
	 */
	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index,
			&size, &frev, &crev);
	if (!fw_info)
		/* During a test, there is no firmware info table. */
		return 0;

	/* Patch the state. */
	/* VBIOS table fields are little-endian; convert to host order. */
	data->vbios_boot_state.sclk_bootup_value =
			le32_to_cpu(fw_info->ulDefaultEngineClock);
	data->vbios_boot_state.mclk_bootup_value =
			le32_to_cpu(fw_info->ulDefaultMemoryClock);
	data->vbios_boot_state.mvdd_bootup_value =
			le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
	data->vbios_boot_state.vddc_bootup_value =
			le16_to_cpu(fw_info->usBootUpVDDCVoltage);
	data->vbios_boot_state.vddci_bootup_value =
			le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
	/* PCIe boot configuration is read back from the live link. */
	data->vbios_boot_state.pcie_gen_bootup_value =
			smu7_get_current_pcie_speed(hwmgr);

	data->vbios_boot_state.pcie_lane_bootup_value =
			(uint16_t)smu7_get_current_pcie_lane_number(hwmgr);

	/* set boot power state */
	ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
	ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
	ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;

	return 0;
}
3111
3112static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
3113{
3114        int result;
3115        unsigned long ret = 0;
3116
3117        if (hwmgr->pp_table_version == PP_TABLE_V0) {
3118                result = pp_tables_get_num_of_entries(hwmgr, &ret);
3119                return result ? 0 : ret;
3120        } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
3121                result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
3122                return result;
3123        }
3124        return 0;
3125}
3126
/*
 * Per-entry callback for v1 pptable parsing: fill a pp_power_state (and
 * its embedded smu7_power_state) from one ATOM_Tonga_State entry.
 *
 * Builds exactly two performance levels (low then high) by indexing the
 * SCLK/MCLK dependency sub-tables with the state's clock indices, and
 * clamps PCIe gen/lane against the platform caps.
 * Returns 0 on success, -EINVAL when the level count exceeds the SMC or
 * driver limits.
 */
static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
		void *state, struct pp_power_state *power_state,
		void *pp_table, uint32_t classification_flag)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_power_state  *smu7_power_state =
			(struct smu7_power_state *)(&(power_state->hardware));
	struct smu7_performance_level *performance_level;
	ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
	ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
			(ATOM_Tonga_POWERPLAYTABLE *)pp_table;
	/* Generic header first: the SCLK table layout depends on ucRevId. */
	PPTable_Generic_SubTable_Header *sclk_dep_table =
			(PPTable_Generic_SubTable_Header *)
			(((unsigned long)powerplay_table) +
				le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));

	ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
			(ATOM_Tonga_MCLK_Dependency_Table *)
			(((unsigned long)powerplay_table) +
				le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));

	/* The following fields are not initialized here: id orderedList allStatesList */
	power_state->classification.ui_label =
			(le16_to_cpu(state_entry->usClassification) &
			ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
			ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
	power_state->classification.flags = classification_flag;
	/* NOTE: There is a classification2 flag in BIOS that is not being used right now */

	power_state->classification.temporary_state = false;
	power_state->classification.to_be_deleted = false;

	power_state->validation.disallowOnDC =
			(0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
					ATOM_Tonga_DISALLOW_ON_DC));

	power_state->pcie.lanes = 0;

	power_state->display.disableFrameModulation = false;
	power_state->display.limitRefreshrate = false;
	power_state->display.enableVariBright =
			(0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
					ATOM_Tonga_ENABLE_VARIBRIGHT));

	power_state->validation.supportedPowerLevels = 0;
	power_state->uvd_clocks.VCLK = 0;
	power_state->uvd_clocks.DCLK = 0;
	power_state->temperatures.min = 0;
	power_state->temperatures.max = 0;

	/* Level 0: the "low" performance level. */
	performance_level = &(smu7_power_state->performance_levels
			[smu7_power_state->performance_level_count++]);

	PP_ASSERT_WITH_CODE(
			(smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
			"Performance levels exceeds SMC limit!",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(
			(smu7_power_state->performance_level_count <=
					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
			"Performance levels exceeds Driver limit!",
			return -EINVAL);

	/* Performance levels are arranged from low to high. */
	/* NOTE(review): ulMclk/ulSclk are read without le32_to_cpu here,
	 * unlike the offsets above — presumably fine on LE hosts; confirm
	 * against the pptable parsing conventions elsewhere in the driver.
	 */
	performance_level->memory_clock = mclk_dep_table->entries
			[state_entry->ucMemoryClockIndexLow].ulMclk;
	if (sclk_dep_table->ucRevId == 0)
		performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
			[state_entry->ucEngineClockIndexLow].ulSclk;
	else if (sclk_dep_table->ucRevId == 1)
		performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
			[state_entry->ucEngineClockIndexLow].ulSclk;
	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
			state_entry->ucPCIEGenLow);
	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
			state_entry->ucPCIELaneLow);

	/* Level 1: the "high" performance level. */
	performance_level = &(smu7_power_state->performance_levels
			[smu7_power_state->performance_level_count++]);
	performance_level->memory_clock = mclk_dep_table->entries
			[state_entry->ucMemoryClockIndexHigh].ulMclk;

	if (sclk_dep_table->ucRevId == 0)
		performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
			[state_entry->ucEngineClockIndexHigh].ulSclk;
	else if (sclk_dep_table->ucRevId == 1)
		performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
			[state_entry->ucEngineClockIndexHigh].ulSclk;

	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
			state_entry->ucPCIEGenHigh);
	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
			state_entry->ucPCIELaneHigh);

	return 0;
}
3224
/*
 * Load one power-state entry from a v1 pptable, then post-process it:
 * sanity-check single-entry MCLK/VDDCI tables against the VBIOS boot
 * values, set the DC-compatible flag, remember the ACPI state's PCIe gen,
 * and accumulate PCIe gen/lane min-max ranges per UI label (performance
 * vs battery).
 *
 * NOTE(review): `result` from get_powerplay_table_entry_v1_0() is only
 * consulted to gate the PCIe range accumulation; the function always
 * returns 0 — presumably intentional, but verify callers don't expect
 * the parse error to propagate.
 */
static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
		unsigned long entry_index, struct pp_power_state *state)
{
	int result;
	struct smu7_power_state *ps;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
			table_info->vdd_dep_on_mclk;

	state->hardware.magic = PHM_VIslands_Magic;

	ps = (struct smu7_power_state *)(&state->hardware);

	result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
			smu7_get_pp_table_entry_callback_func_v1);

	/* This is the earliest time we have all the dependency table and the VBIOS boot state
	 * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state
	 * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state
	 */
	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
		if (dep_mclk_table->entries[0].clk !=
				data->vbios_boot_state.mclk_bootup_value)
			pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot MCLK level");
		if (dep_mclk_table->entries[0].vddci !=
				data->vbios_boot_state.vddci_bootup_value)
			pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot VDDCI level");
	}

	/* set DC compatible flag if this state supports DC */
	if (!state->validation.disallowOnDC)
		ps->dc_compatible = true;

	/* Remember the ACPI state's PCIe gen for later link-speed restores. */
	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;

	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;

	if (!result) {
		uint32_t i;

		/* Widen the tracked PCIe gen/lane min..max range for the
		 * bucket (performance or power-saving) this state falls in.
		 */
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Performance:
			data->use_pcie_performance_levels = true;
			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_performance.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_performance.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_performance.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.max =
							ps->performance_levels[i].pcie_lane;
				if (data->pcie_lane_performance.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		case PP_StateUILabel_Battery:
			data->use_pcie_power_saving_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_power_saving.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_power_saving.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_power_saving.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_power_saving.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		default:
			break;
		}
	}
	return 0;
}
3326
3327static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
3328                                        struct pp_hw_power_state *power_state,
3329                                        unsigned int index, const void *clock_info)
3330{
3331        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3332        struct smu7_power_state  *ps = cast_phw_smu7_power_state(power_state);
3333        const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
3334        struct smu7_performance_level *performance_level;
3335        uint32_t engine_clock, memory_clock;
3336        uint16_t pcie_gen_from_bios;
3337
3338        engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
3339        memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;
3340
3341        if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
3342                data->highest_mclk = memory_clock;
3343
3344        PP_ASSERT_WITH_CODE(
3345                        (ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
3346                        "Performance levels exceeds SMC limit!",
3347                        return -EINVAL);
3348
3349        PP_ASSERT_WITH_CODE(
3350                        (ps->performance_level_count <
3351                                        hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3352                        "Performance levels exceeds Driver limit, Skip!",
3353                        return 0);
3354
3355        performance_level = &(ps->performance_levels
3356                        [ps->performance_level_count++]);
3357
3358        /* Performance levels are arranged from low to high. */
3359        performance_level->memory_clock = memory_clock;
3360        performance_level->engine_clock = engine_clock;
3361
3362        pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
3363
3364        performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
3365        performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);
3366
3367        return 0;
3368}
3369
3370static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
3371                unsigned long entry_index, struct pp_power_state *state)
3372{
3373        int result;
3374        struct smu7_power_state *ps;
3375        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3376        struct phm_clock_voltage_dependency_table *dep_mclk_table =
3377                        hwmgr->dyn_state.vddci_dependency_on_mclk;
3378
3379        memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));
3380
3381        state->hardware.magic = PHM_VIslands_Magic;
3382
3383        ps = (struct smu7_power_state *)(&state->hardware);
3384
3385        result = pp_tables_get_entry(hwmgr, entry_index, state,
3386                        smu7_get_pp_table_entry_callback_func_v0);
3387
3388        /*
3389         * This is the earliest time we have all the dependency table
3390         * and the VBIOS boot state as
3391         * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot
3392         * state if there is only one VDDCI/MCLK level, check if it's
3393         * the same as VBIOS boot state
3394         */
3395        if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3396                if (dep_mclk_table->entries[0].clk !=
3397                                data->vbios_boot_state.mclk_bootup_value)
3398                        pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
3399                                        "does not match VBIOS boot MCLK level");
3400                if (dep_mclk_table->entries[0].v !=
3401                                data->vbios_boot_state.vddci_bootup_value)
3402                        pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
3403                                        "does not match VBIOS boot VDDCI level");
3404        }
3405
3406        /* set DC compatible flag if this state supports DC */
3407        if (!state->validation.disallowOnDC)
3408                ps->dc_compatible = true;
3409
3410        if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3411                data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3412
3413        ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3414        ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3415
3416        if (!result) {
3417                uint32_t i;
3418
3419                switch (state->classification.ui_label) {
3420                case PP_StateUILabel_Performance:
3421                        data->use_pcie_performance_levels = true;
3422
3423                        for (i = 0; i < ps->performance_level_count; i++) {
3424                                if (data->pcie_gen_performance.max <
3425                                                ps->performance_levels[i].pcie_gen)
3426                                        data->pcie_gen_performance.max =
3427                                                        ps->performance_levels[i].pcie_gen;
3428
3429                                if (data->pcie_gen_performance.min >
3430                                                ps->performance_levels[i].pcie_gen)
3431                                        data->pcie_gen_performance.min =
3432                                                        ps->performance_levels[i].pcie_gen;
3433
3434                                if (data->pcie_lane_performance.max <
3435                                                ps->performance_levels[i].pcie_lane)
3436                                        data->pcie_lane_performance.max =
3437                                                        ps->performance_levels[i].pcie_lane;
3438
3439                                if (data->pcie_lane_performance.min >
3440                                                ps->performance_levels[i].pcie_lane)
3441                                        data->pcie_lane_performance.min =
3442                                                        ps->performance_levels[i].pcie_lane;
3443                        }
3444                        break;
3445                case PP_StateUILabel_Battery:
3446                        data->use_pcie_power_saving_levels = true;
3447
3448                        for (i = 0; i < ps->performance_level_count; i++) {
3449                                if (data->pcie_gen_power_saving.max <
3450                                                ps->performance_levels[i].pcie_gen)
3451                                        data->pcie_gen_power_saving.max =
3452                                                        ps->performance_levels[i].pcie_gen;
3453
3454                                if (data->pcie_gen_power_saving.min >
3455                                                ps->performance_levels[i].pcie_gen)
3456                                        data->pcie_gen_power_saving.min =
3457                                                        ps->performance_levels[i].pcie_gen;
3458
3459                                if (data->pcie_lane_power_saving.max <
3460                                                ps->performance_levels[i].pcie_lane)
3461                                        data->pcie_lane_power_saving.max =
3462                                                        ps->performance_levels[i].pcie_lane;
3463
3464                                if (data->pcie_lane_power_saving.min >
3465                                                ps->performance_levels[i].pcie_lane)
3466                                        data->pcie_lane_power_saving.min =
3467                                                        ps->performance_levels[i].pcie_lane;
3468                        }
3469                        break;
3470                default:
3471                        break;
3472                }
3473        }
3474        return 0;
3475}
3476
3477static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3478                unsigned long entry_index, struct pp_power_state *state)
3479{
3480        if (hwmgr->pp_table_version == PP_TABLE_V0)
3481                return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
3482        else if (hwmgr->pp_table_version == PP_TABLE_V1)
3483                return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);
3484
3485        return 0;
3486}
3487
/*
 * smu7_get_gpu_power - read the current GPU package power from the SMC.
 * @hwmgr: hardware manager handle
 * @query: output; receives the raw power value as reported by the SMC
 *         (format is SMC-defined — presumably fixed point; confirm
 *         against the consumer before interpreting)
 *
 * Tries the direct PPSMC_MSG_GetCurrPkgPwr query first; on ASICs that
 * lack that message, or when it reports 0, falls back to starting a PM
 * status log and polling ixSMU_PM_STATUS_95 for up to ~5 seconds
 * (10 x 500 ms).
 *
 * Returns 0 on success, -EINVAL if @query is NULL.
 */
static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
{
	struct amdgpu_device *adev = hwmgr->adev;
	int i;
	u32 tmp = 0;

	if (!query)
		return -EINVAL;

	/*
	 * PPSMC_MSG_GetCurrPkgPwr is not supported on:
	 *  - Hawaii
	 *  - Bonaire
	 *  - Fiji
	 *  - Tonga
	 */
	if ((adev->asic_type != CHIP_HAWAII) &&
	    (adev->asic_type != CHIP_BONAIRE) &&
	    (adev->asic_type != CHIP_FIJI) &&
	    (adev->asic_type != CHIP_TONGA)) {
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0, &tmp);
		*query = tmp;

		/* A non-zero direct reading is trusted as-is. */
		if (tmp != 0)
			return 0;
	}

	/* Fallback: sample the SMC PM status log until it reports power. */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart, NULL);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
							ixSMU_PM_STATUS_95, 0);

	for (i = 0; i < 10; i++) {
		msleep(500);
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample, NULL);
		tmp = cgs_read_ind_register(hwmgr->device,
						CGS_IND_REG__SMC,
						ixSMU_PM_STATUS_95);
		if (tmp != 0)
			break;
	}
	/* May still be 0 if the log never produced a sample. */
	*query = tmp;

	return 0;
}
3532
/*
 * smu7_read_sensor - service an AMDGPU_PP_SENSOR_* query.
 * @hwmgr: hardware manager handle
 * @idx:   AMDGPU_PP_SENSOR_* selector
 * @value: output buffer (all supported sensors write a 32-bit value)
 * @size:  in: capacity of @value in bytes; out: bytes written
 *
 * Returns 0 on success, -EINVAL for an unknown sensor or a buffer
 * smaller than 4 bytes.
 */
static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
                            void *value, int *size)
{
        uint32_t sclk, mclk, activity_percent;
        uint32_t offset, val_vid;
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        /* size must be at least 4 bytes for all sensors */
        if (*size < 4)
                return -EINVAL;

        switch (idx) {
        case AMDGPU_PP_SENSOR_GFX_SCLK:
                /* Current engine clock, straight from the SMC. */
                smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk);
                *((uint32_t *)value) = sclk;
                *size = 4;
                return 0;
        case AMDGPU_PP_SENSOR_GFX_MCLK:
                /* Current memory clock, straight from the SMC. */
                smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk);
                *((uint32_t *)value) = mclk;
                *size = 4;
                return 0;
        case AMDGPU_PP_SENSOR_GPU_LOAD:
        case AMDGPU_PP_SENSOR_MEM_LOAD:
                /* Activity counters live in the SMC soft-register block. */
                offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
                                                                SMU_SoftRegisters,
                                                                (idx == AMDGPU_PP_SENSOR_GPU_LOAD) ?
                                                                AverageGraphicsActivity:
                                                                AverageMemoryActivity);

                activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
                /* Round the raw /256 reading to the nearest integer percent
                 * (looks like 8.8 fixed point), then clamp to 100. */
                activity_percent += 0x80;
                activity_percent >>= 8;
                *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
                *size = 4;
                return 0;
        case AMDGPU_PP_SENSOR_GPU_TEMP:
                *((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr);
                *size = 4;
                return 0;
        case AMDGPU_PP_SENSOR_UVD_POWER:
                /* 1 = UVD powered, 0 = power gated. */
                *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
                *size = 4;
                return 0;
        case AMDGPU_PP_SENSOR_VCE_POWER:
                /* 1 = VCE powered, 0 = power gated. */
                *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
                *size = 4;
                return 0;
        case AMDGPU_PP_SENSOR_GPU_POWER:
                return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
        case AMDGPU_PP_SENSOR_VDDGFX:
                /* Pick the SVI2 plane that carries VDDGFX per vr_config. */
                if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
                    (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT))
                        val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
                                        CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID);
                else
                        val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
                                        CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID);

                *((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid);
                return 0;
        default:
                return -EINVAL;
        }
}
3598
/*
 * Compare the new power state's top-level SCLK/MCLK against the current
 * DPM tables and record which tables must be re-uploaded to the SMC in
 * data->need_update_smu7_dpm_table:
 *  - DPMTABLE_OD_UPDATE_SCLK/MCLK when the requested clock exceeds the
 *    highest table entry (overdrive); the top entry is rewritten in
 *    place,
 *  - DPMTABLE_UPDATE_SCLK when the DeepSleep minimum clock changed,
 *  - DPMTABLE_UPDATE_MCLK when the number of displays changed.
 */
static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
{
        const struct phm_set_power_state_input *states =
                        (const struct phm_set_power_state_input *)input;
        const struct smu7_power_state *smu7_ps =
                        cast_const_phw_smu7_power_state(states->pnew_state);
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
        /* Highest performance level carries the target clocks. */
        uint32_t sclk = smu7_ps->performance_levels
                        [smu7_ps->performance_level_count - 1].engine_clock;
        struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
        uint32_t mclk = smu7_ps->performance_levels
                        [smu7_ps->performance_level_count - 1].memory_clock;
        struct PP_Clocks min_clocks = {0};
        uint32_t i;

        for (i = 0; i < sclk_table->count; i++) {
                if (sclk == sclk_table->dpm_levels[i].value)
                        break;
        }

        /* Not found in the table: only treat as overdrive if it is
         * actually above the current top entry. */
        if (i >= sclk_table->count) {
                if (sclk > sclk_table->dpm_levels[i-1].value) {
                        data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
                        sclk_table->dpm_levels[i-1].value = sclk;
                }
        } else {
        /* TODO: Check SCLK in DAL's minimum clocks
         * in case DeepSleep divider update is required.
         */
                if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
                        (min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
                                data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
                        data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
        }

        for (i = 0; i < mclk_table->count; i++) {
                if (mclk == mclk_table->dpm_levels[i].value)
                        break;
        }

        /* Same overdrive handling for the memory clock. */
        if (i >= mclk_table->count) {
                if (mclk > mclk_table->dpm_levels[i-1].value) {
                        data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
                        mclk_table->dpm_levels[i-1].value = mclk;
                }
        }

        /* A display count change forces an MCLK table refresh. */
        if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
                data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;

        return 0;
}
3652
3653static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
3654                const struct smu7_power_state *smu7_ps)
3655{
3656        uint32_t i;
3657        uint32_t sclk, max_sclk = 0;
3658        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3659        struct smu7_dpm_table *dpm_table = &data->dpm_table;
3660
3661        for (i = 0; i < smu7_ps->performance_level_count; i++) {
3662                sclk = smu7_ps->performance_levels[i].engine_clock;
3663                if (max_sclk < sclk)
3664                        max_sclk = sclk;
3665        }
3666
3667        for (i = 0; i < dpm_table->sclk_table.count; i++) {
3668                if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
3669                        return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
3670                                        dpm_table->pcie_speed_table.dpm_levels
3671                                        [dpm_table->pcie_speed_table.count - 1].value :
3672                                        dpm_table->pcie_speed_table.dpm_levels[i].value);
3673        }
3674
3675        return 0;
3676}
3677
/*
 * Negotiate a PCIe link-speed increase with the platform (ACPI PSPP)
 * before switching power states.
 *
 * If the new state needs a faster link than the current one, ask the
 * platform for it; on refusal the switch cascades down one generation
 * at a time (Gen3 -> Gen2 -> current speed) via the fallthroughs,
 * recording the gen we must force in data->force_pcie_gen.  A decrease
 * is deferred: pspp_notify_required is set so the slower speed is
 * requested only after the state change completes.
 */
static int smu7_request_link_speed_change_before_state_change(
		struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct smu7_power_state *smu7_nps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	const struct smu7_power_state *polaris10_cps =
			cast_const_phw_smu7_power_state(states->pcurrent_state);

	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
	uint16_t current_link_speed;

	/* A pending forced gen overrides the current state's speed. */
	if (data->force_pcie_gen == PP_PCIEGenInvalid)
		current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps);
	else
		current_link_speed = data->force_pcie_gen;

	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->pspp_notify_required = false;

	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
#ifdef CONFIG_ACPI
		case PP_PCIEGen3:
			if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN3, false))
				break;
			/* Gen3 refused: settle for Gen2 unless already there. */
			data->force_pcie_gen = PP_PCIEGen2;
			if (current_link_speed == PP_PCIEGen2)
				break;
			fallthrough;
		case PP_PCIEGen2:
			if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
				break;
			fallthrough;
#endif
		default:
			/* All requests refused (or no ACPI): keep what we have. */
			data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			data->pspp_notify_required = true;
	}

	return 0;
}
3726
3727static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3728{
3729        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3730
3731        if (0 == data->need_update_smu7_dpm_table)
3732                return 0;
3733
3734        if ((0 == data->sclk_dpm_key_disabled) &&
3735                (data->need_update_smu7_dpm_table &
3736                        (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
3737                PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3738                                "Trying to freeze SCLK DPM when DPM is disabled",
3739                                );
3740                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3741                                PPSMC_MSG_SCLKDPM_FreezeLevel,
3742                                NULL),
3743                                "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
3744                                return -EINVAL);
3745        }
3746
3747        if ((0 == data->mclk_dpm_key_disabled) &&
3748                (data->need_update_smu7_dpm_table &
3749                 DPMTABLE_OD_UPDATE_MCLK)) {
3750                PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3751                                "Trying to freeze MCLK DPM when DPM is disabled",
3752                                );
3753                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3754                                PPSMC_MSG_MCLKDPM_FreezeLevel,
3755                                NULL),
3756                                "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
3757                                return -EINVAL);
3758        }
3759
3760        return 0;
3761}
3762
3763static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
3764                struct pp_hwmgr *hwmgr, const void *input)
3765{
3766        int result = 0;
3767        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3768        struct smu7_dpm_table *dpm_table = &data->dpm_table;
3769        uint32_t count;
3770        struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
3771        struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
3772        struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
3773
3774        if (0 == data->need_update_smu7_dpm_table)
3775                return 0;
3776
3777        if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
3778                for (count = 0; count < dpm_table->sclk_table.count; count++) {
3779                        dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled;
3780                        dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock;
3781                }
3782        }
3783
3784        if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
3785                for (count = 0; count < dpm_table->mclk_table.count; count++) {
3786                        dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled;
3787                        dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock;
3788                }
3789        }
3790
3791        if (data->need_update_smu7_dpm_table &
3792                        (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
3793                result = smum_populate_all_graphic_levels(hwmgr);
3794                PP_ASSERT_WITH_CODE((0 == result),
3795                                "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
3796                                return result);
3797        }
3798
3799        if (data->need_update_smu7_dpm_table &
3800                        (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3801                /*populate MCLK dpm table to SMU7 */
3802                result = smum_populate_all_memory_levels(hwmgr);
3803                PP_ASSERT_WITH_CODE((0 == result),
3804                                "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
3805                                return result);
3806        }
3807
3808        return result;
3809}
3810
3811static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3812                          struct smu7_single_dpm_table *dpm_table,
3813                        uint32_t low_limit, uint32_t high_limit)
3814{
3815        uint32_t i;
3816
3817        /* force the trim if mclk_switching is disabled to prevent flicker */
3818        bool force_trim = (low_limit == high_limit);
3819        for (i = 0; i < dpm_table->count; i++) {
3820        /*skip the trim if od is enabled*/
3821                if ((!hwmgr->od_enabled || force_trim)
3822                        && (dpm_table->dpm_levels[i].value < low_limit
3823                        || dpm_table->dpm_levels[i].value > high_limit))
3824                        dpm_table->dpm_levels[i].enabled = false;
3825                else
3826                        dpm_table->dpm_levels[i].enabled = true;
3827        }
3828
3829        return 0;
3830}
3831
3832static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
3833                const struct smu7_power_state *smu7_ps)
3834{
3835        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3836        uint32_t high_limit_count;
3837
3838        PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
3839                        "power state did not have any performance level",
3840                        return -EINVAL);
3841
3842        high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;
3843
3844        smu7_trim_single_dpm_states(hwmgr,
3845                        &(data->dpm_table.sclk_table),
3846                        smu7_ps->performance_levels[0].engine_clock,
3847                        smu7_ps->performance_levels[high_limit_count].engine_clock);
3848
3849        smu7_trim_single_dpm_states(hwmgr,
3850                        &(data->dpm_table.mclk_table),
3851                        smu7_ps->performance_levels[0].memory_clock,
3852                        smu7_ps->performance_levels[high_limit_count].memory_clock);
3853
3854        return 0;
3855}
3856
3857static int smu7_generate_dpm_level_enable_mask(
3858                struct pp_hwmgr *hwmgr, const void *input)
3859{
3860        int result = 0;
3861        const struct phm_set_power_state_input *states =
3862                        (const struct phm_set_power_state_input *)input;
3863        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3864        const struct smu7_power_state *smu7_ps =
3865                        cast_const_phw_smu7_power_state(states->pnew_state);
3866
3867
3868        result = smu7_trim_dpm_states(hwmgr, smu7_ps);
3869        if (result)
3870                return result;
3871
3872        data->dpm_level_enable_mask.sclk_dpm_enable_mask =
3873                        phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
3874        data->dpm_level_enable_mask.mclk_dpm_enable_mask =
3875                        phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
3876        data->dpm_level_enable_mask.pcie_dpm_enable_mask =
3877                        phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
3878
3879        return 0;
3880}
3881
3882static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3883{
3884        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3885
3886        if (0 == data->need_update_smu7_dpm_table)
3887                return 0;
3888
3889        if ((0 == data->sclk_dpm_key_disabled) &&
3890                (data->need_update_smu7_dpm_table &
3891                (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
3892
3893                PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3894                                "Trying to Unfreeze SCLK DPM when DPM is disabled",
3895                                );
3896                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3897                                PPSMC_MSG_SCLKDPM_UnfreezeLevel,
3898                                NULL),
3899                        "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
3900                        return -EINVAL);
3901        }
3902
3903        if ((0 == data->mclk_dpm_key_disabled) &&
3904                (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
3905
3906                PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3907                                "Trying to Unfreeze MCLK DPM when DPM is disabled",
3908                                );
3909                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3910                                PPSMC_MSG_MCLKDPM_UnfreezeLevel,
3911                                NULL),
3912                    "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
3913                    return -EINVAL);
3914        }
3915
3916        data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
3917
3918        return 0;
3919}
3920
3921static int smu7_notify_link_speed_change_after_state_change(
3922                struct pp_hwmgr *hwmgr, const void *input)
3923{
3924        const struct phm_set_power_state_input *states =
3925                        (const struct phm_set_power_state_input *)input;
3926        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3927        const struct smu7_power_state *smu7_ps =
3928                        cast_const_phw_smu7_power_state(states->pnew_state);
3929        uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
3930        uint8_t  request;
3931
3932        if (data->pspp_notify_required) {
3933                if (target_link_speed == PP_PCIEGen3)
3934                        request = PCIE_PERF_REQ_GEN3;
3935                else if (target_link_speed == PP_PCIEGen2)
3936                        request = PCIE_PERF_REQ_GEN2;
3937                else
3938                        request = PCIE_PERF_REQ_GEN1;
3939
3940                if (request == PCIE_PERF_REQ_GEN1 &&
3941                                smu7_get_current_pcie_speed(hwmgr) > 0)
3942                        return 0;
3943
3944#ifdef CONFIG_ACPI
3945                if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) {
3946                        if (PP_PCIEGen2 == target_link_speed)
3947                                pr_info("PSPP request to switch to Gen2 from Gen3 Failed!");
3948                        else
3949                                pr_info("PSPP request to switch to Gen1 from Gen2 Failed!");
3950                }
3951#endif
3952        }
3953
3954        return 0;
3955}
3956
3957static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
3958{
3959        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3960
3961        if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
3962                if (hwmgr->chip_id == CHIP_VEGAM)
3963                        smum_send_msg_to_smc_with_parameter(hwmgr,
3964                                        (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2,
3965                                        NULL);
3966                else
3967                        smum_send_msg_to_smc_with_parameter(hwmgr,
3968                                        (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2,
3969                                        NULL);
3970        }
3971        return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay, NULL) == 0) ?  0 : -EINVAL;
3972}
3973
/*
 * smu7_set_power_state_tasks - program the SMC for a newly requested
 * power state.
 *
 * Runs the state-switch sequence: locate the new clocks in the DPM
 * tables, optionally request a PCIe link speed change first, freeze DPM,
 * upload the updated SCLK/MCLK levels and AVFS voltages, refresh the
 * enable masks and SCLK threshold, notify the SMC about the display,
 * then unfreeze DPM and send the post-switch link speed notification.
 *
 * Each failing step only logs (via PP_ASSERT_WITH_CODE) and the sequence
 * continues; the error code of the last failing step is returned
 * (0 if all succeeded).
 */
static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	int tmp_result, result = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to find DPM states clocks in DPM table!",
			result = tmp_result);

	/* PSPP: ask for a link speed change before switching states. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			smu7_request_link_speed_change_before_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to request link speed change before state change!",
				result = tmp_result);
	}

	tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to freeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to populate and upload SCLK MCLK DPM levels!",
			result = tmp_result);

	/*
	 * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
	 * That effectively disables AVFS feature.
	 */
	if (hwmgr->hardcode_pp_table != NULL)
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;

	tmp_result = smu7_update_avfs(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update avfs voltages!",
			result = tmp_result);

	tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to generate DPM level enabled mask!",
			result = tmp_result);

	tmp_result = smum_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update SCLK threshold!",
			result = tmp_result);

	tmp_result = smu7_notify_smc_display(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to notify smc display settings!",
			result = tmp_result);

	tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to unfreeze SCLK MCLK DPM!",
			result = tmp_result);

	tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to upload DPM level enabled mask!",
			result = tmp_result);

	/* PSPP: notify about the link speed after the switch completed. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			smu7_notify_link_speed_change_after_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to notify link speed change after state change!",
				result = tmp_result);
	}
	data->apply_optimized_settings = false;
	return result;
}
4050
4051static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
4052{
4053        hwmgr->thermal_controller.
4054        advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
4055
4056        return smum_send_msg_to_smc_with_parameter(hwmgr,
4057                        PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm,
4058                        NULL);
4059}
4060
4061static int
4062smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
4063{
4064        PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
4065
4066        return (smum_send_msg_to_smc(hwmgr, msg, NULL) == 0) ?  0 : -1;
4067}
4068
4069static int
4070smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
4071{
4072        if (hwmgr->display_config->num_display > 1 &&
4073                        !hwmgr->display_config->multi_monitor_in_sync)
4074                smu7_notify_smc_display_change(hwmgr, false);
4075
4076        return 0;
4077}
4078
/**
* Programs the display gap
*
* @param    hwmgr  the address of the powerplay hardware manager.
* @return   always OK
*
* Selects the vblank/watermark display gap when a display is active
* (DISPLAY_GAP_IGNORE otherwise), derives the frame time from the
* current refresh rate and writes the pre-vblank gap and vblank timeout
* into the SMU soft registers.
*/
static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
	uint32_t display_gap2;
	uint32_t pre_vbi_time_in_us;
	uint32_t frame_time_in_us;
	uint32_t ref_clock, refresh_rate;

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);

	ref_clock =  amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
	refresh_rate = hwmgr->display_config->vrefresh;

	/* Fall back to 60 Hz when no refresh rate is reported. */
	if (0 == refresh_rate)
		refresh_rate = 60;

	frame_time_in_us = 1000000 / refresh_rate;

	/* NOTE(review): this underflows (wraps, unsigned) if
	 * min_vblank_time > frame_time_in_us - 200; presumably the display
	 * config always reports a sane vblank time -- confirm.
	 */
	pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time;

	/* frame_time_x2 = frame_time_in_us / 50, i.e. frame time in 50us units. */
	data->frame_time_x2 = frame_time_in_us * 2 / 100;

	/* Enforce a floor of 280 on the cached VBI timeout value. */
	if (data->frame_time_x2 < 280) {
		pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2);
		data->frame_time_x2 = 280;
	}

	/* Pre-vblank time scaled by the reference clock (xclk / 100). */
	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);

	/* Program the SMU soft registers: fixed pre-vblank gap of 0x64 ... */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start + smum_get_offsetof(hwmgr,
							SMU_SoftRegisters,
							PreVBlankGap), 0x64);

	/* ... and the remaining vblank time as the vblank timeout. */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start + smum_get_offsetof(hwmgr,
							SMU_SoftRegisters,
							VBlankTimeout),
					(frame_time_in_us - pre_vbi_time_in_us));

	return 0;
}
4131
/* Display configuration changed: reprogram the display gap registers. */
static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	return smu7_program_display_gap(hwmgr);
}
4136
4137/**
4138*  Set maximum target operating fan output RPM
4139*
4140* @param    hwmgr:  the address of the powerplay hardware manager.
4141* @param    usMaxFanRpm:  max operating fan RPM value.
4142* @return   The response that came from the SMC.
4143*/
4144static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
4145{
4146        hwmgr->thermal_controller.
4147        advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
4148
4149        return smum_send_msg_to_smc_with_parameter(hwmgr,
4150                        PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm,
4151                        NULL);
4152}
4153
/* Callbacks shared by the thermal and CTF interrupt sources registered
 * in smu7_register_irq_handlers(); all events go through phm_irq_process().
 */
static const struct amdgpu_irq_src_funcs smu7_irq_funcs = {
	.process = phm_irq_process,
};
4157
/*
 * Register the thermal transition (CG_TSS_THERMAL_LOW_TO_HIGH /
 * HIGH_TO_LOW) and CTF (GPIO_19) interrupt sources, all dispatched
 * through smu7_irq_funcs (phm_irq_process).
 *
 * NOTE(review): the amdgpu_irq_add_id() return values are ignored and
 * @source is not freed here -- presumably ownership passes to the IRQ
 * core on registration; confirm against amdgpu_irq_add_id() semantics.
 */
static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);

	if (!source)
		return -ENOMEM;

	source->funcs = &smu7_irq_funcs;

	/* Thermal low-to-high transition interrupt. */
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			AMDGPU_IRQ_CLIENTID_LEGACY,
			VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH,
			source);
	/* Thermal high-to-low transition interrupt. */
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			AMDGPU_IRQ_CLIENTID_LEGACY,
			VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW,
			source);

	/* Register CTF(GPIO_19) interrupt */
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			AMDGPU_IRQ_CLIENTID_LEGACY,
			VISLANDS30_IV_SRCID_GPIO_19,
			source);

	return 0;
}
4185
4186static bool
4187smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4188{
4189        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4190        bool is_update_required = false;
4191
4192        if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
4193                is_update_required = true;
4194
4195        if (data->display_timing.vrefresh != hwmgr->display_config->vrefresh)
4196                is_update_required = true;
4197
4198        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
4199                if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr &&
4200                        (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
4201                        hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
4202                        is_update_required = true;
4203        }
4204        return is_update_required;
4205}
4206
4207static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
4208                                                           const struct smu7_performance_level *pl2)
4209{
4210        return ((pl1->memory_clock == pl2->memory_clock) &&
4211                  (pl1->engine_clock == pl2->engine_clock) &&
4212                  (pl1->pcie_gen == pl2->pcie_gen) &&
4213                  (pl1->pcie_lane == pl2->pcie_lane));
4214}
4215
4216static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
4217                const struct pp_hw_power_state *pstate1,
4218                const struct pp_hw_power_state *pstate2, bool *equal)
4219{
4220        const struct smu7_power_state *psa;
4221        const struct smu7_power_state *psb;
4222        int i;
4223        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4224
4225        if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4226                return -EINVAL;
4227
4228        psa = cast_const_phw_smu7_power_state(pstate1);
4229        psb = cast_const_phw_smu7_power_state(pstate2);
4230        /* If the two states don't even have the same number of performance levels they cannot be the same state. */
4231        if (psa->performance_level_count != psb->performance_level_count) {
4232                *equal = false;
4233                return 0;
4234        }
4235
4236        for (i = 0; i < psa->performance_level_count; i++) {
4237                if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4238                        /* If we have found even one performance level pair that is different the states are different. */
4239                        *equal = false;
4240                        return 0;
4241                }
4242        }
4243
4244        /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
4245        *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4246        *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4247        *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4248        /* For OD call, set value based on flag */
4249        *equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK |
4250                                                        DPMTABLE_OD_UPDATE_MCLK |
4251                                                        DPMTABLE_OD_UPDATE_VDDC));
4252
4253        return 0;
4254}
4255
4256static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
4257{
4258        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4259
4260        uint32_t tmp;
4261
4262        /* Read MC indirect register offset 0x9F bits [3:0] to see
4263         * if VBIOS has already loaded a full version of MC ucode
4264         * or not.
4265         */
4266
4267        smu7_get_mc_microcode_version(hwmgr);
4268
4269        data->need_long_memory_training = false;
4270
4271        cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
4272                                                        ixMC_IO_DEBUG_UP_13);
4273        tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
4274
4275        if (tmp & (1 << 23)) {
4276                data->mem_latency_high = MEM_LATENCY_HIGH;
4277                data->mem_latency_low = MEM_LATENCY_LOW;
4278                if ((hwmgr->chip_id == CHIP_POLARIS10) ||
4279                    (hwmgr->chip_id == CHIP_POLARIS11) ||
4280                    (hwmgr->chip_id == CHIP_POLARIS12))
4281                        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC, NULL);
4282        } else {
4283                data->mem_latency_high = 330;
4284                data->mem_latency_low = 330;
4285                if ((hwmgr->chip_id == CHIP_POLARIS10) ||
4286                    (hwmgr->chip_id == CHIP_POLARIS11) ||
4287                    (hwmgr->chip_id == CHIP_POLARIS12))
4288                        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC, NULL);
4289        }
4290
4291        return 0;
4292}
4293
/*
 * Snapshot the current SPLL/MPLL, spread-spectrum and memory clock
 * power-management register values into data->clock_registers.
 * Always returns 0.
 */
static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* SPLL (engine clock PLL) registers live in the SMC indirect space. */
	data->clock_registers.vCG_SPLL_FUNC_CNTL         =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_2       =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_3       =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_4       =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM   =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
	/* MPLL (memory clock PLL) and DLL registers use direct MMIO reads. */
	data->clock_registers.vDLL_CNTL                  =
		cgs_read_register(hwmgr->device, mmDLL_CNTL);
	data->clock_registers.vMCLK_PWRMGT_CNTL          =
		cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
	data->clock_registers.vMPLL_AD_FUNC_CNTL         =
		cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
	data->clock_registers.vMPLL_DQ_FUNC_CNTL         =
		cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
	data->clock_registers.vMPLL_FUNC_CNTL            =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
	data->clock_registers.vMPLL_FUNC_CNTL_1          =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
	data->clock_registers.vMPLL_FUNC_CNTL_2          =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
	data->clock_registers.vMPLL_SS1                  =
		cgs_read_register(hwmgr->device, mmMPLL_SS1);
	data->clock_registers.vMPLL_SS2                  =
		cgs_read_register(hwmgr->device, mmMPLL_SS2);
	return 0;

}
4331
4332/**
4333 * Find out if memory is GDDR5.
4334 *
4335 * @param    hwmgr  the address of the powerplay hardware manager.
4336 * @return   always 0
4337 */
4338static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
4339{
4340        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4341        struct amdgpu_device *adev = hwmgr->adev;
4342
4343        data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5);
4344
4345        return 0;
4346}
4347
/**
 * Enables Dynamic Power Management by SMC
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
{
	/* Set GENERAL_PWRMGT.STATIC_PM_EN in the SMC indirect register space. */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, STATIC_PM_EN, 1);

	return 0;
}
4361
4362/**
4363 * Initialize PowerGating States for different engines
4364 *
4365 * @param    hwmgr  the address of the powerplay hardware manager.
4366 * @return   always 0
4367 */
4368static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
4369{
4370        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4371
4372        data->uvd_power_gated = false;
4373        data->vce_power_gated = false;
4374
4375        return 0;
4376}
4377
/* Reset the cached low-SCLK interrupt threshold to zero. Always returns 0. */
static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	data->low_sclk_interrupt_threshold = 0;
	return 0;
}
4385
/*
 * One-time ASIC setup: check the MC firmware, snapshot the clock
 * registers, detect the memory type, enable static power management and
 * reset the power-gate and SCLK-threshold bookkeeping.
 *
 * Each failing step logs via PP_ASSERT_WITH_CODE but the sequence
 * continues; the error code of the last failing step is returned
 * (0 if all succeeded).
 */
static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	smu7_check_mc_firmware(hwmgr);

	tmp_result = smu7_read_clock_registers(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to read clock registers!", result = tmp_result);

	tmp_result = smu7_get_memory_type(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to get memory type!", result = tmp_result);

	tmp_result = smu7_enable_acpi_power_management(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable ACPI power management!", result = tmp_result);

	tmp_result = smu7_init_power_gate_state(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to init power gate state!", result = tmp_result);

	tmp_result = smu7_get_mc_microcode_version(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to get MC microcode version!", result = tmp_result);

	tmp_result = smu7_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to init sclk threshold!", result = tmp_result);

	return result;
}
4418
4419static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
4420                enum pp_clock_type type, uint32_t mask)
4421{
4422        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4423
4424        if (mask == 0)
4425                return -EINVAL;
4426
4427        switch (type) {
4428        case PP_SCLK:
4429                if (!data->sclk_dpm_key_disabled)
4430                        smum_send_msg_to_smc_with_parameter(hwmgr,
4431                                        PPSMC_MSG_SCLKDPM_SetEnabledMask,
4432                                        data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask,
4433                                        NULL);
4434                break;
4435        case PP_MCLK:
4436                if (!data->mclk_dpm_key_disabled)
4437                        smum_send_msg_to_smc_with_parameter(hwmgr,
4438                                        PPSMC_MSG_MCLKDPM_SetEnabledMask,
4439                                        data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask,
4440                                        NULL);
4441                break;
4442        case PP_PCIE:
4443        {
4444                uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
4445
4446                if (!data->pcie_dpm_key_disabled) {
4447                        if (fls(tmp) != ffs(tmp))
4448                                smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel,
4449                                                NULL);
4450                        else
4451                                smum_send_msg_to_smc_with_parameter(hwmgr,
4452                                        PPSMC_MSG_PCIeDPM_ForceLevel,
4453                                        fls(tmp) - 1,
4454                                        NULL);
4455                }
4456                break;
4457        }
4458        default:
4459                break;
4460        }
4461
4462        return 0;
4463}
4464
/*
 * Format the requested clock level table (or overdrive table/range) into
 * @buf for sysfs; the current level is marked with '*'.  Returns the
 * number of characters written (0 for unknown/disabled types).
 */
static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
	struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
	int i, now, size = 0;
	uint32_t clock, pcie_speed;

	switch (type) {
	case PP_SCLK:
		/* Ask the SMC for the live engine clock ... */
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);

		/* ... and find the first DPM level at or above it. */
		for (i = 0; i < sclk_table->count; i++) {
			if (clock > sclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		/* Same scheme for the memory clock. */
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock);

		for (i = 0; i < mclk_table->count; i++) {
			if (clock > mclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_PCIE:
		/* Find the DPM level that matches the current link speed. */
		pcie_speed = smu7_get_current_pcie_speed(hwmgr);
		for (i = 0; i < pcie_table->count; i++) {
			if (pcie_speed != pcie_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
					(pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
					(pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
					(i == now) ? "*" : "");
		break;
	case OD_SCLK:
		/* Overdrive engine clock levels (only when OD is enabled). */
		if (hwmgr->od_enabled) {
			size = sprintf(buf, "%s:\n", "OD_SCLK");
			for (i = 0; i < odn_sclk_table->num_of_pl; i++)
				size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
					i, odn_sclk_table->entries[i].clock/100,
					odn_sclk_table->entries[i].vddc);
		}
		break;
	case OD_MCLK:
		/* Overdrive memory clock levels. */
		if (hwmgr->od_enabled) {
			size = sprintf(buf, "%s:\n", "OD_MCLK");
			for (i = 0; i < odn_mclk_table->num_of_pl; i++)
				size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
					i, odn_mclk_table->entries[i].clock/100,
					odn_mclk_table->entries[i].vddc);
		}
		break;
	case OD_RANGE:
		/* Allowed overdrive ranges for SCLK, MCLK and VDDC. */
		if (hwmgr->od_enabled) {
			size = sprintf(buf, "%s:\n", "OD_RANGE");
			size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
				data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
			size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
				data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
			size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
				data->odn_dpm_table.min_vddc,
				data->odn_dpm_table.max_vddc);
		}
		break;
	default:
		break;
	}
	return size;
}
4562
4563static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
4564{
4565        switch (mode) {
4566        case AMD_FAN_CTRL_NONE:
4567                smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
4568                break;
4569        case AMD_FAN_CTRL_MANUAL:
4570                if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4571                        PHM_PlatformCaps_MicrocodeFanControl))
4572                        smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
4573                break;
4574        case AMD_FAN_CTRL_AUTO:
4575                if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode))
4576                        smu7_fan_ctrl_start_smc_fan_control(hwmgr);
4577                break;
4578        default:
4579                break;
4580        }
4581}
4582
4583static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
4584{
4585        return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL;
4586}
4587
4588static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
4589{
4590        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4591        struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4592        struct smu7_single_dpm_table *golden_sclk_table =
4593                        &(data->golden_dpm_table.sclk_table);
4594        int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
4595        int golden_value = golden_sclk_table->dpm_levels
4596                        [golden_sclk_table->count - 1].value;
4597
4598        value -= golden_value;
4599        value = DIV_ROUND_UP(value * 100, golden_value);
4600
4601        return value;
4602}
4603
4604static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4605{
4606        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4607        struct smu7_single_dpm_table *golden_sclk_table =
4608                        &(data->golden_dpm_table.sclk_table);
4609        struct pp_power_state  *ps;
4610        struct smu7_power_state  *smu7_ps;
4611
4612        if (value > 20)
4613                value = 20;
4614
4615        ps = hwmgr->request_ps;
4616
4617        if (ps == NULL)
4618                return -EINVAL;
4619
4620        smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
4621
4622        smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
4623                        golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
4624                        value / 100 +
4625                        golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
4626
4627        return 0;
4628}
4629
4630static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
4631{
4632        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4633        struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4634        struct smu7_single_dpm_table *golden_mclk_table =
4635                        &(data->golden_dpm_table.mclk_table);
4636        int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
4637        int golden_value = golden_mclk_table->dpm_levels
4638                        [golden_mclk_table->count - 1].value;
4639
4640        value -= golden_value;
4641        value = DIV_ROUND_UP(value * 100, golden_value);
4642
4643        return value;
4644}
4645
4646static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4647{
4648        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4649        struct smu7_single_dpm_table *golden_mclk_table =
4650                        &(data->golden_dpm_table.mclk_table);
4651        struct pp_power_state  *ps;
4652        struct smu7_power_state  *smu7_ps;
4653
4654        if (value > 20)
4655                value = 20;
4656
4657        ps = hwmgr->request_ps;
4658
4659        if (ps == NULL)
4660                return -EINVAL;
4661
4662        smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
4663
4664        smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
4665                        golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
4666                        value / 100 +
4667                        golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
4668
4669        return 0;
4670}
4671
4672
4673static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4674{
4675        struct phm_ppt_v1_information *table_info =
4676                        (struct phm_ppt_v1_information *)hwmgr->pptable;
4677        struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
4678        struct phm_clock_voltage_dependency_table *sclk_table;
4679        int i;
4680
4681        if (hwmgr->pp_table_version == PP_TABLE_V1) {
4682                if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
4683                        return -EINVAL;
4684                dep_sclk_table = table_info->vdd_dep_on_sclk;
4685                for (i = 0; i < dep_sclk_table->count; i++)
4686                        clocks->clock[i] = dep_sclk_table->entries[i].clk * 10;
4687                clocks->count = dep_sclk_table->count;
4688        } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
4689                sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
4690                for (i = 0; i < sclk_table->count; i++)
4691                        clocks->clock[i] = sclk_table->entries[i].clk * 10;
4692                clocks->count = sclk_table->count;
4693        }
4694
4695        return 0;
4696}
4697
4698static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
4699{
4700        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4701
4702        if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
4703                return data->mem_latency_high;
4704        else if (clk >= MEM_FREQ_HIGH_LATENCY)
4705                return data->mem_latency_low;
4706        else
4707                return MEM_LATENCY_ERR;
4708}
4709
4710static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4711{
4712        struct phm_ppt_v1_information *table_info =
4713                        (struct phm_ppt_v1_information *)hwmgr->pptable;
4714        struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
4715        int i;
4716        struct phm_clock_voltage_dependency_table *mclk_table;
4717
4718        if (hwmgr->pp_table_version == PP_TABLE_V1) {
4719                if (table_info == NULL)
4720                        return -EINVAL;
4721                dep_mclk_table = table_info->vdd_dep_on_mclk;
4722                for (i = 0; i < dep_mclk_table->count; i++) {
4723                        clocks->clock[i] = dep_mclk_table->entries[i].clk * 10;
4724                        clocks->latency[i] = smu7_get_mem_latency(hwmgr,
4725                                                dep_mclk_table->entries[i].clk);
4726                }
4727                clocks->count = dep_mclk_table->count;
4728        } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
4729                mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
4730                for (i = 0; i < mclk_table->count; i++)
4731                        clocks->clock[i] = mclk_table->entries[i].clk * 10;
4732                clocks->count = mclk_table->count;
4733        }
4734        return 0;
4735}
4736
4737static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
4738                                                struct amd_pp_clocks *clocks)
4739{
4740        switch (type) {
4741        case amd_pp_sys_clock:
4742                smu7_get_sclks(hwmgr, clocks);
4743                break;
4744        case amd_pp_mem_clock:
4745                smu7_get_mclks(hwmgr, clocks);
4746                break;
4747        default:
4748                return -EINVAL;
4749        }
4750
4751        return 0;
4752}
4753
4754static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
4755                                        uint32_t virtual_addr_low,
4756                                        uint32_t virtual_addr_hi,
4757                                        uint32_t mc_addr_low,
4758                                        uint32_t mc_addr_hi,
4759                                        uint32_t size)
4760{
4761        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4762
4763        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4764                                        data->soft_regs_start +
4765                                        smum_get_offsetof(hwmgr,
4766                                        SMU_SoftRegisters, DRAM_LOG_ADDR_H),
4767                                        mc_addr_hi);
4768
4769        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4770                                        data->soft_regs_start +
4771                                        smum_get_offsetof(hwmgr,
4772                                        SMU_SoftRegisters, DRAM_LOG_ADDR_L),
4773                                        mc_addr_low);
4774
4775        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4776                                        data->soft_regs_start +
4777                                        smum_get_offsetof(hwmgr,
4778                                        SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
4779                                        virtual_addr_hi);
4780
4781        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4782                                        data->soft_regs_start +
4783                                        smum_get_offsetof(hwmgr,
4784                                        SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
4785                                        virtual_addr_low);
4786
4787        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4788                                        data->soft_regs_start +
4789                                        smum_get_offsetof(hwmgr,
4790                                        SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
4791                                        size);
4792        return 0;
4793}
4794
4795static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
4796                                        struct amd_pp_simple_clock_info *clocks)
4797{
4798        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4799        struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4800        struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4801
4802        if (clocks == NULL)
4803                return -EINVAL;
4804
4805        clocks->memory_max_clock = mclk_table->count > 1 ?
4806                                mclk_table->dpm_levels[mclk_table->count-1].value :
4807                                mclk_table->dpm_levels[0].value;
4808        clocks->engine_max_clock = sclk_table->count > 1 ?
4809                                sclk_table->dpm_levels[sclk_table->count-1].value :
4810                                sclk_table->dpm_levels[0].value;
4811        return 0;
4812}
4813
4814static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
4815                struct PP_TemperatureRange *thermal_data)
4816{
4817        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4818        struct phm_ppt_v1_information *table_info =
4819                        (struct phm_ppt_v1_information *)hwmgr->pptable;
4820
4821        memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
4822
4823        if (hwmgr->pp_table_version == PP_TABLE_V1)
4824                thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
4825                        PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4826        else if (hwmgr->pp_table_version == PP_TABLE_V0)
4827                thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
4828                        PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4829
4830        return 0;
4831}
4832
4833static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
4834                                        enum PP_OD_DPM_TABLE_COMMAND type,
4835                                        uint32_t clk,
4836                                        uint32_t voltage)
4837{
4838        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4839
4840        if (voltage < data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) {
4841                pr_info("OD voltage is out of range [%d - %d] mV\n",
4842                                                data->odn_dpm_table.min_vddc,
4843                                                data->odn_dpm_table.max_vddc);
4844                return false;
4845        }
4846
4847        if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
4848                if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk ||
4849                        hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
4850                        pr_info("OD engine clock is out of range [%d - %d] MHz\n",
4851                                data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
4852                                hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
4853                        return false;
4854                }
4855        } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
4856                if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk ||
4857                        hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
4858                        pr_info("OD memory clock is out of range [%d - %d] MHz\n",
4859                                data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
4860                                hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
4861                        return false;
4862                }
4863        } else {
4864                return false;
4865        }
4866
4867        return true;
4868}
4869
4870static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
4871                                        enum PP_OD_DPM_TABLE_COMMAND type,
4872                                        long *input, uint32_t size)
4873{
4874        uint32_t i;
4875        struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL;
4876        struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL;
4877        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4878
4879        uint32_t input_clk;
4880        uint32_t input_vol;
4881        uint32_t input_level;
4882
4883        PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
4884                                return -EINVAL);
4885
4886        if (!hwmgr->od_enabled) {
4887                pr_info("OverDrive feature not enabled\n");
4888                return -EINVAL;
4889        }
4890
4891        if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
4892                podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels;
4893                podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_sclk;
4894                PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
4895                                "Failed to get ODN SCLK and Voltage tables",
4896                                return -EINVAL);
4897        } else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
4898                podn_dpm_table_in_backend = &data->odn_dpm_table.odn_memory_clock_dpm_levels;
4899                podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_mclk;
4900
4901                PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
4902                        "Failed to get ODN MCLK and Voltage tables",
4903                        return -EINVAL);
4904        } else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
4905                smu7_odn_initial_default_setting(hwmgr);
4906                return 0;
4907        } else if (PP_OD_COMMIT_DPM_TABLE == type) {
4908                smu7_check_dpm_table_updated(hwmgr);
4909                return 0;
4910        } else {
4911                return -EINVAL;
4912        }
4913
4914        for (i = 0; i < size; i += 3) {
4915                if (i + 3 > size || input[i] >= podn_dpm_table_in_backend->num_of_pl) {
4916                        pr_info("invalid clock voltage input \n");
4917                        return 0;
4918                }
4919                input_level = input[i];
4920                input_clk = input[i+1] * 100;
4921                input_vol = input[i+2];
4922
4923                if (smu7_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
4924                        podn_dpm_table_in_backend->entries[input_level].clock = input_clk;
4925                        podn_vdd_dep_in_backend->entries[input_level].clk = input_clk;
4926                        podn_dpm_table_in_backend->entries[input_level].vddc = input_vol;
4927                        podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol;
4928                        podn_vdd_dep_in_backend->entries[input_level].vddgfx = input_vol;
4929                } else {
4930                        return -EINVAL;
4931                }
4932        }
4933
4934        return 0;
4935}
4936
/*
 * Print the available power profile modes into @buf (sysfs
 * pp_power_profile_mode).  The active mode is marked with "*" and shows
 * the live settings cached in data->current_profile_setting; the other
 * rows show the static defaults from smu7_profiling[], with "-" for
 * fields whose sclk/mclk update flag is clear.
 *
 * Returns the number of bytes written, or -EINVAL when @buf is NULL.
 */
static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t i, size = 0;
	uint32_t len;

	/* NOTE(review): indexed by mode number; assumes
	 * ARRAY_SIZE(smu7_profiling) <= 7 -- confirm, otherwise
	 * profile_name[i] below reads out of bounds.
	 */
	static const char *profile_name[7] = {"BOOTUP_DEFAULT",
					"3D_FULL_SCREEN",
					"POWER_SAVING",
					"VIDEO",
					"VR",
					"COMPUTE",
					"CUSTOM"};

	static const char *title[8] = {"NUM",
			"MODE_NAME",
			"SCLK_UP_HYST",
			"SCLK_DOWN_HYST",
			"SCLK_ACTIVE_LEVEL",
			"MCLK_UP_HYST",
			"MCLK_DOWN_HYST",
			"MCLK_ACTIVE_LEVEL"};

	if (!buf)
		return -EINVAL;

	/* header row */
	size += sprintf(buf + size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
			title[0], title[1], title[2], title[3],
			title[4], title[5], title[6], title[7]);

	len = ARRAY_SIZE(smu7_profiling);

	for (i = 0; i < len; i++) {
		/* active mode: print live settings and skip the defaults */
		if (i == hwmgr->power_profile_mode) {
			size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
			i, profile_name[i], "*",
			data->current_profile_setting.sclk_up_hyst,
			data->current_profile_setting.sclk_down_hyst,
			data->current_profile_setting.sclk_activity,
			data->current_profile_setting.mclk_up_hyst,
			data->current_profile_setting.mclk_down_hyst,
			data->current_profile_setting.mclk_activity);
			continue;
		}
		/* sclk columns: values when the profile updates sclk, else dashes */
		if (smu7_profiling[i].bupdate_sclk)
			size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
			i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
			smu7_profiling[i].sclk_down_hyst,
			smu7_profiling[i].sclk_activity);
		else
			size += sprintf(buf + size, "%3d %16s: %8s %16s %16s ",
			i, profile_name[i], "-", "-", "-");

		/* mclk columns, same convention */
		if (smu7_profiling[i].bupdate_mclk)
			size += sprintf(buf + size, "%16d %16d %16d\n",
			smu7_profiling[i].mclk_up_hyst,
			smu7_profiling[i].mclk_down_hyst,
			smu7_profiling[i].mclk_activity);
		else
			size += sprintf(buf + size, "%16s %16s %16s\n",
			"-", "-", "-");
	}

	return size;
}
5002
5003static void smu7_patch_compute_profile_mode(struct pp_hwmgr *hwmgr,
5004                                        enum PP_SMC_POWER_PROFILE requst)
5005{
5006        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5007        uint32_t tmp, level;
5008
5009        if (requst == PP_SMC_POWER_PROFILE_COMPUTE) {
5010                if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
5011                        level = 0;
5012                        tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
5013                        while (tmp >>= 1)
5014                                level++;
5015                        if (level > 0)
5016                                smu7_force_clock_level(hwmgr, PP_SCLK, 3 << (level-1));
5017                }
5018        } else if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
5019                smu7_force_clock_level(hwmgr, PP_SCLK, data->dpm_level_enable_mask.sclk_dpm_enable_mask);
5020        }
5021}
5022
/*
 * Select a power profile mode, optionally with custom settings.
 *
 * The caller appends the mode number after the first @size payload
 * entries, i.e. it is read from input[size].  For CUSTOM, input[0..7]
 * carry (bupdate_sclk, sclk_up_hyst, sclk_down_hyst, sclk_activity,
 * bupdate_mclk, mclk_up_hyst, mclk_down_hyst, mclk_activity); with
 * size == 0 the previously saved CUSTOM settings are reused.
 *
 * Returns 0 on success (or when the mode is already active), -EINVAL on
 * bad input.
 */
static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct profile_mode_setting tmp;
	enum PP_SMC_POWER_PROFILE mode;

	if (input == NULL)
		return -EINVAL;

	/* mode number follows the payload */
	mode = input[size];
	switch (mode) {
	case PP_SMC_POWER_PROFILE_CUSTOM:
		if (size < 8 && size != 0)
			return -EINVAL;
		/* If only CUSTOM is passed in, use the saved values. Check
		 * that we actually have a CUSTOM profile by ensuring that
		 * the "use sclk" or the "use mclk" bits are set
		 */
		tmp = smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM];
		if (size == 0) {
			if (tmp.bupdate_sclk == 0 && tmp.bupdate_mclk == 0)
				return -EINVAL;
		} else {
			tmp.bupdate_sclk = input[0];
			tmp.sclk_up_hyst = input[1];
			tmp.sclk_down_hyst = input[2];
			tmp.sclk_activity = input[3];
			tmp.bupdate_mclk = input[4];
			tmp.mclk_up_hyst = input[5];
			tmp.mclk_down_hyst = input[6];
			tmp.mclk_activity = input[7];
			/* persist the new CUSTOM settings for later size==0 calls */
			smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM] = tmp;
		}
		/* only record the mode once the SMU accepted the settings */
		if (!smum_update_dpm_settings(hwmgr, &tmp)) {
			memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
			hwmgr->power_profile_mode = mode;
		}
		break;
	case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
	case PP_SMC_POWER_PROFILE_POWERSAVING:
	case PP_SMC_POWER_PROFILE_VIDEO:
	case PP_SMC_POWER_PROFILE_VR:
	case PP_SMC_POWER_PROFILE_COMPUTE:
		if (mode == hwmgr->power_profile_mode)
			return 0;

		memcpy(&tmp, &smu7_profiling[mode], sizeof(struct profile_mode_setting));
		if (!smum_update_dpm_settings(hwmgr, &tmp)) {
			/* cache only the fields the profile actually updated */
			if (tmp.bupdate_sclk) {
				data->current_profile_setting.bupdate_sclk = tmp.bupdate_sclk;
				data->current_profile_setting.sclk_up_hyst = tmp.sclk_up_hyst;
				data->current_profile_setting.sclk_down_hyst = tmp.sclk_down_hyst;
				data->current_profile_setting.sclk_activity = tmp.sclk_activity;
			}
			if (tmp.bupdate_mclk) {
				data->current_profile_setting.bupdate_mclk = tmp.bupdate_mclk;
				data->current_profile_setting.mclk_up_hyst = tmp.mclk_up_hyst;
				data->current_profile_setting.mclk_down_hyst = tmp.mclk_down_hyst;
				data->current_profile_setting.mclk_activity = tmp.mclk_activity;
			}
			smu7_patch_compute_profile_mode(hwmgr, mode);
			hwmgr->power_profile_mode = mode;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
5093
5094static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
5095                                PHM_PerformanceLevelDesignation designation, uint32_t index,
5096                                PHM_PerformanceLevel *level)
5097{
5098        const struct smu7_power_state *ps;
5099        uint32_t i;
5100
5101        if (level == NULL || hwmgr == NULL || state == NULL)
5102                return -EINVAL;
5103
5104        ps = cast_const_phw_smu7_power_state(state);
5105
5106        i = index > ps->performance_level_count - 1 ?
5107                        ps->performance_level_count - 1 : index;
5108
5109        level->coreClock = ps->performance_levels[i].engine_clock;
5110        level->memory_clock = ps->performance_levels[i].memory_clock;
5111
5112        return 0;
5113}
5114
5115static int smu7_power_off_asic(struct pp_hwmgr *hwmgr)
5116{
5117        int result;
5118
5119        result = smu7_disable_dpm_tasks(hwmgr);
5120        PP_ASSERT_WITH_CODE((0 == result),
5121                        "[disable_dpm_tasks] Failed to disable DPM!",
5122                        );
5123
5124        return result;
5125}
5126
5127static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
5128        .backend_init = &smu7_hwmgr_backend_init,
5129        .backend_fini = &smu7_hwmgr_backend_fini,
5130        .asic_setup = &smu7_setup_asic_task,
5131        .dynamic_state_management_enable = &smu7_enable_dpm_tasks,
5132        .apply_state_adjust_rules = smu7_apply_state_adjust_rules,
5133        .force_dpm_level = &smu7_force_dpm_level,
5134        .power_state_set = smu7_set_power_state_tasks,
5135        .get_power_state_size = smu7_get_power_state_size,
5136        .get_mclk = smu7_dpm_get_mclk,
5137        .get_sclk = smu7_dpm_get_sclk,
5138        .patch_boot_state = smu7_dpm_patch_boot_state,
5139        .get_pp_table_entry = smu7_get_pp_table_entry,
5140        .get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
5141        .powerdown_uvd = smu7_powerdown_uvd,
5142        .powergate_uvd = smu7_powergate_uvd,
5143        .powergate_vce = smu7_powergate_vce,
5144        .disable_clock_power_gating = smu7_disable_clock_power_gating,
5145        .update_clock_gatings = smu7_update_clock_gatings,
5146        .notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
5147        .display_config_changed = smu7_display_configuration_changed_task,
5148        .set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
5149        .set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
5150        .stop_thermal_controller = smu7_thermal_stop_thermal_controller,
5151        .get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
5152        .get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
5153        .set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
5154        .reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
5155        .get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
5156        .set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
5157        .uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
5158        .register_irq_handlers = smu7_register_irq_handlers,
5159        .check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
5160        .check_states_equal = smu7_check_states_equal,
5161        .set_fan_control_mode = smu7_set_fan_control_mode,
5162        .get_fan_control_mode = smu7_get_fan_control_mode,
5163        .force_clock_level = smu7_force_clock_level,
5164        .print_clock_levels = smu7_print_clock_levels,
5165        .powergate_gfx = smu7_powergate_gfx,
5166        .get_sclk_od = smu7_get_sclk_od,
5167        .set_sclk_od = smu7_set_sclk_od,
5168        .get_mclk_od = smu7_get_mclk_od,
5169        .set_mclk_od = smu7_set_mclk_od,
5170        .get_clock_by_type = smu7_get_clock_by_type,
5171        .read_sensor = smu7_read_sensor,
5172        .dynamic_state_management_disable = smu7_disable_dpm_tasks,
5173        .avfs_control = smu7_avfs_control,
5174        .disable_smc_firmware_ctf = smu7_thermal_disable_alert,
5175        .start_thermal_controller = smu7_start_thermal_controller,
5176        .notify_cac_buffer_info = smu7_notify_cac_buffer_info,
5177        .get_max_high_clocks = smu7_get_max_high_clocks,
5178        .get_thermal_temperature_range = smu7_get_thermal_temperature_range,
5179        .odn_edit_dpm_table = smu7_odn_edit_dpm_table,
5180        .set_power_limit = smu7_set_power_limit,
5181        .get_power_profile_mode = smu7_get_power_profile_mode,
5182        .set_power_profile_mode = smu7_set_power_profile_mode,
5183        .get_performance_level = smu7_get_performance_level,
5184        .get_asic_baco_capability = smu7_baco_get_capability,
5185        .get_asic_baco_state = smu7_baco_get_state,
5186        .set_asic_baco_state = smu7_baco_set_state,
5187        .power_off_asic = smu7_power_off_asic,
5188};
5189
5190uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
5191                uint32_t clock_insr)
5192{
5193        uint8_t i;
5194        uint32_t temp;
5195        uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);
5196
5197        PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
5198        for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
5199                temp = clock >> i;
5200
5201                if (temp >= min || i == 0)
5202                        break;
5203        }
5204        return i;
5205}
5206
5207int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
5208{
5209        hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
5210        if (hwmgr->pp_table_version == PP_TABLE_V0)
5211                hwmgr->pptable_func = &pptable_funcs;
5212        else if (hwmgr->pp_table_version == PP_TABLE_V1)
5213                hwmgr->pptable_func = &pptable_v1_0_funcs;
5214
5215        return 0;
5216}
5217