/* linux/drivers/gpu/drm/amd/powerplay/smu_v11_0.c */
   1/*
   2 * Copyright 2019 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 */
  22
  23#include <linux/firmware.h>
  24#include <linux/module.h>
  25#include <linux/pci.h>
  26
  27#include "pp_debug.h"
  28#include "amdgpu.h"
  29#include "amdgpu_smu.h"
  30#include "atomfirmware.h"
  31#include "amdgpu_atomfirmware.h"
  32#include "smu_v11_0.h"
  33#include "soc15_common.h"
  34#include "atom.h"
  35#include "vega20_ppt.h"
  36#include "arcturus_ppt.h"
  37#include "navi10_ppt.h"
  38
  39#include "asic_reg/thm/thm_11_0_2_offset.h"
  40#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
  41#include "asic_reg/mp/mp_11_0_offset.h"
  42#include "asic_reg/mp/mp_11_0_sh_mask.h"
  43#include "asic_reg/nbio/nbio_7_4_offset.h"
  44#include "asic_reg/nbio/nbio_7_4_sh_mask.h"
  45#include "asic_reg/smuio/smuio_11_0_0_offset.h"
  46#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"
  47
  48MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
  49MODULE_FIRMWARE("amdgpu/arcturus_smc.bin");
  50MODULE_FIRMWARE("amdgpu/navi10_smc.bin");
  51MODULE_FIRMWARE("amdgpu/navi14_smc.bin");
  52MODULE_FIRMWARE("amdgpu/navi12_smc.bin");
  53
  54#define SMU11_VOLTAGE_SCALE 4
  55
  56static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
  57                                              uint16_t msg)
  58{
  59        struct amdgpu_device *adev = smu->adev;
  60        WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
  61        return 0;
  62}
  63
  64static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
  65{
  66        struct amdgpu_device *adev = smu->adev;
  67
  68        *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
  69        return 0;
  70}
  71
  72static int smu_v11_0_wait_for_response(struct smu_context *smu)
  73{
  74        struct amdgpu_device *adev = smu->adev;
  75        uint32_t cur_value, i, timeout = adev->usec_timeout * 10;
  76
  77        for (i = 0; i < timeout; i++) {
  78                cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
  79                if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
  80                        break;
  81                udelay(1);
  82        }
  83
  84        /* timeout means wrong logic */
  85        if (i == timeout)
  86                return -ETIME;
  87
  88        return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
  89}
  90
  91static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
  92{
  93        struct amdgpu_device *adev = smu->adev;
  94        int ret = 0, index = 0;
  95
  96        index = smu_msg_get_index(smu, msg);
  97        if (index < 0)
  98                return index;
  99
 100        smu_v11_0_wait_for_response(smu);
 101
 102        WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 103
 104        smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
 105
 106        ret = smu_v11_0_wait_for_response(smu);
 107
 108        if (ret)
 109                pr_err("failed send message: %10s (%d) response %#x\n",
 110                       smu_get_message_name(smu, msg), index, ret);
 111
 112        return ret;
 113
 114}
 115
 116static int
 117smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
 118                              uint32_t param)
 119{
 120
 121        struct amdgpu_device *adev = smu->adev;
 122        int ret = 0, index = 0;
 123
 124        index = smu_msg_get_index(smu, msg);
 125        if (index < 0)
 126                return index;
 127
 128        ret = smu_v11_0_wait_for_response(smu);
 129        if (ret)
 130                pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
 131                       smu_get_message_name(smu, msg), index, param, ret);
 132
 133        WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 134
 135        WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
 136
 137        smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
 138
 139        ret = smu_v11_0_wait_for_response(smu);
 140        if (ret)
 141                pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
 142                       smu_get_message_name(smu, msg), index, param, ret);
 143
 144        return ret;
 145}
 146
 147static int smu_v11_0_init_microcode(struct smu_context *smu)
 148{
 149        struct amdgpu_device *adev = smu->adev;
 150        const char *chip_name;
 151        char fw_name[30];
 152        int err = 0;
 153        const struct smc_firmware_header_v1_0 *hdr;
 154        const struct common_firmware_header *header;
 155        struct amdgpu_firmware_info *ucode = NULL;
 156
 157        switch (adev->asic_type) {
 158        case CHIP_VEGA20:
 159                chip_name = "vega20";
 160                break;
 161        case CHIP_ARCTURUS:
 162                chip_name = "arcturus";
 163                break;
 164        case CHIP_NAVI10:
 165                chip_name = "navi10";
 166                break;
 167        case CHIP_NAVI14:
 168                chip_name = "navi14";
 169                break;
 170        case CHIP_NAVI12:
 171                chip_name = "navi12";
 172                break;
 173        default:
 174                BUG();
 175        }
 176
 177        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
 178
 179        err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
 180        if (err)
 181                goto out;
 182        err = amdgpu_ucode_validate(adev->pm.fw);
 183        if (err)
 184                goto out;
 185
 186        hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
 187        amdgpu_ucode_print_smc_hdr(&hdr->header);
 188        adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
 189
 190        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 191                ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
 192                ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
 193                ucode->fw = adev->pm.fw;
 194                header = (const struct common_firmware_header *)ucode->fw->data;
 195                adev->firmware.fw_size +=
 196                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 197        }
 198
 199out:
 200        if (err) {
 201                DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
 202                          fw_name);
 203                release_firmware(adev->pm.fw);
 204                adev->pm.fw = NULL;
 205        }
 206        return err;
 207}
 208
/*
 * Upload the SMC firmware image into MP1 SRAM through the PCIE index/data
 * aperture, pulse the MP1 reset, then wait for the firmware to flag that
 * its interrupts are enabled.
 *
 * Returns 0 on success, -ETIME if the firmware never comes up.
 */
static int smu_v11_0_load_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const uint32_t *src;
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t addr_start = MP1_SRAM;
	uint32_t i;
	uint32_t mp1_fw_flags;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	/* The ucode payload begins ucode_array_offset_bytes into the blob. */
	src = (const uint32_t *)(adev->pm.fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	/*
	 * NOTE(review): the copy starts at src[1] and stops one dword short
	 * of MP1_SMC_SIZE while addr_start begins at MP1_SRAM, so the image
	 * lands shifted by one dword - confirm this offset is intentional.
	 */
	for (i = 1; i < MP1_SMC_SIZE/4 - 1; i++) {
		WREG32_PCIE(addr_start, src[i]);
		addr_start += 4;
	}

	/* Assert then deassert the MP1 reset bit to restart the processor. */
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		1 & MP1_SMN_PUB_CTRL__RESET_MASK);
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);

	/* Poll the firmware-flags register until INTERRUPTS_ENABLED is set. */
	for (i = 0; i < adev->usec_timeout; i++) {
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
			(smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
			MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -ETIME;

	return 0;
}
 246
 247static int smu_v11_0_check_fw_status(struct smu_context *smu)
 248{
 249        struct amdgpu_device *adev = smu->adev;
 250        uint32_t mp1_fw_flags;
 251
 252        mp1_fw_flags = RREG32_PCIE(MP1_Public |
 253                                   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
 254
 255        if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
 256            MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
 257                return 0;
 258
 259        return -EIO;
 260}
 261
 262static int smu_v11_0_check_fw_version(struct smu_context *smu)
 263{
 264        uint32_t if_version = 0xff, smu_version = 0xff;
 265        uint16_t smu_major;
 266        uint8_t smu_minor, smu_debug;
 267        int ret = 0;
 268
 269        ret = smu_get_smc_version(smu, &if_version, &smu_version);
 270        if (ret)
 271                return ret;
 272
 273        smu_major = (smu_version >> 16) & 0xffff;
 274        smu_minor = (smu_version >> 8) & 0xff;
 275        smu_debug = (smu_version >> 0) & 0xff;
 276
 277        switch (smu->adev->asic_type) {
 278        case CHIP_VEGA20:
 279                smu->smc_if_version = SMU11_DRIVER_IF_VERSION_VG20;
 280                break;
 281        case CHIP_ARCTURUS:
 282                smu->smc_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
 283                break;
 284        case CHIP_NAVI10:
 285                smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10;
 286                break;
 287        case CHIP_NAVI14:
 288                smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14;
 289                break;
 290        default:
 291                pr_err("smu unsupported asic type:%d.\n", smu->adev->asic_type);
 292                smu->smc_if_version = SMU11_DRIVER_IF_VERSION_INV;
 293                break;
 294        }
 295
 296        /*
 297         * 1. if_version mismatch is not critical as our fw is designed
 298         * to be backward compatible.
 299         * 2. New fw usually brings some optimizations. But that's visible
 300         * only on the paired driver.
 301         * Considering above, we just leave user a warning message instead
 302         * of halt driver loading.
 303         */
 304        if (if_version != smu->smc_if_version) {
 305                pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
 306                        "smu fw version = 0x%08x (%d.%d.%d)\n",
 307                        smu->smc_if_version, if_version,
 308                        smu_version, smu_major, smu_minor, smu_debug);
 309                pr_warn("SMU driver if version not matched\n");
 310        }
 311
 312        return ret;
 313}
 314
 315static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
 316{
 317        struct amdgpu_device *adev = smu->adev;
 318        uint32_t ppt_offset_bytes;
 319        const struct smc_firmware_header_v2_0 *v2;
 320
 321        v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;
 322
 323        ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
 324        *size = le32_to_cpu(v2->ppt_size_bytes);
 325        *table = (uint8_t *)v2 + ppt_offset_bytes;
 326
 327        return 0;
 328}
 329
 330static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
 331                                      uint32_t *size, uint32_t pptable_id)
 332{
 333        struct amdgpu_device *adev = smu->adev;
 334        const struct smc_firmware_header_v2_1 *v2_1;
 335        struct smc_soft_pptable_entry *entries;
 336        uint32_t pptable_count = 0;
 337        int i = 0;
 338
 339        v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
 340        entries = (struct smc_soft_pptable_entry *)
 341                ((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
 342        pptable_count = le32_to_cpu(v2_1->pptable_count);
 343        for (i = 0; i < pptable_count; i++) {
 344                if (le32_to_cpu(entries[i].id) == pptable_id) {
 345                        *table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
 346                        *size = le32_to_cpu(entries[i].ppt_size_bytes);
 347                        break;
 348                }
 349        }
 350
 351        if (i == pptable_count)
 352                return -EINVAL;
 353
 354        return 0;
 355}
 356
 357static int smu_v11_0_setup_pptable(struct smu_context *smu)
 358{
 359        struct amdgpu_device *adev = smu->adev;
 360        const struct smc_firmware_header_v1_0 *hdr;
 361        int ret, index;
 362        uint32_t size = 0;
 363        uint16_t atom_table_size;
 364        uint8_t frev, crev;
 365        void *table;
 366        uint16_t version_major, version_minor;
 367
 368        hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
 369        version_major = le16_to_cpu(hdr->header.header_version_major);
 370        version_minor = le16_to_cpu(hdr->header.header_version_minor);
 371        if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
 372                switch (version_minor) {
 373                case 0:
 374                        ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
 375                        break;
 376                case 1:
 377                        ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
 378                                                         smu->smu_table.boot_values.pp_table_id);
 379                        break;
 380                default:
 381                        ret = -EINVAL;
 382                        break;
 383                }
 384                if (ret)
 385                        return ret;
 386
 387        } else {
 388                index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
 389                                                    powerplayinfo);
 390
 391                ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
 392                                              (uint8_t **)&table);
 393                if (ret)
 394                        return ret;
 395                size = atom_table_size;
 396        }
 397
 398        if (!smu->smu_table.power_play_table)
 399                smu->smu_table.power_play_table = table;
 400        if (!smu->smu_table.power_play_table_size)
 401                smu->smu_table.power_play_table_size = size;
 402
 403        return 0;
 404}
 405
 406static int smu_v11_0_init_dpm_context(struct smu_context *smu)
 407{
 408        struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
 409
 410        if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
 411                return -EINVAL;
 412
 413        return smu_alloc_dpm_context(smu);
 414}
 415
 416static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
 417{
 418        struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
 419
 420        if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
 421                return -EINVAL;
 422
 423        kfree(smu_dpm->dpm_context);
 424        kfree(smu_dpm->golden_dpm_context);
 425        kfree(smu_dpm->dpm_current_power_state);
 426        kfree(smu_dpm->dpm_request_power_state);
 427        smu_dpm->dpm_context = NULL;
 428        smu_dpm->golden_dpm_context = NULL;
 429        smu_dpm->dpm_context_size = 0;
 430        smu_dpm->dpm_current_power_state = NULL;
 431        smu_dpm->dpm_request_power_state = NULL;
 432
 433        return 0;
 434}
 435
 436static int smu_v11_0_init_smc_tables(struct smu_context *smu)
 437{
 438        struct smu_table_context *smu_table = &smu->smu_table;
 439        struct smu_table *tables = NULL;
 440        int ret = 0;
 441
 442        if (smu_table->tables || smu_table->table_count == 0)
 443                return -EINVAL;
 444
 445        tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
 446                         GFP_KERNEL);
 447        if (!tables)
 448                return -ENOMEM;
 449
 450        smu_table->tables = tables;
 451
 452        ret = smu_tables_init(smu, tables);
 453        if (ret)
 454                return ret;
 455
 456        ret = smu_v11_0_init_dpm_context(smu);
 457        if (ret)
 458                return ret;
 459
 460        return 0;
 461}
 462
 463static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
 464{
 465        struct smu_table_context *smu_table = &smu->smu_table;
 466        int ret = 0;
 467
 468        if (!smu_table->tables || smu_table->table_count == 0)
 469                return -EINVAL;
 470
 471        kfree(smu_table->tables);
 472        kfree(smu_table->metrics_table);
 473        smu_table->tables = NULL;
 474        smu_table->table_count = 0;
 475        smu_table->metrics_table = NULL;
 476        smu_table->metrics_time = 0;
 477
 478        ret = smu_v11_0_fini_dpm_context(smu);
 479        if (ret)
 480                return ret;
 481        return 0;
 482}
 483
 484static int smu_v11_0_init_power(struct smu_context *smu)
 485{
 486        struct smu_power_context *smu_power = &smu->smu_power;
 487
 488        if (!smu->pm_enabled)
 489                return 0;
 490        if (smu_power->power_context || smu_power->power_context_size != 0)
 491                return -EINVAL;
 492
 493        smu_power->power_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
 494                                           GFP_KERNEL);
 495        if (!smu_power->power_context)
 496                return -ENOMEM;
 497        smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context);
 498
 499        return 0;
 500}
 501
 502static int smu_v11_0_fini_power(struct smu_context *smu)
 503{
 504        struct smu_power_context *smu_power = &smu->smu_power;
 505
 506        if (!smu->pm_enabled)
 507                return 0;
 508        if (!smu_power->power_context || smu_power->power_context_size == 0)
 509                return -EINVAL;
 510
 511        kfree(smu_power->power_context);
 512        smu_power->power_context = NULL;
 513        smu_power->power_context_size = 0;
 514
 515        return 0;
 516}
 517
/*
 * smu_v11_0_get_vbios_bootup_values - cache VBIOS bootup values
 * @smu: smu context
 *
 * Reads the atom firmwareinfo data table from the VBIOS and copies the
 * bootup clocks, voltages, cooling solution id and (content revision 3+)
 * the soft pptable id into smu->smu_table.boot_values.
 *
 * Returns 0 on success, an error if the table cannot be fetched or has an
 * unsupported format revision.
 */
int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
				      (uint8_t **)&header);
	if (ret)
		return ret;

	/* Only format revision 3 layouts are understood here. */
	if (header->format_revision != 3) {
		pr_err("unknown atom_firmware_info version! for smu11\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		/* Revisions 3.0-3.2 share the v3_1 layout; no pptable id. */
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		/* socclk/dcefclk are filled in later from getsmuclockinfo. */
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		break;
	case 3:
	default:
		/* Revision 3.3+ additionally carries the soft pptable id. */
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
	}

	/* Remember the revision for later content-dependent decisions. */
	smu->smu_table.boot_values.format_revision = header->format_revision;
	smu->smu_table.boot_values.content_revision = header->content_revision;

	return 0;
}
 578
 579static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
 580{
 581        int ret, index;
 582        struct amdgpu_device *adev = smu->adev;
 583        struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
 584        struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
 585
 586        input.clk_id = SMU11_SYSPLL0_SOCCLK_ID;
 587        input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
 588        index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
 589                                            getsmuclockinfo);
 590
 591        ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
 592                                        (uint32_t *)&input);
 593        if (ret)
 594                return -EINVAL;
 595
 596        output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
 597        smu->smu_table.boot_values.socclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
 598
 599        memset(&input, 0, sizeof(input));
 600        input.clk_id = SMU11_SYSPLL0_DCEFCLK_ID;
 601        input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
 602        index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
 603                                            getsmuclockinfo);
 604
 605        ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
 606                                        (uint32_t *)&input);
 607        if (ret)
 608                return -EINVAL;
 609
 610        output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
 611        smu->smu_table.boot_values.dcefclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
 612
 613        memset(&input, 0, sizeof(input));
 614        input.clk_id = SMU11_SYSPLL0_ECLK_ID;
 615        input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
 616        index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
 617                                            getsmuclockinfo);
 618
 619        ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
 620                                        (uint32_t *)&input);
 621        if (ret)
 622                return -EINVAL;
 623
 624        output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
 625        smu->smu_table.boot_values.eclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
 626
 627        memset(&input, 0, sizeof(input));
 628        input.clk_id = SMU11_SYSPLL0_VCLK_ID;
 629        input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
 630        index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
 631                                            getsmuclockinfo);
 632
 633        ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
 634                                        (uint32_t *)&input);
 635        if (ret)
 636                return -EINVAL;
 637
 638        output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
 639        smu->smu_table.boot_values.vclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
 640
 641        memset(&input, 0, sizeof(input));
 642        input.clk_id = SMU11_SYSPLL0_DCLK_ID;
 643        input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
 644        index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
 645                                            getsmuclockinfo);
 646
 647        ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
 648                                        (uint32_t *)&input);
 649        if (ret)
 650                return -EINVAL;
 651
 652        output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
 653        smu->smu_table.boot_values.dclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
 654
 655        if ((smu->smu_table.boot_values.format_revision == 3) &&
 656            (smu->smu_table.boot_values.content_revision >= 2)) {
 657                memset(&input, 0, sizeof(input));
 658                input.clk_id = SMU11_SYSPLL1_0_FCLK_ID;
 659                input.syspll_id = SMU11_SYSPLL1_2_ID;
 660                input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
 661                index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
 662                                                    getsmuclockinfo);
 663
 664                ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
 665                                                (uint32_t *)&input);
 666                if (ret)
 667                        return -EINVAL;
 668
 669                output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
 670                smu->smu_table.boot_values.fclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
 671        }
 672
 673        return 0;
 674}
 675
 676static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
 677{
 678        struct smu_table_context *smu_table = &smu->smu_table;
 679        struct smu_table *memory_pool = &smu_table->memory_pool;
 680        int ret = 0;
 681        uint64_t address;
 682        uint32_t address_low, address_high;
 683
 684        if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
 685                return ret;
 686
 687        address = (uintptr_t)memory_pool->cpu_addr;
 688        address_high = (uint32_t)upper_32_bits(address);
 689        address_low  = (uint32_t)lower_32_bits(address);
 690
 691        ret = smu_send_smc_msg_with_param(smu,
 692                                          SMU_MSG_SetSystemVirtualDramAddrHigh,
 693                                          address_high);
 694        if (ret)
 695                return ret;
 696        ret = smu_send_smc_msg_with_param(smu,
 697                                          SMU_MSG_SetSystemVirtualDramAddrLow,
 698                                          address_low);
 699        if (ret)
 700                return ret;
 701
 702        address = memory_pool->mc_address;
 703        address_high = (uint32_t)upper_32_bits(address);
 704        address_low  = (uint32_t)lower_32_bits(address);
 705
 706        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
 707                                          address_high);
 708        if (ret)
 709                return ret;
 710        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
 711                                          address_low);
 712        if (ret)
 713                return ret;
 714        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
 715                                          (uint32_t)memory_pool->size);
 716        if (ret)
 717                return ret;
 718
 719        return ret;
 720}
 721
 722static int smu_v11_0_check_pptable(struct smu_context *smu)
 723{
 724        int ret;
 725
 726        ret = smu_check_powerplay_table(smu);
 727        return ret;
 728}
 729
 730static int smu_v11_0_parse_pptable(struct smu_context *smu)
 731{
 732        int ret;
 733
 734        struct smu_table_context *table_context = &smu->smu_table;
 735        struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];
 736
 737        if (table_context->driver_pptable)
 738                return -EINVAL;
 739
 740        table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);
 741
 742        if (!table_context->driver_pptable)
 743                return -ENOMEM;
 744
 745        ret = smu_store_powerplay_table(smu);
 746        if (ret)
 747                return -EINVAL;
 748
 749        ret = smu_append_powerplay_table(smu);
 750
 751        return ret;
 752}
 753
 754static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
 755{
 756        int ret;
 757
 758        ret = smu_set_default_dpm_table(smu);
 759
 760        return ret;
 761}
 762
 763static int smu_v11_0_write_pptable(struct smu_context *smu)
 764{
 765        struct smu_table_context *table_context = &smu->smu_table;
 766        int ret = 0;
 767
 768        ret = smu_update_table(smu, SMU_TABLE_PPTABLE, 0,
 769                               table_context->driver_pptable, true);
 770
 771        return ret;
 772}
 773
 774static int smu_v11_0_write_watermarks_table(struct smu_context *smu)
 775{
 776        int ret = 0;
 777        struct smu_table_context *smu_table = &smu->smu_table;
 778        struct smu_table *table = NULL;
 779
 780        table = &smu_table->tables[SMU_TABLE_WATERMARKS];
 781
 782        if (!table->cpu_addr)
 783                return -EINVAL;
 784
 785        ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
 786                                true);
 787
 788        return ret;
 789}
 790
 791static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
 792{
 793        int ret;
 794
 795        ret = smu_send_smc_msg_with_param(smu,
 796                                          SMU_MSG_SetMinDeepSleepDcefclk, clk);
 797        if (ret)
 798                pr_err("SMU11 attempt to set divider for DCEFCLK Failed!");
 799
 800        return ret;
 801}
 802
 803static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
 804{
 805        struct smu_table_context *table_context = &smu->smu_table;
 806
 807        if (!smu->pm_enabled)
 808                return 0;
 809        if (!table_context)
 810                return -EINVAL;
 811
 812        return smu_set_deep_sleep_dcefclk(smu,
 813                                          table_context->boot_values.dcefclk / 100);
 814}
 815
 816static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
 817{
 818        int ret = 0;
 819        struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
 820
 821        if (tool_table->mc_address) {
 822                ret = smu_send_smc_msg_with_param(smu,
 823                                SMU_MSG_SetToolsDramAddrHigh,
 824                                upper_32_bits(tool_table->mc_address));
 825                if (!ret)
 826                        ret = smu_send_smc_msg_with_param(smu,
 827                                SMU_MSG_SetToolsDramAddrLow,
 828                                lower_32_bits(tool_table->mc_address));
 829        }
 830
 831        return ret;
 832}
 833
 834static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
 835{
 836        int ret = 0;
 837
 838        if (!smu->pm_enabled)
 839                return ret;
 840
 841        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count);
 842        return ret;
 843}
 844
 845
 846static int smu_v11_0_set_allowed_mask(struct smu_context *smu)
 847{
 848        struct smu_feature *feature = &smu->smu_feature;
 849        int ret = 0;
 850        uint32_t feature_mask[2];
 851
 852        mutex_lock(&feature->mutex);
 853        if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
 854                goto failed;
 855
 856        bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
 857
 858        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
 859                                          feature_mask[1]);
 860        if (ret)
 861                goto failed;
 862
 863        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
 864                                          feature_mask[0]);
 865        if (ret)
 866                goto failed;
 867
 868failed:
 869        mutex_unlock(&feature->mutex);
 870        return ret;
 871}
 872
 873static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
 874                                      uint32_t *feature_mask, uint32_t num)
 875{
 876        uint32_t feature_mask_high = 0, feature_mask_low = 0;
 877        int ret = 0;
 878
 879        if (!feature_mask || num < 2)
 880                return -EINVAL;
 881
 882        ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
 883        if (ret)
 884                return ret;
 885        ret = smu_read_smc_arg(smu, &feature_mask_high);
 886        if (ret)
 887                return ret;
 888
 889        ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
 890        if (ret)
 891                return ret;
 892        ret = smu_read_smc_arg(smu, &feature_mask_low);
 893        if (ret)
 894                return ret;
 895
 896        feature_mask[0] = feature_mask_low;
 897        feature_mask[1] = feature_mask_high;
 898
 899        return ret;
 900}
 901
/*
 * Enable or disable all SMU features, then refresh the driver's cached
 * enabled/supported bitmaps from what the firmware actually reports.
 */
static int smu_v11_0_system_features_control(struct smu_context *smu,
					     bool en)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_mask[2];
	int ret = 0;

	if (smu->pm_enabled) {
		ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
					     SMU_MSG_DisableAllSmuFeatures));
		if (ret)
			return ret;
	}

	/* Re-read the firmware's view: two dwords forming a 64-bit mask. */
	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		return ret;

	/* NOTE(review): copies feature->feature_num bits out of a 64-bit
	 * source mask — assumes feature_num <= 64; confirm for new ASICs. */
	bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
		    feature->feature_num);
	bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
		    feature->feature_num);

	return ret;
}
 927
 928static int smu_v11_0_notify_display_change(struct smu_context *smu)
 929{
 930        int ret = 0;
 931
 932        if (!smu->pm_enabled)
 933                return ret;
 934        if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
 935            smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
 936                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);
 937
 938        return ret;
 939}
 940
/*
 * Query the maximum sustainable frequency of one clock domain.
 *
 * Prefers the DC-mode limit; if the SMU reports zero for it, falls back
 * to the AC-mode maximum. Returns 0 without touching *clock when the
 * required SMU messages are not implemented on this ASIC.
 */
static int
smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
				    enum smu_clk_type clock_select)
{
	int ret = 0;
	int clk_id;

	if (!smu->pm_enabled)
		return ret;

	/* Not every ASIC implements the DPM-frequency query messages. */
	if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
	    (smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
		return 0;

	clk_id = smu_clk_get_index(smu, clock_select);
	if (clk_id < 0)
		return -EINVAL;

	/* The clock id is carried in the upper 16 bits of the argument. */
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					  clk_id << 16);
	if (ret) {
		pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
		return ret;
	}

	ret = smu_read_smc_arg(smu, clock);
	if (ret)
		return ret;

	if (*clock != 0)
		return 0;

	/* if DC limit is zero, return AC limit */
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					  clk_id << 16);
	if (ret) {
		pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
		return ret;
	}

	ret = smu_read_smc_arg(smu, clock);

	return ret;
}
 985
 986static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
 987{
 988        struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
 989        int ret = 0;
 990
 991        max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
 992                                         GFP_KERNEL);
 993        smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;
 994
 995        max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
 996        max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
 997        max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
 998        max_sustainable_clocks->display_clock = 0xFFFFFFFF;
 999        max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
1000        max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
1001
1002        if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
1003                ret = smu_v11_0_get_max_sustainable_clock(smu,
1004                                                          &(max_sustainable_clocks->uclock),
1005                                                          SMU_UCLK);
1006                if (ret) {
1007                        pr_err("[%s] failed to get max UCLK from SMC!",
1008                               __func__);
1009                        return ret;
1010                }
1011        }
1012
1013        if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
1014                ret = smu_v11_0_get_max_sustainable_clock(smu,
1015                                                          &(max_sustainable_clocks->soc_clock),
1016                                                          SMU_SOCCLK);
1017                if (ret) {
1018                        pr_err("[%s] failed to get max SOCCLK from SMC!",
1019                               __func__);
1020                        return ret;
1021                }
1022        }
1023
1024        if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
1025                ret = smu_v11_0_get_max_sustainable_clock(smu,
1026                                                          &(max_sustainable_clocks->dcef_clock),
1027                                                          SMU_DCEFCLK);
1028                if (ret) {
1029                        pr_err("[%s] failed to get max DCEFCLK from SMC!",
1030                               __func__);
1031                        return ret;
1032                }
1033
1034                ret = smu_v11_0_get_max_sustainable_clock(smu,
1035                                                          &(max_sustainable_clocks->display_clock),
1036                                                          SMU_DISPCLK);
1037                if (ret) {
1038                        pr_err("[%s] failed to get max DISPCLK from SMC!",
1039                               __func__);
1040                        return ret;
1041                }
1042                ret = smu_v11_0_get_max_sustainable_clock(smu,
1043                                                          &(max_sustainable_clocks->phy_clock),
1044                                                          SMU_PHYCLK);
1045                if (ret) {
1046                        pr_err("[%s] failed to get max PHYCLK from SMC!",
1047                               __func__);
1048                        return ret;
1049                }
1050                ret = smu_v11_0_get_max_sustainable_clock(smu,
1051                                                          &(max_sustainable_clocks->pixel_clock),
1052                                                          SMU_PIXCLK);
1053                if (ret) {
1054                        pr_err("[%s] failed to get max PIXCLK from SMC!",
1055                               __func__);
1056                        return ret;
1057                }
1058        }
1059
1060        if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
1061                max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;
1062
1063        return 0;
1064}
1065
1066static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
1067{
1068        int ret = 0;
1069
1070        if (n > smu->default_power_limit) {
1071                pr_err("New power limit is over the max allowed %d\n",
1072                                smu->default_power_limit);
1073                return -EINVAL;
1074        }
1075
1076        if (n == 0)
1077                n = smu->default_power_limit;
1078
1079        if (!smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
1080                pr_err("Setting new power limit is not supported!\n");
1081                return -EOPNOTSUPP;
1082        }
1083
1084        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
1085        if (ret) {
1086                pr_err("[%s] Set power limit Failed!\n", __func__);
1087                return ret;
1088        }
1089        smu->power_limit = n;
1090
1091        return 0;
1092}
1093
1094static int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
1095                                          enum smu_clk_type clk_id,
1096                                          uint32_t *value)
1097{
1098        int ret = 0;
1099        uint32_t freq = 0;
1100        int asic_clk_id;
1101
1102        if (clk_id >= SMU_CLK_COUNT || !value)
1103                return -EINVAL;
1104
1105        asic_clk_id = smu_clk_get_index(smu, clk_id);
1106        if (asic_clk_id < 0)
1107                return -EINVAL;
1108
1109        /* if don't has GetDpmClockFreq Message, try get current clock by SmuMetrics_t */
1110        if (smu_msg_get_index(smu, SMU_MSG_GetDpmClockFreq) < 0)
1111                ret =  smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
1112        else {
1113                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq,
1114                                                  (asic_clk_id << 16));
1115                if (ret)
1116                        return ret;
1117
1118                ret = smu_read_smc_arg(smu, &freq);
1119                if (ret)
1120                        return ret;
1121        }
1122
1123        freq *= 100;
1124        *value = freq;
1125
1126        return ret;
1127}
1128
/*
 * Program the THM block's low/high temperature alert thresholds.
 *
 * The requested range (in SMU_TEMPERATURE_UNITS_PER_CENTIGRADES units)
 * is clamped to the driver's alert limits and written, in whole degrees,
 * into THM_THERMAL_INT_CTRL.
 */
static int smu_v11_0_set_thermal_range(struct smu_context *smu,
				       struct smu_temperature_range range)
{
	struct amdgpu_device *adev = smu->adev;
	int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
	int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
	uint32_t val;

	low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
			range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
	high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
			range.max / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);

	if (low > high)
		return -EINVAL;

	val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
	/* Unmask both the high and low threshold interrupts. */
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
	/* Threshold fields are 8 bits wide. */
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
	val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);

	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);

	return 0;
}
1158
1159static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
1160{
1161        struct amdgpu_device *adev = smu->adev;
1162        uint32_t val = 0;
1163
1164        val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
1165        val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
1166        val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
1167
1168        WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);
1169
1170        return 0;
1171}
1172
/*
 * Configure thermal alerts from the ASIC's temperature-range policy and
 * publish the resolved range into the shared dpm thermal state.
 */
static int smu_v11_0_start_thermal_control(struct smu_context *smu)
{
	int ret = 0;
	struct smu_temperature_range range;
	struct amdgpu_device *adev = smu->adev;

	if (!smu->pm_enabled)
		return ret;

	/* Start from the SMU11 default policy, then let the ASIC refine it. */
	memcpy(&range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));

	ret = smu_get_thermal_temperature_range(smu, &range);
	if (ret)
		return ret;

	/* Only program the hardware when a thermal controller is present. */
	if (smu->smu_table.thermal_controller_type) {
		ret = smu_v11_0_set_thermal_range(smu, range);
		if (ret)
			return ret;

		ret = smu_v11_0_enable_thermal_alert(smu);
		if (ret)
			return ret;

		ret = smu_set_thermal_fan_table(smu);
		if (ret)
			return ret;
	}

	/* Mirror the range for consumers of adev->pm.dpm.thermal. */
	adev->pm.dpm.thermal.min_temp = range.min;
	adev->pm.dpm.thermal.max_temp = range.max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;

	return ret;
}
1214
1215static uint16_t convert_to_vddc(uint8_t vid)
1216{
1217        return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
1218}
1219
1220static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
1221{
1222        struct amdgpu_device *adev = smu->adev;
1223        uint32_t vdd = 0, val_vid = 0;
1224
1225        if (!value)
1226                return -EINVAL;
1227        val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
1228                SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
1229                SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;
1230
1231        vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);
1232
1233        *value = vdd;
1234
1235        return 0;
1236
1237}
1238
1239static int smu_v11_0_read_sensor(struct smu_context *smu,
1240                                 enum amd_pp_sensors sensor,
1241                                 void *data, uint32_t *size)
1242{
1243        int ret = 0;
1244
1245        if(!data || !size)
1246                return -EINVAL;
1247
1248        switch (sensor) {
1249        case AMDGPU_PP_SENSOR_GFX_MCLK:
1250                ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data);
1251                *size = 4;
1252                break;
1253        case AMDGPU_PP_SENSOR_GFX_SCLK:
1254                ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data);
1255                *size = 4;
1256                break;
1257        case AMDGPU_PP_SENSOR_VDDGFX:
1258                ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
1259                *size = 4;
1260                break;
1261        case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
1262                *(uint32_t *)data = 0;
1263                *size = 4;
1264                break;
1265        default:
1266                ret = smu_common_read_sensor(smu, sensor, data, size);
1267                break;
1268        }
1269
1270        if (ret)
1271                *size = 0;
1272
1273        return ret;
1274}
1275
1276static int
1277smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
1278                                        struct pp_display_clock_request
1279                                        *clock_req)
1280{
1281        enum amd_pp_clock_type clk_type = clock_req->clock_type;
1282        int ret = 0;
1283        enum smu_clk_type clk_select = 0;
1284        uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
1285
1286        if (!smu->pm_enabled)
1287                return -EINVAL;
1288
1289        if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
1290                smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
1291                switch (clk_type) {
1292                case amd_pp_dcef_clock:
1293                        clk_select = SMU_DCEFCLK;
1294                        break;
1295                case amd_pp_disp_clock:
1296                        clk_select = SMU_DISPCLK;
1297                        break;
1298                case amd_pp_pixel_clock:
1299                        clk_select = SMU_PIXCLK;
1300                        break;
1301                case amd_pp_phy_clock:
1302                        clk_select = SMU_PHYCLK;
1303                        break;
1304                case amd_pp_mem_clock:
1305                        clk_select = SMU_UCLK;
1306                        break;
1307                default:
1308                        pr_info("[%s] Invalid Clock Type!", __func__);
1309                        ret = -EINVAL;
1310                        break;
1311                }
1312
1313                if (ret)
1314                        goto failed;
1315
1316                if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
1317                        return 0;
1318
1319                mutex_lock(&smu->mutex);
1320                ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0);
1321                mutex_unlock(&smu->mutex);
1322
1323                if(clk_select == SMU_UCLK)
1324                        smu->hard_min_uclk_req_from_dal = clk_freq;
1325        }
1326
1327failed:
1328        return ret;
1329}
1330
1331static int
1332smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct
1333                                          dm_pp_wm_sets_with_clock_ranges_soc15
1334                                          *clock_ranges)
1335{
1336        int ret = 0;
1337        struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
1338        void *table = watermarks->cpu_addr;
1339
1340        if (!smu->disable_watermark &&
1341            smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
1342            smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
1343                smu_set_watermarks_table(smu, table, clock_ranges);
1344                smu->watermarks_bitmap |= WATERMARKS_EXIST;
1345                smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
1346        }
1347
1348        return ret;
1349}
1350
1351static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
1352{
1353        int ret = 0;
1354        struct amdgpu_device *adev = smu->adev;
1355
1356        switch (adev->asic_type) {
1357        case CHIP_VEGA20:
1358                break;
1359        case CHIP_NAVI10:
1360        case CHIP_NAVI14:
1361        case CHIP_NAVI12:
1362                if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
1363                        return 0;
1364                mutex_lock(&smu->mutex);
1365                if (enable)
1366                        ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
1367                else
1368                        ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
1369                mutex_unlock(&smu->mutex);
1370                break;
1371        default:
1372                break;
1373        }
1374
1375        return ret;
1376}
1377
1378static uint32_t
1379smu_v11_0_get_fan_control_mode(struct smu_context *smu)
1380{
1381        if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
1382                return AMD_FAN_CTRL_MANUAL;
1383        else
1384                return AMD_FAN_CTRL_AUTO;
1385}
1386
1387static int
1388smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
1389{
1390        int ret = 0;
1391
1392        if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
1393                return 0;
1394
1395        ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
1396        if (ret)
1397                pr_err("[%s]%s smc FAN CONTROL feature failed!",
1398                       __func__, (auto_fan_control ? "Start" : "Stop"));
1399
1400        return ret;
1401}
1402
/*
 * Put the fan controller (FDO) into an explicit PWM mode, zeroing the
 * TMIN field first.
 */
static int
smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, TMIN, 0));
	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, FDO_PWM_MODE, mode));

	return 0;
}
1417
1418static int
1419smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
1420{
1421        struct amdgpu_device *adev = smu->adev;
1422        uint32_t duty100, duty;
1423        uint64_t tmp64;
1424
1425        if (speed > 100)
1426                speed = 100;
1427
1428        if (smu_v11_0_auto_fan_control(smu, 0))
1429                return -EINVAL;
1430
1431        duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
1432                                CG_FDO_CTRL1, FMAX_DUTY100);
1433        if (!duty100)
1434                return -EINVAL;
1435
1436        tmp64 = (uint64_t)speed * duty100;
1437        do_div(tmp64, 100);
1438        duty = (uint32_t)tmp64;
1439
1440        WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
1441                     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
1442                                   CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
1443
1444        return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
1445}
1446
1447static int
1448smu_v11_0_set_fan_control_mode(struct smu_context *smu,
1449                               uint32_t mode)
1450{
1451        int ret = 0;
1452
1453        switch (mode) {
1454        case AMD_FAN_CTRL_NONE:
1455                ret = smu_v11_0_set_fan_speed_percent(smu, 100);
1456                break;
1457        case AMD_FAN_CTRL_MANUAL:
1458                ret = smu_v11_0_auto_fan_control(smu, 0);
1459                break;
1460        case AMD_FAN_CTRL_AUTO:
1461                ret = smu_v11_0_auto_fan_control(smu, 1);
1462                break;
1463        default:
1464                break;
1465        }
1466
1467        if (ret) {
1468                pr_err("[%s]Set fan control mode failed!", __func__);
1469                return -EINVAL;
1470        }
1471
1472        return ret;
1473}
1474
/*
 * Set an exact fan speed in RPM by programming the tachometer target
 * period, after disabling SMU automatic fan control.
 */
static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
				       uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;
	uint32_t tach_period, crystal_clock_freq;

	/* A zero target would divide by zero below. */
	if (!speed)
		return -EINVAL;

	mutex_lock(&(smu->mutex));
	ret = smu_v11_0_auto_fan_control(smu, 0);
	if (ret)
		goto set_fan_speed_rpm_failed;

	/* Derive the tach period from the reference (xclk) frequency. */
	crystal_clock_freq = amdgpu_asic_get_xclk(adev);
	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
	WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
				   CG_TACH_CTRL, TARGET_PERIOD,
				   tach_period));

	ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);

set_fan_speed_rpm_failed:
	mutex_unlock(&(smu->mutex));
	return ret;
}
1503
1504#define XGMI_STATE_D0 1
1505#define XGMI_STATE_D3 0
1506
1507static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
1508                                     uint32_t pstate)
1509{
1510        int ret = 0;
1511        mutex_lock(&(smu->mutex));
1512        ret = smu_send_smc_msg_with_param(smu,
1513                                          SMU_MSG_SetXgmiMode,
1514                                          pstate ? XGMI_STATE_D0 : XGMI_STATE_D3);
1515        mutex_unlock(&(smu->mutex));
1516        return ret;
1517}
1518
#define THM_11_0__SRCID__THM_DIG_THERM_L2H		0		/* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH  */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L		1		/* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL  */

/*
 * Thermal interrupt handler: logs which temperature threshold was
 * crossed. Only THM-client interrupts are handled; always returns 0.
 */
static int smu_v11_0_irq_process(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;

	if (client_id == SOC15_IH_CLIENTID_THM) {
		switch (src_id) {
		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
			pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
		break;
		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
			pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
		break;
		default:
			pr_warn("GPU under temperature range unknown src id (%d), detected on PCIe %d:%d.%d!\n",
				src_id,
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
		break;

		}
	}

	return 0;
}
1556
/* Dispatch table for the SMU v11 thermal interrupt source. */
static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
{
	.process = smu_v11_0_irq_process,
};
1561
1562static int smu_v11_0_register_irq_handler(struct smu_context *smu)
1563{
1564        struct amdgpu_device *adev = smu->adev;
1565        struct amdgpu_irq_src *irq_src = smu->irq_source;
1566        int ret = 0;
1567
1568        /* already register */
1569        if (irq_src)
1570                return 0;
1571
1572        irq_src = kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
1573        if (!irq_src)
1574                return -ENOMEM;
1575        smu->irq_source = irq_src;
1576
1577        irq_src->funcs = &smu_v11_0_irq_funcs;
1578
1579        ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
1580                                THM_11_0__SRCID__THM_DIG_THERM_L2H,
1581                                irq_src);
1582        if (ret)
1583                return ret;
1584
1585        ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
1586                                THM_11_0__SRCID__THM_DIG_THERM_H2L,
1587                                irq_src);
1588        if (ret)
1589                return ret;
1590
1591        return ret;
1592}
1593
1594static int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
1595                struct pp_smu_nv_clock_table *max_clocks)
1596{
1597        struct smu_table_context *table_context = &smu->smu_table;
1598        struct smu_11_0_max_sustainable_clocks *sustainable_clocks = NULL;
1599
1600        if (!max_clocks || !table_context->max_sustainable_clocks)
1601                return -EINVAL;
1602
1603        sustainable_clocks = table_context->max_sustainable_clocks;
1604
1605        max_clocks->dcfClockInKhz =
1606                        (unsigned int) sustainable_clocks->dcef_clock * 1000;
1607        max_clocks->displayClockInKhz =
1608                        (unsigned int) sustainable_clocks->display_clock * 1000;
1609        max_clocks->phyClockInKhz =
1610                        (unsigned int) sustainable_clocks->phy_clock * 1000;
1611        max_clocks->pixelClockInKhz =
1612                        (unsigned int) sustainable_clocks->pixel_clock * 1000;
1613        max_clocks->uClockInKhz =
1614                        (unsigned int) sustainable_clocks->uclock * 1000;
1615        max_clocks->socClockInKhz =
1616                        (unsigned int) sustainable_clocks->soc_clock * 1000;
1617        max_clocks->dscClockInKhz = 0;
1618        max_clocks->dppClockInKhz = 0;
1619        max_clocks->fabricClockInKhz = 0;
1620
1621        return 0;
1622}
1623
1624static int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
1625{
1626        int ret = 0;
1627
1628        mutex_lock(&smu->mutex);
1629        ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME);
1630        mutex_unlock(&smu->mutex);
1631
1632        return ret;
1633}
1634
1635static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
1636{
1637        return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq);
1638}
1639
/*
 * BACO is usable only when the platform advertises support, the SMU's
 * BACO feature is enabled, and the BIF strap reports PX capability.
 */
static bool smu_v11_0_baco_is_support(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	uint32_t val;
	bool baco_support;

	mutex_lock(&smu_baco->mutex);
	baco_support = smu_baco->platform_support;
	mutex_unlock(&smu_baco->mutex);

	if (!baco_support)
		return false;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
		return false;

	/* Check the PX-capable strap bit in the BIF. */
	val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
	if (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
		return true;

	return false;
}
1663
1664static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
1665{
1666        struct smu_baco_context *smu_baco = &smu->smu_baco;
1667        enum smu_baco_state baco_state;
1668
1669        mutex_lock(&smu_baco->mutex);
1670        baco_state = smu_baco->state;
1671        mutex_unlock(&smu_baco->mutex);
1672
1673        return baco_state;
1674}
1675
1676static int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
1677{
1678
1679        struct smu_baco_context *smu_baco = &smu->smu_baco;
1680        int ret = 0;
1681
1682        if (smu_v11_0_baco_get_state(smu) == state)
1683                return 0;
1684
1685        mutex_lock(&smu_baco->mutex);
1686
1687        if (state == SMU_BACO_STATE_ENTER)
1688                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, BACO_SEQ_BACO);
1689        else
1690                ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco);
1691        if (ret)
1692                goto out;
1693
1694        smu_baco->state = state;
1695out:
1696        mutex_unlock(&smu_baco->mutex);
1697        return ret;
1698}
1699
1700static int smu_v11_0_baco_reset(struct smu_context *smu)
1701{
1702        int ret = 0;
1703
1704        ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
1705        if (ret)
1706                return ret;
1707
1708        ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
1709        if (ret)
1710                return ret;
1711
1712        msleep(10);
1713
1714        ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
1715        if (ret)
1716                return ret;
1717
1718        return ret;
1719}
1720
1721static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
1722                                                 uint32_t *min, uint32_t *max)
1723{
1724        int ret = 0, clk_id = 0;
1725        uint32_t param = 0;
1726
1727        mutex_lock(&smu->mutex);
1728        clk_id = smu_clk_get_index(smu, clk_type);
1729        if (clk_id < 0) {
1730                ret = -EINVAL;
1731                goto failed;
1732        }
1733        param = (clk_id & 0xffff) << 16;
1734
1735        if (max) {
1736                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
1737                if (ret)
1738                        goto failed;
1739                ret = smu_read_smc_arg(smu, max);
1740                if (ret)
1741                        goto failed;
1742        }
1743
1744        if (min) {
1745                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
1746                if (ret)
1747                        goto failed;
1748                ret = smu_read_smc_arg(smu, min);
1749                if (ret)
1750                        goto failed;
1751        }
1752
1753failed:
1754        mutex_unlock(&smu->mutex);
1755        return ret;
1756}
1757
/*
 * Common SMU v11 callback table shared by every SMU11 ASIC handled by
 * this file; ASIC-specific behavior is layered on top through the
 * per-ASIC ppt_funcs installed in smu_v11_0_set_smu_funcs() below.
 */
static const struct smu_funcs smu_v11_0_funcs = {
	.init_microcode = smu_v11_0_init_microcode,
	.load_microcode = smu_v11_0_load_microcode,
	.check_fw_status = smu_v11_0_check_fw_status,
	.check_fw_version = smu_v11_0_check_fw_version,
	.send_smc_msg = smu_v11_0_send_msg,
	.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
	.read_smc_arg = smu_v11_0_read_arg,
	.setup_pptable = smu_v11_0_setup_pptable,
	.init_smc_tables = smu_v11_0_init_smc_tables,
	.fini_smc_tables = smu_v11_0_fini_smc_tables,
	.init_power = smu_v11_0_init_power,
	.fini_power = smu_v11_0_fini_power,
	.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
	.get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
	.check_pptable = smu_v11_0_check_pptable,
	.parse_pptable = smu_v11_0_parse_pptable,
	.populate_smc_tables = smu_v11_0_populate_smc_pptable,
	.write_pptable = smu_v11_0_write_pptable,
	.write_watermarks_table = smu_v11_0_write_watermarks_table,
	.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
	.set_tool_table_location = smu_v11_0_set_tool_table_location,
	.init_display_count = smu_v11_0_init_display_count,
	.set_allowed_mask = smu_v11_0_set_allowed_mask,
	.get_enabled_mask = smu_v11_0_get_enabled_mask,
	.system_features_control = smu_v11_0_system_features_control,
	.notify_display_change = smu_v11_0_notify_display_change,
	.set_power_limit = smu_v11_0_set_power_limit,
	.get_current_clk_freq = smu_v11_0_get_current_clk_freq,
	.init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
	.start_thermal_control = smu_v11_0_start_thermal_control,
	.read_sensor = smu_v11_0_read_sensor,
	.set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
	.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
	.set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
	.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
	.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
	.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
	.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
	.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
	.gfx_off_control = smu_v11_0_gfx_off_control,
	.register_irq_handler = smu_v11_0_register_irq_handler,
	.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
	.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
	.baco_is_support = smu_v11_0_baco_is_support,
	.baco_get_state = smu_v11_0_baco_get_state,
	.baco_set_state = smu_v11_0_baco_set_state,
	.baco_reset = smu_v11_0_baco_reset,
	.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
};
1809
1810void smu_v11_0_set_smu_funcs(struct smu_context *smu)
1811{
1812        struct amdgpu_device *adev = smu->adev;
1813
1814        smu->funcs = &smu_v11_0_funcs;
1815        switch (adev->asic_type) {
1816        case CHIP_VEGA20:
1817                vega20_set_ppt_funcs(smu);
1818                break;
1819        case CHIP_ARCTURUS:
1820                arcturus_set_ppt_funcs(smu);
1821                break;
1822        case CHIP_NAVI10:
1823        case CHIP_NAVI14:
1824        case CHIP_NAVI12:
1825                navi10_set_ppt_funcs(smu);
1826                break;
1827        default:
1828                pr_warn("Unknown asic for smu11\n");
1829        }
1830}
1831