linux/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L4

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_cmn.h"
#include "soc15_common.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

/*
 * Although these are defined in each ASIC's specific header file,
 * they share the same definitions and values. That makes common
 * APIs for issuing SMC messages to all ASICs possible.
 */
#define mmMP1_SMN_C2PMSG_66                                                                            0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX                                                                   0

#define mmMP1_SMN_C2PMSG_82                                                                            0x0292
#define mmMP1_SMN_C2PMSG_82_BASE_IDX                                                                   0

#define mmMP1_SMN_C2PMSG_90                                                                            0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX                                                                   0

#define MP1_C2PMSG_90__CONTENT_MASK                                                                    0xFFFFFFFFL

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type)   #type
static const char *__smu_message_names[] = {
        SMU_MESSAGE_TYPES
};

static const char *smu_get_message_name(struct smu_context *smu,
                                        enum smu_message_type type)
{
        if (type < 0 || type >= SMU_MSG_MAX_COUNT)
                return "unknown smu message";

        return __smu_message_names[type];
}

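/*
 * Post the ASIC-specific message index to the MP1 C2PMSG_66 mailbox
 * register and return immediately, without polling for a response.
 */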
static void smu_cmn_send_msg_without_waiting(struct smu_context *smu,
                                             uint16_t msg)
{
        struct amdgpu_device *adev = smu->adev;

        WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
}

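/* Read back the message argument/response payload from C2PMSG_82. */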
static void smu_cmn_read_arg(struct smu_context *smu,
                             uint32_t *arg)
{
        struct amdgpu_device *adev = smu->adev;

        *arg = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82);
}

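/*
 * Poll the C2PMSG_90 response register until the SMU reports a status.
 * Returns 0 on success (0x1), -EIO on any other firmware status, or
 * -ETIME if nothing arrives within adev->usec_timeout * 10 microseconds.
 */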
static int smu_cmn_wait_for_response(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        uint32_t cur_value, i, timeout = adev->usec_timeout * 10;

        for (i = 0; i < timeout; i++) {
                cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
                if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
                        return cur_value == 0x1 ? 0 : -EIO;

                udelay(1);
        }

        /* hitting the timeout means the SMU is hung or the sequencing is wrong */
        if (i == timeout)
                return -ETIME;

        return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}

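/*
 * Send an SMC message with a 32-bit parameter and wait for the response.
 * The sequence is: wait for the SMU to be idle, clear C2PMSG_90, write the
 * parameter to C2PMSG_82, post the message index to C2PMSG_66, then poll
 * for completion. If @read_arg is non-NULL, the response argument is read
 * back from C2PMSG_82. The exchange is serialized by smu->message_lock;
 * messages blocked for SR-IOV VFs and sends during PCI error recovery are
 * silently treated as success.
 */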
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
                                    enum smu_message_type msg,
                                    uint32_t param,
                                    uint32_t *read_arg)
{
        struct amdgpu_device *adev = smu->adev;
        int ret = 0, index = 0;

        if (smu->adev->in_pci_err_recovery)
                return 0;

        index = smu_cmn_to_asic_specific_index(smu,
                                               CMN2ASIC_MAPPING_MSG,
                                               msg);
        if (index < 0)
                return index == -EACCES ? 0 : index;

        mutex_lock(&smu->message_lock);
        ret = smu_cmn_wait_for_response(smu);
        if (ret) {
                dev_err(adev->dev, "Msg issuing pre-check failed and "
                       "the SMU may not be in the right state!\n");
                goto out;
        }

        WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

        WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);

        smu_cmn_send_msg_without_waiting(smu, (uint16_t)index);

        ret = smu_cmn_wait_for_response(smu);
        if (ret) {
                dev_err(adev->dev, "failed to send message: %10s (%d) \tparam: 0x%08x response %#x\n",
                       smu_get_message_name(smu, msg), index, param, ret);
                goto out;
        }

        if (read_arg)
                smu_cmn_read_arg(smu, read_arg);

out:
        mutex_unlock(&smu->message_lock);
        return ret;
}

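/* Convenience wrapper: send an SMC message that takes no parameter. */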
int smu_cmn_send_smc_msg(struct smu_context *smu,
                         enum smu_message_type msg,
                         uint32_t *read_arg)
{
        return smu_cmn_send_smc_msg_with_param(smu,
                                               msg,
                                               0,
                                               read_arg);
}

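/*
 * Translate a common (CMN2ASIC) message/clock/feature/table/power-source/
 * workload index into its ASIC-specific value via the per-ASIC mapping
 * tables. Returns the mapped value, -EINVAL for an invalid or unmapped
 * index, or -EACCES for messages not permitted under an SR-IOV VF.
 */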
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
                                   enum smu_cmn2asic_mapping_type type,
                                   uint32_t index)
{
        struct cmn2asic_msg_mapping msg_mapping;
        struct cmn2asic_mapping mapping;

        switch (type) {
        case CMN2ASIC_MAPPING_MSG:
                if (index >= SMU_MSG_MAX_COUNT ||
                    !smu->message_map)
                        return -EINVAL;

                msg_mapping = smu->message_map[index];
                if (!msg_mapping.valid_mapping)
                        return -EINVAL;

                if (amdgpu_sriov_vf(smu->adev) &&
                    !msg_mapping.valid_in_vf)
                        return -EACCES;

                return msg_mapping.map_to;

        case CMN2ASIC_MAPPING_CLK:
                if (index >= SMU_CLK_COUNT ||
                    !smu->clock_map)
                        return -EINVAL;

                mapping = smu->clock_map[index];
                if (!mapping.valid_mapping)
                        return -EINVAL;

                return mapping.map_to;

        case CMN2ASIC_MAPPING_FEATURE:
                if (index >= SMU_FEATURE_COUNT ||
                    !smu->feature_map)
                        return -EINVAL;

                mapping = smu->feature_map[index];
                if (!mapping.valid_mapping)
                        return -EINVAL;

                return mapping.map_to;

        case CMN2ASIC_MAPPING_TABLE:
                if (index >= SMU_TABLE_COUNT ||
                    !smu->table_map)
                        return -EINVAL;

                mapping = smu->table_map[index];
                if (!mapping.valid_mapping)
                        return -EINVAL;

                return mapping.map_to;

        case CMN2ASIC_MAPPING_PWR:
                if (index >= SMU_POWER_SOURCE_COUNT ||
                    !smu->pwr_src_map)
                        return -EINVAL;

                mapping = smu->pwr_src_map[index];
                if (!mapping.valid_mapping)
                        return -EINVAL;

                return mapping.map_to;

        case CMN2ASIC_MAPPING_WORKLOAD:
                if (index > PP_SMC_POWER_PROFILE_CUSTOM ||
                    !smu->workload_map)
                        return -EINVAL;

                mapping = smu->workload_map[index];
                if (!mapping.valid_mapping)
                        return -EINVAL;

                return mapping.map_to;

        default:
                return -EINVAL;
        }
}

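/*
 * Return 1 if the feature is marked as supported in the cached feature
 * bitmap, 0 otherwise (including when the feature has no ASIC-specific
 * mapping).
 */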
int smu_cmn_feature_is_supported(struct smu_context *smu,
                                 enum smu_feature_mask mask)
{
        struct smu_feature *feature = &smu->smu_feature;
        int feature_id;
        int ret = 0;

        feature_id = smu_cmn_to_asic_specific_index(smu,
                                                    CMN2ASIC_MAPPING_FEATURE,
                                                    mask);
        if (feature_id < 0)
                return 0;

        WARN_ON(feature_id > feature->feature_num);

        mutex_lock(&feature->mutex);
        ret = test_bit(feature_id, feature->supported);
        mutex_unlock(&feature->mutex);

        return ret;
}

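/*
 * Return 1 if the feature is currently enabled. APUs are treated as
 * always enabled; otherwise the cached 'enabled' bitmap is consulted.
 */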
int smu_cmn_feature_is_enabled(struct smu_context *smu,
                               enum smu_feature_mask mask)
{
        struct smu_feature *feature = &smu->smu_feature;
        int feature_id;
        int ret = 0;

        if (smu->is_apu)
                return 1;

        feature_id = smu_cmn_to_asic_specific_index(smu,
                                                    CMN2ASIC_MAPPING_FEATURE,
                                                    mask);
        if (feature_id < 0)
                return 0;

        WARN_ON(feature_id > feature->feature_num);

        mutex_lock(&feature->mutex);
        ret = test_bit(feature_id, feature->enabled);
        mutex_unlock(&feature->mutex);

        return ret;
}

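/*
 * Check whether DPM is enabled for the given clock domain. Clock types
 * without a dedicated DPM feature bit handled here are reported as enabled.
 */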
bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
                                enum smu_clk_type clk_type)
{
        enum smu_feature_mask feature_id = 0;

        switch (clk_type) {
        case SMU_MCLK:
        case SMU_UCLK:
                feature_id = SMU_FEATURE_DPM_UCLK_BIT;
                break;
        case SMU_GFXCLK:
        case SMU_SCLK:
                feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
                break;
        case SMU_SOCCLK:
                feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
                break;
        default:
                return true;
        }

        if (!smu_cmn_feature_is_enabled(smu, feature_id))
                return false;

        return true;
}

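/*
 * Fetch the 64-bit enabled-feature mask as two 32-bit words
 * (feature_mask[0] = low, feature_mask[1] = high). If the driver-side
 * bitmap is empty, the mask is queried from the SMU via the
 * GetEnabledSmuFeaturesLow/High messages; otherwise the cached bitmap
 * is copied out.
 */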
int smu_cmn_get_enabled_mask(struct smu_context *smu,
                             uint32_t *feature_mask,
                             uint32_t num)
{
        uint32_t feature_mask_high = 0, feature_mask_low = 0;
        struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;

        if (!feature_mask || num < 2)
                return -EINVAL;

        if (bitmap_empty(feature->enabled, feature->feature_num)) {
                ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
                if (ret)
                        return ret;

                ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
                if (ret)
                        return ret;

                feature_mask[0] = feature_mask_low;
                feature_mask[1] = feature_mask_high;
        } else {
                bitmap_copy((unsigned long *)feature_mask, feature->enabled,
                            feature->feature_num);
        }

        return ret;
}

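/*
 * Variant of smu_cmn_get_enabled_mask() for firmwares that expose a single
 * GetEnabledSmuFeatures message taking the word index (0 = low, 1 = high)
 * as its parameter.
 */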
int smu_cmn_get_enabled_32_bits_mask(struct smu_context *smu,
                                     uint32_t *feature_mask,
                                     uint32_t num)
{
        uint32_t feature_mask_en_low = 0;
        uint32_t feature_mask_en_high = 0;
        struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;

        if (!feature_mask || num < 2)
                return -EINVAL;

        if (bitmap_empty(feature->enabled, feature->feature_num)) {
                ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 0,
                                                      &feature_mask_en_low);
                if (ret)
                        return ret;

                ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 1,
                                                      &feature_mask_en_high);
                if (ret)
                        return ret;

                feature_mask[0] = feature_mask_en_low;
                feature_mask[1] = feature_mask_en_high;
        } else {
                bitmap_copy((unsigned long *)feature_mask, feature->enabled,
                            feature->feature_num);
        }

        return ret;
}

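/*
 * Enable or disable the features in @feature_mask by sending the
 * {Enable,Disable}SmuFeaturesLow/High message pair, then update the
 * driver's cached 'enabled' bitmap to match.
 */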
int smu_cmn_feature_update_enable_state(struct smu_context *smu,
                                        uint64_t feature_mask,
                                        bool enabled)
{
        struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;

        if (enabled) {
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                  SMU_MSG_EnableSmuFeaturesLow,
                                                  lower_32_bits(feature_mask),
                                                  NULL);
                if (ret)
                        return ret;
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                  SMU_MSG_EnableSmuFeaturesHigh,
                                                  upper_32_bits(feature_mask),
                                                  NULL);
                if (ret)
                        return ret;
        } else {
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                  SMU_MSG_DisableSmuFeaturesLow,
                                                  lower_32_bits(feature_mask),
                                                  NULL);
                if (ret)
                        return ret;
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                  SMU_MSG_DisableSmuFeaturesHigh,
                                                  upper_32_bits(feature_mask),
                                                  NULL);
                if (ret)
                        return ret;
        }

        mutex_lock(&feature->mutex);
        if (enabled)
                bitmap_or(feature->enabled, feature->enabled,
                                (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
        else
                bitmap_andnot(feature->enabled, feature->enabled,
                                (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
        mutex_unlock(&feature->mutex);

        return ret;
}

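/* Enable or disable a single feature identified by its common mask bit. */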
int smu_cmn_feature_set_enabled(struct smu_context *smu,
                                enum smu_feature_mask mask,
                                bool enable)
{
        struct smu_feature *feature = &smu->smu_feature;
        int feature_id;

        feature_id = smu_cmn_to_asic_specific_index(smu,
                                                    CMN2ASIC_MAPPING_FEATURE,
                                                    mask);
        if (feature_id < 0)
                return -EINVAL;

        WARN_ON(feature_id > feature->feature_num);

        return smu_cmn_feature_update_enable_state(smu,
                                               1ULL << feature_id,
                                               enable);
}

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea)    #fea
static const char *__smu_feature_names[] = {
        SMU_FEATURE_MASKS
};

static const char *smu_get_feature_name(struct smu_context *smu,
                                        enum smu_feature_mask feature)
{
        if (feature < 0 || feature >= SMU_FEATURE_COUNT)
                return "unknown smu feature";
        return __smu_feature_names[feature];
}

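/*
 * Format the enabled-feature mask into a human-readable buffer: the raw
 * high/low words followed by one line per mapped feature, sorted by ASIC
 * feature bit, together with its enable state.
 */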
size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
                                   char *buf)
{
        uint32_t feature_mask[2] = { 0 };
        int feature_index = 0;
        uint32_t count = 0;
        int8_t sort_feature[SMU_FEATURE_COUNT];
        size_t size = 0;
        int ret = 0, i;

        if (!smu->is_apu) {
                ret = smu_cmn_get_enabled_mask(smu,
                                               feature_mask,
                                               2);
                if (ret)
                        return 0;
        } else {
                ret = smu_cmn_get_enabled_32_bits_mask(smu,
                                                       feature_mask,
                                                       2);
                if (ret)
                        return 0;
        }

        size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
                       feature_mask[1], feature_mask[0]);

        memset(sort_feature, -1, sizeof(sort_feature));

        for (i = 0; i < SMU_FEATURE_COUNT; i++) {
                feature_index = smu_cmn_to_asic_specific_index(smu,
                                                               CMN2ASIC_MAPPING_FEATURE,
                                                               i);
                if (feature_index < 0)
                        continue;

                sort_feature[feature_index] = i;
        }

        size += sprintf(buf + size, "%-2s. %-20s  %-3s : %-s\n",
                        "No", "Feature", "Bit", "State");

        for (i = 0; i < SMU_FEATURE_COUNT; i++) {
                if (sort_feature[i] < 0)
                        continue;

                size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
                                count++,
                                smu_get_feature_name(smu, sort_feature[i]),
                                i,
                                !!smu_cmn_feature_is_enabled(smu, sort_feature[i]) ?
                                "enabled" : "disabled");
        }

        return size;
}

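/*
 * Apply a user-requested feature mask: features set in @new_mask but not
 * currently enabled are turned on, features enabled but absent from
 * @new_mask are turned off.
 */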
int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
                                uint64_t new_mask)
{
        int ret = 0;
        uint32_t feature_mask[2] = { 0 };
        uint64_t feature_2_enabled = 0;
        uint64_t feature_2_disabled = 0;
        uint64_t feature_enables = 0;

        ret = smu_cmn_get_enabled_mask(smu,
                                       feature_mask,
                                       2);
        if (ret)
                return ret;

        feature_enables = ((uint64_t)feature_mask[1] << 32 |
                           (uint64_t)feature_mask[0]);

        feature_2_enabled  = ~feature_enables & new_mask;
        feature_2_disabled = feature_enables & ~new_mask;

        if (feature_2_enabled) {
                ret = smu_cmn_feature_update_enable_state(smu,
                                                          feature_2_enabled,
                                                          true);
                if (ret)
                        return ret;
        }
        if (feature_2_disabled) {
                ret = smu_cmn_feature_update_enable_state(smu,
                                                          feature_2_disabled,
                                                          false);
                if (ret)
                        return ret;
        }

        return ret;
}

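/* Disable every SMU feature except the one identified by @mask. */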
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
                                                enum smu_feature_mask mask)
{
        uint64_t features_to_disable = U64_MAX;
        int skipped_feature_id;

        skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
                                                            CMN2ASIC_MAPPING_FEATURE,
                                                            mask);
        if (skipped_feature_id < 0)
                return -EINVAL;

        features_to_disable &= ~(1ULL << skipped_feature_id);

        return smu_cmn_feature_update_enable_state(smu,
                                                   features_to_disable,
                                                   0);
}

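/*
 * Return the driver-interface and SMU firmware versions. Values cached in
 * smu->smc_fw_if_version/smc_fw_version are reused; otherwise they are
 * queried with GetDriverIfVersion/GetSmuVersion and then cached.
 */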
int smu_cmn_get_smc_version(struct smu_context *smu,
                            uint32_t *if_version,
                            uint32_t *smu_version)
{
        int ret = 0;

        if (!if_version && !smu_version)
                return -EINVAL;

        if (smu->smc_fw_if_version && smu->smc_fw_version) {
                if (if_version)
                        *if_version = smu->smc_fw_if_version;

                if (smu_version)
                        *smu_version = smu->smc_fw_version;

                return 0;
        }

        if (if_version) {
                ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
                if (ret)
                        return ret;

                smu->smc_fw_if_version = *if_version;
        }

        if (smu_version) {
                ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
                if (ret)
                        return ret;

                smu->smc_fw_version = *smu_version;
        }

        return ret;
}

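/*
 * Transfer a driver table between system memory and the SMU. For
 * drv2smu == true the table is copied into the driver-table buffer and
 * pushed to the SMU (TransferTableDram2Smu); otherwise it is pulled from
 * the SMU (TransferTableSmu2Dram) and copied back to @table_data. HDP
 * flush/invalidate keeps the CPU and GPU views of the buffer coherent.
 */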
int smu_cmn_update_table(struct smu_context *smu,
                         enum smu_table_id table_index,
                         int argument,
                         void *table_data,
                         bool drv2smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct amdgpu_device *adev = smu->adev;
        struct smu_table *table = &smu_table->driver_table;
        int table_id = smu_cmn_to_asic_specific_index(smu,
                                                      CMN2ASIC_MAPPING_TABLE,
                                                      table_index);
        uint32_t table_size;
        int ret = 0;

        if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
                return -EINVAL;

        table_size = smu_table->tables[table_index].size;

        if (drv2smu) {
                memcpy(table->cpu_addr, table_data, table_size);
                /*
                 * Flush the HDP cache so that the content written by the
                 * CPU is guaranteed to be consistent with what the GPU sees.
                 */
                amdgpu_asic_flush_hdp(adev, NULL);
        }

        ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
                                          SMU_MSG_TransferTableDram2Smu :
                                          SMU_MSG_TransferTableSmu2Dram,
                                          table_id | ((argument & 0xFFFF) << 16),
                                          NULL);
        if (ret)
                return ret;

        if (!drv2smu) {
                amdgpu_asic_invalidate_hdp(adev, NULL);
                memcpy(table_data, table->cpu_addr, table_size);
        }

        return 0;
}

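/* Push the cached watermarks table to the SMU. */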
int smu_cmn_write_watermarks_table(struct smu_context *smu)
{
        void *watermarks_table = smu->smu_table.watermarks_table;

        if (!watermarks_table)
                return -EINVAL;

        return smu_cmn_update_table(smu,
                                    SMU_TABLE_WATERMARKS,
                                    0,
                                    watermarks_table,
                                    true);
}

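/* Push the driver powerplay table to the SMU. */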
int smu_cmn_write_pptable(struct smu_context *smu)
{
        void *pptable = smu->smu_table.driver_pptable;

        return smu_cmn_update_table(smu,
                                    SMU_TABLE_PPTABLE,
                                    0,
                                    pptable,
                                    true);
}

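/*
 * Copy the SMU metrics table into @metrics_table. The cached copy is
 * refreshed from the SMU at most once per millisecond unless
 * @bypass_cache forces a fresh read. Caller must hold smu->metrics_lock.
 */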
int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
                                     void *metrics_table,
                                     bool bypass_cache)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        uint32_t table_size =
                smu_table->tables[SMU_TABLE_SMU_METRICS].size;
        int ret = 0;

        if (bypass_cache ||
            !smu_table->metrics_time ||
            time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
                ret = smu_cmn_update_table(smu,
                                       SMU_TABLE_SMU_METRICS,
                                       0,
                                       smu_table->metrics_table,
                                       false);
                if (ret) {
                        dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
                        return ret;
                }
                smu_table->metrics_time = jiffies;
        }

        if (metrics_table)
                memcpy(metrics_table, smu_table->metrics_table, table_size);

        return 0;
}

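/* Locking wrapper around smu_cmn_get_metrics_table_locked(). */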
int smu_cmn_get_metrics_table(struct smu_context *smu,
                              void *metrics_table,
                              bool bypass_cache)
{
        int ret = 0;

        mutex_lock(&smu->metrics_lock);
        ret = smu_cmn_get_metrics_table_locked(smu,
                                               metrics_table,
                                               bypass_cache);
        mutex_unlock(&smu->metrics_lock);

        return ret;
}