/* linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c */
   1/*
   2 * Copyright 2018 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: AMD
  23 */
  24#include <linux/string.h>
  25#include <linux/acpi.h>
  26
  27#include <drm/drm_probe_helper.h>
  28#include <drm/amdgpu_drm.h>
  29#include "dm_services.h"
  30#include "amdgpu.h"
  31#include "amdgpu_dm.h"
  32#include "amdgpu_dm_irq.h"
  33#include "amdgpu_pm.h"
  34#include "dm_pp_smu.h"
  35#include "amdgpu_smu.h"
  36
  37
/*
 * Push DC's current display configuration down to the power code so it can
 * choose matching clock/power levels, then trigger a clock recompute.
 *
 * Routing: prefers the legacy powerplay hook when present, otherwise falls
 * back to the SW-SMU path.  Does nothing unless DPM is enabled.
 *
 * Note: pm_display_cfg clock fields are in 10 kHz units, hence the /10
 * conversions from the kHz values DC provides.
 *
 * Always returns true (the underlying calls' results are not propagated).
 */
bool dm_pp_apply_display_requirements(
		const struct dc_context *ctx,
		const struct dm_pp_display_configuration *pp_display_cfg)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;
	int i;

	if (adev->pm.dpm_enabled) {

		/* Rebuild the cached config from scratch each call. */
		memset(&adev->pm.pm_display_cfg, 0,
				sizeof(adev->pm.pm_display_cfg));

		adev->pm.pm_display_cfg.cpu_cc6_disable =
			pp_display_cfg->cpu_cc6_disable;

		adev->pm.pm_display_cfg.cpu_pstate_disable =
			pp_display_cfg->cpu_pstate_disable;

		adev->pm.pm_display_cfg.cpu_pstate_separation_time =
			pp_display_cfg->cpu_pstate_separation_time;

		adev->pm.pm_display_cfg.nb_pstate_switch_disable =
			pp_display_cfg->nb_pstate_switch_disable;

		adev->pm.pm_display_cfg.num_display =
				pp_display_cfg->display_count;
		/* Non-display paths are not tracked separately here. */
		adev->pm.pm_display_cfg.num_path_including_non_display =
				pp_display_cfg->display_count;

		/* kHz -> 10 kHz units expected by the pm side. */
		adev->pm.pm_display_cfg.min_core_set_clock =
				pp_display_cfg->min_engine_clock_khz/10;
		adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
				pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
		adev->pm.pm_display_cfg.min_mem_set_clock =
				pp_display_cfg->min_memory_clock_khz/10;

		adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk =
				pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
		adev->pm.pm_display_cfg.min_dcef_set_clk =
				pp_display_cfg->min_dcfclock_khz/10;

		adev->pm.pm_display_cfg.multi_monitor_in_sync =
				pp_display_cfg->all_displays_in_sync;
		adev->pm.pm_display_cfg.min_vblank_time =
				pp_display_cfg->avail_mclk_switch_time_us;

		adev->pm.pm_display_cfg.display_clk =
				pp_display_cfg->disp_clk_khz/10;

		adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
				pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;

		adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
		adev->pm.pm_display_cfg.line_time_in_us =
				pp_display_cfg->line_time_in_us;

		/* NOTE(review): reads disp_configs[0] even when display_count
		 * is 0 — confirm callers always provide at least one entry. */
		adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
		/* No crossfire support on this path. */
		adev->pm.pm_display_cfg.crossfire_display_index = -1;
		adev->pm.pm_display_cfg.min_bus_bandwidth = 0;

		/* pm-side controller ids are 1-based, DC pipe indices 0-based. */
		for (i = 0; i < pp_display_cfg->display_count; i++) {
			const struct dm_pp_single_disp_config *dc_cfg =
						&pp_display_cfg->disp_configs[i];
			adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
		}

		if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_configuration_change)
			adev->powerplay.pp_funcs->display_configuration_change(
				adev->powerplay.pp_handle,
				&adev->pm.pm_display_cfg);
		else if (adev->smu.ppt_funcs)
			smu_display_configuration_change(smu,
							 &adev->pm.pm_display_cfg);

		amdgpu_pm_compute_clocks(adev);
	}

	return true;
}
 118
 119static void get_default_clock_levels(
 120                enum dm_pp_clock_type clk_type,
 121                struct dm_pp_clock_levels *clks)
 122{
 123        uint32_t disp_clks_in_khz[6] = {
 124                        300000, 400000, 496560, 626090, 685720, 757900 };
 125        uint32_t sclks_in_khz[6] = {
 126                        300000, 360000, 423530, 514290, 626090, 720000 };
 127        uint32_t mclks_in_khz[2] = { 333000, 800000 };
 128
 129        switch (clk_type) {
 130        case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
 131                clks->num_levels = 6;
 132                memmove(clks->clocks_in_khz, disp_clks_in_khz,
 133                                sizeof(disp_clks_in_khz));
 134                break;
 135        case DM_PP_CLOCK_TYPE_ENGINE_CLK:
 136                clks->num_levels = 6;
 137                memmove(clks->clocks_in_khz, sclks_in_khz,
 138                                sizeof(sclks_in_khz));
 139                break;
 140        case DM_PP_CLOCK_TYPE_MEMORY_CLK:
 141                clks->num_levels = 2;
 142                memmove(clks->clocks_in_khz, mclks_in_khz,
 143                                sizeof(mclks_in_khz));
 144                break;
 145        default:
 146                clks->num_levels = 0;
 147                break;
 148        }
 149}
 150
 151static enum smu_clk_type dc_to_smu_clock_type(
 152                enum dm_pp_clock_type dm_pp_clk_type)
 153{
 154        enum smu_clk_type smu_clk_type = SMU_CLK_COUNT;
 155
 156        switch (dm_pp_clk_type) {
 157        case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
 158                smu_clk_type = SMU_DISPCLK;
 159                break;
 160        case DM_PP_CLOCK_TYPE_ENGINE_CLK:
 161                smu_clk_type = SMU_GFXCLK;
 162                break;
 163        case DM_PP_CLOCK_TYPE_MEMORY_CLK:
 164                smu_clk_type = SMU_MCLK;
 165                break;
 166        case DM_PP_CLOCK_TYPE_DCEFCLK:
 167                smu_clk_type = SMU_DCEFCLK;
 168                break;
 169        case DM_PP_CLOCK_TYPE_SOCCLK:
 170                smu_clk_type = SMU_SOCCLK;
 171                break;
 172        default:
 173                DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
 174                          dm_pp_clk_type);
 175                break;
 176        }
 177
 178        return smu_clk_type;
 179}
 180
 181static enum amd_pp_clock_type dc_to_pp_clock_type(
 182                enum dm_pp_clock_type dm_pp_clk_type)
 183{
 184        enum amd_pp_clock_type amd_pp_clk_type = 0;
 185
 186        switch (dm_pp_clk_type) {
 187        case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
 188                amd_pp_clk_type = amd_pp_disp_clock;
 189                break;
 190        case DM_PP_CLOCK_TYPE_ENGINE_CLK:
 191                amd_pp_clk_type = amd_pp_sys_clock;
 192                break;
 193        case DM_PP_CLOCK_TYPE_MEMORY_CLK:
 194                amd_pp_clk_type = amd_pp_mem_clock;
 195                break;
 196        case DM_PP_CLOCK_TYPE_DCEFCLK:
 197                amd_pp_clk_type  = amd_pp_dcef_clock;
 198                break;
 199        case DM_PP_CLOCK_TYPE_DCFCLK:
 200                amd_pp_clk_type = amd_pp_dcf_clock;
 201                break;
 202        case DM_PP_CLOCK_TYPE_PIXELCLK:
 203                amd_pp_clk_type = amd_pp_pixel_clock;
 204                break;
 205        case DM_PP_CLOCK_TYPE_FCLK:
 206                amd_pp_clk_type = amd_pp_f_clock;
 207                break;
 208        case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
 209                amd_pp_clk_type = amd_pp_phy_clock;
 210                break;
 211        case DM_PP_CLOCK_TYPE_DPPCLK:
 212                amd_pp_clk_type = amd_pp_dpp_clock;
 213                break;
 214        default:
 215                DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
 216                                dm_pp_clk_type);
 217                break;
 218        }
 219
 220        return amd_pp_clk_type;
 221}
 222
 223static enum dm_pp_clocks_state pp_to_dc_powerlevel_state(
 224                        enum PP_DAL_POWERLEVEL max_clocks_state)
 225{
 226        switch (max_clocks_state) {
 227        case PP_DAL_POWERLEVEL_0:
 228                return DM_PP_CLOCKS_DPM_STATE_LEVEL_0;
 229        case PP_DAL_POWERLEVEL_1:
 230                return DM_PP_CLOCKS_DPM_STATE_LEVEL_1;
 231        case PP_DAL_POWERLEVEL_2:
 232                return DM_PP_CLOCKS_DPM_STATE_LEVEL_2;
 233        case PP_DAL_POWERLEVEL_3:
 234                return DM_PP_CLOCKS_DPM_STATE_LEVEL_3;
 235        case PP_DAL_POWERLEVEL_4:
 236                return DM_PP_CLOCKS_DPM_STATE_LEVEL_4;
 237        case PP_DAL_POWERLEVEL_5:
 238                return DM_PP_CLOCKS_DPM_STATE_LEVEL_5;
 239        case PP_DAL_POWERLEVEL_6:
 240                return DM_PP_CLOCKS_DPM_STATE_LEVEL_6;
 241        case PP_DAL_POWERLEVEL_7:
 242                return DM_PP_CLOCKS_DPM_STATE_LEVEL_7;
 243        default:
 244                DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n",
 245                                max_clocks_state);
 246                return DM_PP_CLOCKS_STATE_INVALID;
 247        }
 248}
 249
 250static void pp_to_dc_clock_levels(
 251                const struct amd_pp_clocks *pp_clks,
 252                struct dm_pp_clock_levels *dc_clks,
 253                enum dm_pp_clock_type dc_clk_type)
 254{
 255        uint32_t i;
 256
 257        if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
 258                DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
 259                                DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
 260                                pp_clks->count,
 261                                DM_PP_MAX_CLOCK_LEVELS);
 262
 263                dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
 264        } else
 265                dc_clks->num_levels = pp_clks->count;
 266
 267        DRM_INFO("DM_PPLIB: values for %s clock\n",
 268                        DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
 269
 270        for (i = 0; i < dc_clks->num_levels; i++) {
 271                DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
 272                dc_clks->clocks_in_khz[i] = pp_clks->clock[i];
 273        }
 274}
 275
 276static void pp_to_dc_clock_levels_with_latency(
 277                const struct pp_clock_levels_with_latency *pp_clks,
 278                struct dm_pp_clock_levels_with_latency *clk_level_info,
 279                enum dm_pp_clock_type dc_clk_type)
 280{
 281        uint32_t i;
 282
 283        if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
 284                DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
 285                                DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
 286                                pp_clks->num_levels,
 287                                DM_PP_MAX_CLOCK_LEVELS);
 288
 289                clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
 290        } else
 291                clk_level_info->num_levels = pp_clks->num_levels;
 292
 293        DRM_DEBUG("DM_PPLIB: values for %s clock\n",
 294                        DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
 295
 296        for (i = 0; i < clk_level_info->num_levels; i++) {
 297                DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
 298                clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
 299                clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
 300        }
 301}
 302
 303static void pp_to_dc_clock_levels_with_voltage(
 304                const struct pp_clock_levels_with_voltage *pp_clks,
 305                struct dm_pp_clock_levels_with_voltage *clk_level_info,
 306                enum dm_pp_clock_type dc_clk_type)
 307{
 308        uint32_t i;
 309
 310        if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
 311                DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
 312                                DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
 313                                pp_clks->num_levels,
 314                                DM_PP_MAX_CLOCK_LEVELS);
 315
 316                clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
 317        } else
 318                clk_level_info->num_levels = pp_clks->num_levels;
 319
 320        DRM_INFO("DM_PPLIB: values for %s clock\n",
 321                        DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
 322
 323        for (i = 0; i < clk_level_info->num_levels; i++) {
 324                DRM_INFO("DM_PPLIB:\t %d in kHz, %d in mV\n", pp_clks->data[i].clocks_in_khz,
 325                         pp_clks->data[i].voltage_in_mv);
 326                clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
 327                clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
 328        }
 329}
 330
/*
 * Query the power code (powerplay or SW SMU) for the DPM levels of
 * @clk_type, convert them into @dc_clks, and trim boosted levels that
 * exceed the validation clocks.
 *
 * On any query failure a built-in default table is substituted, so this
 * function always returns true with @dc_clks populated.
 */
bool dm_pp_get_clock_levels_by_type(
		const struct dc_context *ctx,
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *dc_clks)
{
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	struct amd_pp_clocks pp_clks = { 0 };
	struct amd_pp_simple_clock_info validation_clks = { 0 };
	uint32_t i;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) {
		if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
			dc_to_pp_clock_type(clk_type), &pp_clks)) {
			/* Error in pplib. Provide default values. */
			get_default_clock_levels(clk_type, dc_clks);
			return true;
		}
	} else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type) {
		/* SW-SMU fallback; same default-on-error policy as above. */
		if (smu_get_clock_by_type(&adev->smu,
					  dc_to_pp_clock_type(clk_type),
					  &pp_clks)) {
			get_default_clock_levels(clk_type, dc_clks);
			return true;
		}
	}

	pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);

	/* Fetch validation (max) clocks; on failure use conservative
	 * defaults, expressed in 10 kHz units like the real values. */
	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
		if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
						pp_handle, &validation_clks)) {
			/* Error in pplib. Provide default values. */
			DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
			validation_clks.engine_max_clock = 72000;
			validation_clks.memory_max_clock = 80000;
			validation_clks.level = 0;
		}
	} else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_max_high_clocks) {
		if (smu_get_max_high_clocks(&adev->smu, &validation_clks)) {
			DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
			validation_clks.engine_max_clock = 72000;
			validation_clks.memory_max_clock = 80000;
			validation_clks.level = 0;
		}
	}

	DRM_INFO("DM_PPLIB: Validation clocks:\n");
	DRM_INFO("DM_PPLIB:    engine_max_clock: %d\n",
			validation_clks.engine_max_clock);
	DRM_INFO("DM_PPLIB:    memory_max_clock: %d\n",
			validation_clks.memory_max_clock);
	DRM_INFO("DM_PPLIB:    level           : %d\n",
			validation_clks.level);

	/* Translate 10 kHz to kHz. */
	validation_clks.engine_max_clock *= 10;
	validation_clks.memory_max_clock *= 10;

	/* Determine the highest non-boosted level from the Validation Clocks */
	if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
				/* This clock is higher than the validation
				 * clock, so the previous one is the highest
				 * non-boosted level. */
				DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
						dc_clks->num_levels, i);
				/* Keep at least one level even if level 0 is boosted. */
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	} else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
				DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	}

	return true;
}
 416
 417bool dm_pp_get_clock_levels_by_type_with_latency(
 418        const struct dc_context *ctx,
 419        enum dm_pp_clock_type clk_type,
 420        struct dm_pp_clock_levels_with_latency *clk_level_info)
 421{
 422        struct amdgpu_device *adev = ctx->driver_context;
 423        void *pp_handle = adev->powerplay.pp_handle;
 424        struct pp_clock_levels_with_latency pp_clks = { 0 };
 425        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 426        int ret;
 427
 428        if (pp_funcs && pp_funcs->get_clock_by_type_with_latency) {
 429                ret = pp_funcs->get_clock_by_type_with_latency(pp_handle,
 430                                                dc_to_pp_clock_type(clk_type),
 431                                                &pp_clks);
 432                if (ret)
 433                        return false;
 434        } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency) {
 435                if (smu_get_clock_by_type_with_latency(&adev->smu,
 436                                                       dc_to_smu_clock_type(clk_type),
 437                                                       &pp_clks))
 438                        return false;
 439        }
 440
 441
 442        pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
 443
 444        return true;
 445}
 446
 447bool dm_pp_get_clock_levels_by_type_with_voltage(
 448        const struct dc_context *ctx,
 449        enum dm_pp_clock_type clk_type,
 450        struct dm_pp_clock_levels_with_voltage *clk_level_info)
 451{
 452        struct amdgpu_device *adev = ctx->driver_context;
 453        void *pp_handle = adev->powerplay.pp_handle;
 454        struct pp_clock_levels_with_voltage pp_clk_info = {0};
 455        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 456        int ret;
 457
 458        if (pp_funcs && pp_funcs->get_clock_by_type_with_voltage) {
 459                ret = pp_funcs->get_clock_by_type_with_voltage(pp_handle,
 460                                                dc_to_pp_clock_type(clk_type),
 461                                                &pp_clk_info);
 462                if (ret)
 463                        return false;
 464        } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_voltage) {
 465                if (smu_get_clock_by_type_with_voltage(&adev->smu,
 466                                                       dc_to_pp_clock_type(clk_type),
 467                                                       &pp_clk_info))
 468                        return false;
 469        }
 470
 471        pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);
 472
 473        return true;
 474}
 475
/*
 * Notify the power code of watermark clock-range changes.
 * Not implemented yet; always reports failure so callers fall back.
 */
bool dm_pp_notify_wm_clock_changes(
	const struct dc_context *ctx,
	struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
{
	/* TODO: to be implemented */
	return false;
}
 483
/*
 * Apply a requested power-level change.
 * Not implemented yet; always reports failure so callers fall back.
 */
bool dm_pp_apply_power_level_change_request(
	const struct dc_context *ctx,
	struct dm_pp_power_level_change_request *level_change_req)
{
	/* TODO: to be implemented */
	return false;
}
 491
 492bool dm_pp_apply_clock_for_voltage_request(
 493        const struct dc_context *ctx,
 494        struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
 495{
 496        struct amdgpu_device *adev = ctx->driver_context;
 497        struct pp_display_clock_request pp_clock_request = {0};
 498        int ret = 0;
 499
 500        pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
 501        pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;
 502
 503        if (!pp_clock_request.clock_type)
 504                return false;
 505
 506        if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_clock_voltage_request)
 507                ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
 508                        adev->powerplay.pp_handle,
 509                        &pp_clock_request);
 510        else if (adev->smu.ppt_funcs &&
 511                 adev->smu.ppt_funcs->display_clock_voltage_request)
 512                ret = smu_display_clock_voltage_request(&adev->smu,
 513                                                        &pp_clock_request);
 514        if (ret)
 515                return false;
 516        return true;
 517}
 518
 519bool dm_pp_get_static_clocks(
 520        const struct dc_context *ctx,
 521        struct dm_pp_static_clock_info *static_clk_info)
 522{
 523        struct amdgpu_device *adev = ctx->driver_context;
 524        struct amd_pp_clock_info pp_clk_info = {0};
 525        int ret = 0;
 526
 527        if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_current_clocks)
 528                ret = adev->powerplay.pp_funcs->get_current_clocks(
 529                        adev->powerplay.pp_handle,
 530                        &pp_clk_info);
 531        else if (adev->smu.ppt_funcs)
 532                ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
 533        else
 534                return false;
 535        if (ret)
 536                return false;
 537
 538        static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
 539        static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
 540        static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;
 541
 542        return true;
 543}
 544
/*
 * Raven-family hook: convert DC's reader/writer watermark range sets into
 * the soc15 powerplay layout and hand them to the power code.
 *
 * MHz values from DC are converted to kHz (*1000) for powerplay.
 * NOTE(review): wm_with_clock_ranges is not zero-initialized; only the
 * first num_wm_dmif_sets/num_wm_mcif_sets entries are populated — confirm
 * the consumer only reads that many entries.
 */
void pp_rv_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
	struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges;
	struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges;
	int32_t i;

	wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
	wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;

	for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
		/* Only watermark set instances 0..3 are valid; anything
		 * above falls back to set A. */
		if (ranges->reader_wm_sets[i].wm_inst > 3)
			wm_dce_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_dce_clocks[i].wm_set_id =
					ranges->reader_wm_sets[i].wm_inst;
		wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_max_mem_clk_in_khz =
				ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_mem_clk_in_khz =
				ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
	}

	for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
		if (ranges->writer_wm_sets[i].wm_inst > 3)
			wm_soc_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_soc_clocks[i].wm_set_id =
					ranges->writer_wm_sets[i].wm_inst;
		/* Writer (mcif) sets: fill maps to socclk, drain to memclk —
		 * mirrored relative to the reader loop above. */
		wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_max_mem_clk_in_khz =
				ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_mem_clk_in_khz =
				ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
	}

	if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
		pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
							   &wm_with_clock_ranges);
}
 596
 597void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
 598{
 599        const struct dc_context *ctx = pp->dm;
 600        struct amdgpu_device *adev = ctx->driver_context;
 601        void *pp_handle = adev->powerplay.pp_handle;
 602        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 603
 604        if (pp_funcs && pp_funcs->notify_smu_enable_pwe)
 605                pp_funcs->notify_smu_enable_pwe(pp_handle);
 606        else if (adev->smu.ppt_funcs)
 607                smu_notify_smu_enable_pwe(&adev->smu);
 608}
 609
 610void pp_rv_set_active_display_count(struct pp_smu *pp, int count)
 611{
 612        const struct dc_context *ctx = pp->dm;
 613        struct amdgpu_device *adev = ctx->driver_context;
 614        void *pp_handle = adev->powerplay.pp_handle;
 615        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 616
 617        if (!pp_funcs || !pp_funcs->set_active_display_count)
 618                return;
 619
 620        pp_funcs->set_active_display_count(pp_handle, count);
 621}
 622
 623void pp_rv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int clock)
 624{
 625        const struct dc_context *ctx = pp->dm;
 626        struct amdgpu_device *adev = ctx->driver_context;
 627        void *pp_handle = adev->powerplay.pp_handle;
 628        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 629
 630        if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk)
 631                return;
 632
 633        pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, clock);
 634}
 635
 636void pp_rv_set_hard_min_dcefclk_by_freq(struct pp_smu *pp, int clock)
 637{
 638        const struct dc_context *ctx = pp->dm;
 639        struct amdgpu_device *adev = ctx->driver_context;
 640        void *pp_handle = adev->powerplay.pp_handle;
 641        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 642
 643        if (!pp_funcs || !pp_funcs->set_hard_min_dcefclk_by_freq)
 644                return;
 645
 646        pp_funcs->set_hard_min_dcefclk_by_freq(pp_handle, clock);
 647}
 648
 649void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
 650{
 651        const struct dc_context *ctx = pp->dm;
 652        struct amdgpu_device *adev = ctx->driver_context;
 653        void *pp_handle = adev->powerplay.pp_handle;
 654        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 655
 656        if (!pp_funcs || !pp_funcs->set_hard_min_fclk_by_freq)
 657                return;
 658
 659        pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz);
 660}
 661
 662static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
 663                struct pp_smu_wm_range_sets *ranges)
 664{
 665        const struct dc_context *ctx = pp->dm;
 666        struct amdgpu_device *adev = ctx->driver_context;
 667
 668        smu_set_watermarks_for_clock_ranges(&adev->smu, ranges);
 669
 670        return PP_SMU_RESULT_OK;
 671}
 672
 673enum pp_smu_status pp_nv_set_pme_wa_enable(struct pp_smu *pp)
 674{
 675        const struct dc_context *ctx = pp->dm;
 676        struct amdgpu_device *adev = ctx->driver_context;
 677        struct smu_context *smu = &adev->smu;
 678
 679        if (!smu->ppt_funcs)
 680                return PP_SMU_RESULT_UNSUPPORTED;
 681
 682        /* 0: successful or smu.ppt_funcs->set_azalia_d3_pme = NULL;  1: fail */
 683        if (smu_set_azalia_d3_pme(smu))
 684                return PP_SMU_RESULT_FAIL;
 685
 686        return PP_SMU_RESULT_OK;
 687}
 688
 689static enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
 690{
 691        const struct dc_context *ctx = pp->dm;
 692        struct amdgpu_device *adev = ctx->driver_context;
 693        struct smu_context *smu = &adev->smu;
 694
 695        if (!smu->ppt_funcs)
 696                return PP_SMU_RESULT_UNSUPPORTED;
 697
 698        /* 0: successful or smu.ppt_funcs->set_display_count = NULL;  1: fail */
 699        if (smu_set_display_count(smu, count))
 700                return PP_SMU_RESULT_FAIL;
 701
 702        return PP_SMU_RESULT_OK;
 703}
 704
 705static enum pp_smu_status
 706pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
 707{
 708        const struct dc_context *ctx = pp->dm;
 709        struct amdgpu_device *adev = ctx->driver_context;
 710        struct smu_context *smu = &adev->smu;
 711
 712        if (!smu->ppt_funcs)
 713                return PP_SMU_RESULT_UNSUPPORTED;
 714
 715        /* 0: successful or smu.ppt_funcs->set_deep_sleep_dcefclk = NULL;1: fail */
 716        if (smu_set_deep_sleep_dcefclk(smu, mhz))
 717                return PP_SMU_RESULT_FAIL;
 718
 719        return PP_SMU_RESULT_OK;
 720}
 721
 722static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
 723                struct pp_smu *pp, int mhz)
 724{
 725        const struct dc_context *ctx = pp->dm;
 726        struct amdgpu_device *adev = ctx->driver_context;
 727        struct smu_context *smu = &adev->smu;
 728        struct pp_display_clock_request clock_req;
 729
 730        if (!smu->ppt_funcs)
 731                return PP_SMU_RESULT_UNSUPPORTED;
 732
 733        clock_req.clock_type = amd_pp_dcef_clock;
 734        clock_req.clock_freq_in_khz = mhz * 1000;
 735
 736        /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
 737         * 1: fail
 738         */
 739        if (smu_display_clock_voltage_request(smu, &clock_req))
 740                return PP_SMU_RESULT_FAIL;
 741
 742        return PP_SMU_RESULT_OK;
 743}
 744
 745static enum pp_smu_status
 746pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
 747{
 748        const struct dc_context *ctx = pp->dm;
 749        struct amdgpu_device *adev = ctx->driver_context;
 750        struct smu_context *smu = &adev->smu;
 751        struct pp_display_clock_request clock_req;
 752
 753        if (!smu->ppt_funcs)
 754                return PP_SMU_RESULT_UNSUPPORTED;
 755
 756        clock_req.clock_type = amd_pp_mem_clock;
 757        clock_req.clock_freq_in_khz = mhz * 1000;
 758
 759        /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
 760         * 1: fail
 761         */
 762        if (smu_display_clock_voltage_request(smu, &clock_req))
 763                return PP_SMU_RESULT_FAIL;
 764
 765        return PP_SMU_RESULT_OK;
 766}
 767
 768static enum pp_smu_status pp_nv_set_pstate_handshake_support(
 769        struct pp_smu *pp, bool pstate_handshake_supported)
 770{
 771        const struct dc_context *ctx = pp->dm;
 772        struct amdgpu_device *adev = ctx->driver_context;
 773        struct smu_context *smu = &adev->smu;
 774
 775        if (smu_display_disable_memory_clock_switch(smu, !pstate_handshake_supported))
 776                return PP_SMU_RESULT_FAIL;
 777
 778        return PP_SMU_RESULT_OK;
 779}
 780
 781static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
 782                enum pp_smu_nv_clock_id clock_id, int mhz)
 783{
 784        const struct dc_context *ctx = pp->dm;
 785        struct amdgpu_device *adev = ctx->driver_context;
 786        struct smu_context *smu = &adev->smu;
 787        struct pp_display_clock_request clock_req;
 788
 789        if (!smu->ppt_funcs)
 790                return PP_SMU_RESULT_UNSUPPORTED;
 791
 792        switch (clock_id) {
 793        case PP_SMU_NV_DISPCLK:
 794                clock_req.clock_type = amd_pp_disp_clock;
 795                break;
 796        case PP_SMU_NV_PHYCLK:
 797                clock_req.clock_type = amd_pp_phy_clock;
 798                break;
 799        case PP_SMU_NV_PIXELCLK:
 800                clock_req.clock_type = amd_pp_pixel_clock;
 801                break;
 802        default:
 803                break;
 804        }
 805        clock_req.clock_freq_in_khz = mhz * 1000;
 806
 807        /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
 808         * 1: fail
 809         */
 810        if (smu_display_clock_voltage_request(smu, &clock_req))
 811                return PP_SMU_RESULT_FAIL;
 812
 813        return PP_SMU_RESULT_OK;
 814}
 815
 816static enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
 817                struct pp_smu *pp, struct pp_smu_nv_clock_table *max_clocks)
 818{
 819        const struct dc_context *ctx = pp->dm;
 820        struct amdgpu_device *adev = ctx->driver_context;
 821        struct smu_context *smu = &adev->smu;
 822
 823        if (!smu->ppt_funcs)
 824                return PP_SMU_RESULT_UNSUPPORTED;
 825
 826        if (!smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
 827                return PP_SMU_RESULT_UNSUPPORTED;
 828
 829        if (!smu_get_max_sustainable_clocks_by_dc(smu, max_clocks))
 830                return PP_SMU_RESULT_OK;
 831
 832        return PP_SMU_RESULT_FAIL;
 833}
 834
 835static enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
 836                unsigned int *clock_values_in_khz, unsigned int *num_states)
 837{
 838        const struct dc_context *ctx = pp->dm;
 839        struct amdgpu_device *adev = ctx->driver_context;
 840        struct smu_context *smu = &adev->smu;
 841
 842        if (!smu->ppt_funcs)
 843                return PP_SMU_RESULT_UNSUPPORTED;
 844
 845        if (!smu->ppt_funcs->get_uclk_dpm_states)
 846                return PP_SMU_RESULT_UNSUPPORTED;
 847
 848        if (!smu_get_uclk_dpm_states(smu,
 849                        clock_values_in_khz, num_states))
 850                return PP_SMU_RESULT_OK;
 851
 852        return PP_SMU_RESULT_FAIL;
 853}
 854
 855static enum pp_smu_status pp_rn_get_dpm_clock_table(
 856                struct pp_smu *pp, struct dpm_clocks *clock_table)
 857{
 858        const struct dc_context *ctx = pp->dm;
 859        struct amdgpu_device *adev = ctx->driver_context;
 860        struct smu_context *smu = &adev->smu;
 861
 862        if (!smu->ppt_funcs)
 863                return PP_SMU_RESULT_UNSUPPORTED;
 864
 865        if (!smu->ppt_funcs->get_dpm_clock_table)
 866                return PP_SMU_RESULT_UNSUPPORTED;
 867
 868        if (!smu_get_dpm_clock_table(smu, clock_table))
 869                return PP_SMU_RESULT_OK;
 870
 871        return PP_SMU_RESULT_FAIL;
 872}
 873
 874static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
 875                struct pp_smu_wm_range_sets *ranges)
 876{
 877        const struct dc_context *ctx = pp->dm;
 878        struct amdgpu_device *adev = ctx->driver_context;
 879
 880        smu_set_watermarks_for_clock_ranges(&adev->smu, ranges);
 881
 882        return PP_SMU_RESULT_OK;
 883}
 884
/*
 * dm_pp_get_funcs - populate the pp_smu function table DC will call.
 * @ctx: DC context; ctx->dce_version selects which hook family applies.
 * @funcs: table to fill with the RV, NV or RN callbacks defined above.
 *
 * Unsupported DCN versions only log an error; @funcs is left untouched.
 */
void dm_pp_get_funcs(
		struct dc_context *ctx,
		struct pp_smu_funcs *funcs)
{
	switch (ctx->dce_version) {
	case DCN_VERSION_1_0:
	case DCN_VERSION_1_01:
		/* Raven family goes through the powerplay pp_funcs table. */
		funcs->ctx.ver = PP_SMU_VER_RV;
		funcs->rv_funcs.pp_smu.dm = ctx;
		funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
		funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
		funcs->rv_funcs.set_display_count =
				pp_rv_set_active_display_count;
		funcs->rv_funcs.set_min_deep_sleep_dcfclk =
				pp_rv_set_min_deep_sleep_dcfclk;
		funcs->rv_funcs.set_hard_min_dcfclk_by_freq =
				pp_rv_set_hard_min_dcefclk_by_freq;
		funcs->rv_funcs.set_hard_min_fclk_by_freq =
				pp_rv_set_hard_min_fclk_by_freq;
		break;
	case DCN_VERSION_2_0:
		/* Navi family talks to the SMU through the smu_* wrappers. */
		funcs->ctx.ver = PP_SMU_VER_NV;
		funcs->nv_funcs.pp_smu.dm = ctx;
		funcs->nv_funcs.set_display_count = pp_nv_set_display_count;
		funcs->nv_funcs.set_hard_min_dcfclk_by_freq =
				pp_nv_set_hard_min_dcefclk_by_freq;
		funcs->nv_funcs.set_min_deep_sleep_dcfclk =
				pp_nv_set_min_deep_sleep_dcfclk;
		funcs->nv_funcs.set_voltage_by_freq =
				pp_nv_set_voltage_by_freq;
		funcs->nv_funcs.set_wm_ranges = pp_nv_set_wm_ranges;

		/* TODO: set_pme_wa_enable caused a 4k@60Hz display to not
		 * light up; left NULL until that is root-caused.
		 */
		funcs->nv_funcs.set_pme_wa_enable = NULL;
		/* TODO: debug warning message */
		funcs->nv_funcs.set_hard_min_uclk_by_freq = pp_nv_set_hard_min_uclk_by_freq;
		/* TODO: compare data with the Windows driver */
		funcs->nv_funcs.get_maximum_sustainable_clocks = pp_nv_get_maximum_sustainable_clocks;
		/* TODO: compare data with the Windows driver */
		funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states;
		funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;
		break;

	case DCN_VERSION_2_1:
		/* Renoir: only watermark and DPM clock table hooks. */
		funcs->ctx.ver = PP_SMU_VER_RN;
		funcs->rn_funcs.pp_smu.dm = ctx;
		funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges;
		funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table;
		break;
	default:
		DRM_ERROR("smu version is not supported !\n");
		break;
	}
}
 939