/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */
#include <linux/string.h>
#include <linux/acpi.h>

#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>

#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"
#include "dm_pp_smu.h"
#include "amdgpu_smu.h"

  37
  38bool dm_pp_apply_display_requirements(
  39                const struct dc_context *ctx,
  40                const struct dm_pp_display_configuration *pp_display_cfg)
  41{
  42        struct amdgpu_device *adev = ctx->driver_context;
  43        struct smu_context *smu = &adev->smu;
  44        int i;
  45
  46        if (adev->pm.dpm_enabled) {
  47
  48                memset(&adev->pm.pm_display_cfg, 0,
  49                                sizeof(adev->pm.pm_display_cfg));
  50
  51                adev->pm.pm_display_cfg.cpu_cc6_disable =
  52                        pp_display_cfg->cpu_cc6_disable;
  53
  54                adev->pm.pm_display_cfg.cpu_pstate_disable =
  55                        pp_display_cfg->cpu_pstate_disable;
  56
  57                adev->pm.pm_display_cfg.cpu_pstate_separation_time =
  58                        pp_display_cfg->cpu_pstate_separation_time;
  59
  60                adev->pm.pm_display_cfg.nb_pstate_switch_disable =
  61                        pp_display_cfg->nb_pstate_switch_disable;
  62
  63                adev->pm.pm_display_cfg.num_display =
  64                                pp_display_cfg->display_count;
  65                adev->pm.pm_display_cfg.num_path_including_non_display =
  66                                pp_display_cfg->display_count;
  67
  68                adev->pm.pm_display_cfg.min_core_set_clock =
  69                                pp_display_cfg->min_engine_clock_khz/10;
  70                adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
  71                                pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
  72                adev->pm.pm_display_cfg.min_mem_set_clock =
  73                                pp_display_cfg->min_memory_clock_khz/10;
  74
  75                adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk =
  76                                pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
  77                adev->pm.pm_display_cfg.min_dcef_set_clk =
  78                                pp_display_cfg->min_dcfclock_khz/10;
  79
  80                adev->pm.pm_display_cfg.multi_monitor_in_sync =
  81                                pp_display_cfg->all_displays_in_sync;
  82                adev->pm.pm_display_cfg.min_vblank_time =
  83                                pp_display_cfg->avail_mclk_switch_time_us;
  84
  85                adev->pm.pm_display_cfg.display_clk =
  86                                pp_display_cfg->disp_clk_khz/10;
  87
  88                adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
  89                                pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
  90
  91                adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
  92                adev->pm.pm_display_cfg.line_time_in_us =
  93                                pp_display_cfg->line_time_in_us;
  94
  95                adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
  96                adev->pm.pm_display_cfg.crossfire_display_index = -1;
  97                adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
  98
  99                for (i = 0; i < pp_display_cfg->display_count; i++) {
 100                        const struct dm_pp_single_disp_config *dc_cfg =
 101                                                &pp_display_cfg->disp_configs[i];
 102                        adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
 103                }
 104
 105                if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_configuration_change)
 106                        adev->powerplay.pp_funcs->display_configuration_change(
 107                                adev->powerplay.pp_handle,
 108                                &adev->pm.pm_display_cfg);
 109                else
 110                        smu_display_configuration_change(smu,
 111                                                         &adev->pm.pm_display_cfg);
 112
 113                amdgpu_pm_compute_clocks(adev);
 114        }
 115
 116        return true;
 117}
 118
 119static void get_default_clock_levels(
 120                enum dm_pp_clock_type clk_type,
 121                struct dm_pp_clock_levels *clks)
 122{
 123        uint32_t disp_clks_in_khz[6] = {
 124                        300000, 400000, 496560, 626090, 685720, 757900 };
 125        uint32_t sclks_in_khz[6] = {
 126                        300000, 360000, 423530, 514290, 626090, 720000 };
 127        uint32_t mclks_in_khz[2] = { 333000, 800000 };
 128
 129        switch (clk_type) {
 130        case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
 131                clks->num_levels = 6;
 132                memmove(clks->clocks_in_khz, disp_clks_in_khz,
 133                                sizeof(disp_clks_in_khz));
 134                break;
 135        case DM_PP_CLOCK_TYPE_ENGINE_CLK:
 136                clks->num_levels = 6;
 137                memmove(clks->clocks_in_khz, sclks_in_khz,
 138                                sizeof(sclks_in_khz));
 139                break;
 140        case DM_PP_CLOCK_TYPE_MEMORY_CLK:
 141                clks->num_levels = 2;
 142                memmove(clks->clocks_in_khz, mclks_in_khz,
 143                                sizeof(mclks_in_khz));
 144                break;
 145        default:
 146                clks->num_levels = 0;
 147                break;
 148        }
 149}
 150
 151static enum smu_clk_type dc_to_smu_clock_type(
 152                enum dm_pp_clock_type dm_pp_clk_type)
 153{
 154#define DCCLK_MAP_SMUCLK(dcclk, smuclk) \
 155        [dcclk] = smuclk
 156
 157        static int dc_clk_type_map[] = {
 158                DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_DISPLAY_CLK,  SMU_DISPCLK),
 159                DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_ENGINE_CLK,   SMU_GFXCLK),
 160                DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_MEMORY_CLK,   SMU_MCLK),
 161                DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_DCEFCLK,      SMU_DCEFCLK),
 162                DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_SOCCLK,       SMU_SOCCLK),
 163        };
 164
 165        return dc_clk_type_map[dm_pp_clk_type];
 166}
 167
 168static enum amd_pp_clock_type dc_to_pp_clock_type(
 169                enum dm_pp_clock_type dm_pp_clk_type)
 170{
 171        enum amd_pp_clock_type amd_pp_clk_type = 0;
 172
 173        switch (dm_pp_clk_type) {
 174        case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
 175                amd_pp_clk_type = amd_pp_disp_clock;
 176                break;
 177        case DM_PP_CLOCK_TYPE_ENGINE_CLK:
 178                amd_pp_clk_type = amd_pp_sys_clock;
 179                break;
 180        case DM_PP_CLOCK_TYPE_MEMORY_CLK:
 181                amd_pp_clk_type = amd_pp_mem_clock;
 182                break;
 183        case DM_PP_CLOCK_TYPE_DCEFCLK:
 184                amd_pp_clk_type  = amd_pp_dcef_clock;
 185                break;
 186        case DM_PP_CLOCK_TYPE_DCFCLK:
 187                amd_pp_clk_type = amd_pp_dcf_clock;
 188                break;
 189        case DM_PP_CLOCK_TYPE_PIXELCLK:
 190                amd_pp_clk_type = amd_pp_pixel_clock;
 191                break;
 192        case DM_PP_CLOCK_TYPE_FCLK:
 193                amd_pp_clk_type = amd_pp_f_clock;
 194                break;
 195        case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
 196                amd_pp_clk_type = amd_pp_phy_clock;
 197                break;
 198        case DM_PP_CLOCK_TYPE_DPPCLK:
 199                amd_pp_clk_type = amd_pp_dpp_clock;
 200                break;
 201        default:
 202                DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
 203                                dm_pp_clk_type);
 204                break;
 205        }
 206
 207        return amd_pp_clk_type;
 208}
 209
 210static enum dm_pp_clocks_state pp_to_dc_powerlevel_state(
 211                        enum PP_DAL_POWERLEVEL max_clocks_state)
 212{
 213        switch (max_clocks_state) {
 214        case PP_DAL_POWERLEVEL_0:
 215                return DM_PP_CLOCKS_DPM_STATE_LEVEL_0;
 216        case PP_DAL_POWERLEVEL_1:
 217                return DM_PP_CLOCKS_DPM_STATE_LEVEL_1;
 218        case PP_DAL_POWERLEVEL_2:
 219                return DM_PP_CLOCKS_DPM_STATE_LEVEL_2;
 220        case PP_DAL_POWERLEVEL_3:
 221                return DM_PP_CLOCKS_DPM_STATE_LEVEL_3;
 222        case PP_DAL_POWERLEVEL_4:
 223                return DM_PP_CLOCKS_DPM_STATE_LEVEL_4;
 224        case PP_DAL_POWERLEVEL_5:
 225                return DM_PP_CLOCKS_DPM_STATE_LEVEL_5;
 226        case PP_DAL_POWERLEVEL_6:
 227                return DM_PP_CLOCKS_DPM_STATE_LEVEL_6;
 228        case PP_DAL_POWERLEVEL_7:
 229                return DM_PP_CLOCKS_DPM_STATE_LEVEL_7;
 230        default:
 231                DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n",
 232                                max_clocks_state);
 233                return DM_PP_CLOCKS_STATE_INVALID;
 234        }
 235}
 236
 237static void pp_to_dc_clock_levels(
 238                const struct amd_pp_clocks *pp_clks,
 239                struct dm_pp_clock_levels *dc_clks,
 240                enum dm_pp_clock_type dc_clk_type)
 241{
 242        uint32_t i;
 243
 244        if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
 245                DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
 246                                DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
 247                                pp_clks->count,
 248                                DM_PP_MAX_CLOCK_LEVELS);
 249
 250                dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
 251        } else
 252                dc_clks->num_levels = pp_clks->count;
 253
 254        DRM_INFO("DM_PPLIB: values for %s clock\n",
 255                        DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
 256
 257        for (i = 0; i < dc_clks->num_levels; i++) {
 258                DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
 259                dc_clks->clocks_in_khz[i] = pp_clks->clock[i];
 260        }
 261}
 262
 263static void pp_to_dc_clock_levels_with_latency(
 264                const struct pp_clock_levels_with_latency *pp_clks,
 265                struct dm_pp_clock_levels_with_latency *clk_level_info,
 266                enum dm_pp_clock_type dc_clk_type)
 267{
 268        uint32_t i;
 269
 270        if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
 271                DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
 272                                DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
 273                                pp_clks->num_levels,
 274                                DM_PP_MAX_CLOCK_LEVELS);
 275
 276                clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
 277        } else
 278                clk_level_info->num_levels = pp_clks->num_levels;
 279
 280        DRM_DEBUG("DM_PPLIB: values for %s clock\n",
 281                        DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
 282
 283        for (i = 0; i < clk_level_info->num_levels; i++) {
 284                DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
 285                clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
 286                clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
 287        }
 288}
 289
 290static void pp_to_dc_clock_levels_with_voltage(
 291                const struct pp_clock_levels_with_voltage *pp_clks,
 292                struct dm_pp_clock_levels_with_voltage *clk_level_info,
 293                enum dm_pp_clock_type dc_clk_type)
 294{
 295        uint32_t i;
 296
 297        if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
 298                DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
 299                                DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
 300                                pp_clks->num_levels,
 301                                DM_PP_MAX_CLOCK_LEVELS);
 302
 303                clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
 304        } else
 305                clk_level_info->num_levels = pp_clks->num_levels;
 306
 307        DRM_INFO("DM_PPLIB: values for %s clock\n",
 308                        DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
 309
 310        for (i = 0; i < clk_level_info->num_levels; i++) {
 311                DRM_INFO("DM_PPLIB:\t %d in kHz, %d in mV\n", pp_clks->data[i].clocks_in_khz,
 312                         pp_clks->data[i].voltage_in_mv);
 313                clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
 314                clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
 315        }
 316}
 317
 318bool dm_pp_get_clock_levels_by_type(
 319                const struct dc_context *ctx,
 320                enum dm_pp_clock_type clk_type,
 321                struct dm_pp_clock_levels *dc_clks)
 322{
 323        struct amdgpu_device *adev = ctx->driver_context;
 324        void *pp_handle = adev->powerplay.pp_handle;
 325        struct amd_pp_clocks pp_clks = { 0 };
 326        struct amd_pp_simple_clock_info validation_clks = { 0 };
 327        uint32_t i;
 328
 329        if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) {
 330                if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
 331                        dc_to_pp_clock_type(clk_type), &pp_clks)) {
 332                /* Error in pplib. Provide default values. */
 333                        return true;
 334                }
 335        } else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) {
 336                if (smu_get_clock_by_type(&adev->smu,
 337                                          dc_to_smu_clock_type(clk_type),
 338                                          &pp_clks)) {
 339                        get_default_clock_levels(clk_type, dc_clks);
 340                        return true;
 341                }
 342        }
 343
 344        pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
 345
 346        if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
 347                if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
 348                                                pp_handle, &validation_clks)) {
 349                        /* Error in pplib. Provide default values. */
 350                        DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
 351                        validation_clks.engine_max_clock = 72000;
 352                        validation_clks.memory_max_clock = 80000;
 353                        validation_clks.level = 0;
 354                }
 355        } else if (adev->smu.funcs && adev->smu.funcs->get_max_high_clocks) {
 356                if (smu_get_max_high_clocks(&adev->smu, &validation_clks)) {
 357                        DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
 358                        validation_clks.engine_max_clock = 72000;
 359                        validation_clks.memory_max_clock = 80000;
 360                        validation_clks.level = 0;
 361                }
 362        }
 363
 364        DRM_INFO("DM_PPLIB: Validation clocks:\n");
 365        DRM_INFO("DM_PPLIB:    engine_max_clock: %d\n",
 366                        validation_clks.engine_max_clock);
 367        DRM_INFO("DM_PPLIB:    memory_max_clock: %d\n",
 368                        validation_clks.memory_max_clock);
 369        DRM_INFO("DM_PPLIB:    level           : %d\n",
 370                        validation_clks.level);
 371
 372        /* Translate 10 kHz to kHz. */
 373        validation_clks.engine_max_clock *= 10;
 374        validation_clks.memory_max_clock *= 10;
 375
 376        /* Determine the highest non-boosted level from the Validation Clocks */
 377        if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
 378                for (i = 0; i < dc_clks->num_levels; i++) {
 379                        if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
 380                                /* This clock is higher the validation clock.
 381                                 * Than means the previous one is the highest
 382                                 * non-boosted one. */
 383                                DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
 384                                                dc_clks->num_levels, i);
 385                                dc_clks->num_levels = i > 0 ? i : 1;
 386                                break;
 387                        }
 388                }
 389        } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
 390                for (i = 0; i < dc_clks->num_levels; i++) {
 391                        if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
 392                                DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
 393                                                dc_clks->num_levels, i);
 394                                dc_clks->num_levels = i > 0 ? i : 1;
 395                                break;
 396                        }
 397                }
 398        }
 399
 400        return true;
 401}
 402
 403bool dm_pp_get_clock_levels_by_type_with_latency(
 404        const struct dc_context *ctx,
 405        enum dm_pp_clock_type clk_type,
 406        struct dm_pp_clock_levels_with_latency *clk_level_info)
 407{
 408        struct amdgpu_device *adev = ctx->driver_context;
 409        void *pp_handle = adev->powerplay.pp_handle;
 410        struct pp_clock_levels_with_latency pp_clks = { 0 };
 411        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 412        int ret;
 413
 414        if (pp_funcs && pp_funcs->get_clock_by_type_with_latency) {
 415                ret = pp_funcs->get_clock_by_type_with_latency(pp_handle,
 416                                                dc_to_pp_clock_type(clk_type),
 417                                                &pp_clks);
 418                if (ret)
 419                        return false;
 420        } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency) {
 421                if (smu_get_clock_by_type_with_latency(&adev->smu,
 422                                                       dc_to_pp_clock_type(clk_type),
 423                                                       &pp_clks))
 424                        return false;
 425        }
 426
 427
 428        pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
 429
 430        return true;
 431}
 432
 433bool dm_pp_get_clock_levels_by_type_with_voltage(
 434        const struct dc_context *ctx,
 435        enum dm_pp_clock_type clk_type,
 436        struct dm_pp_clock_levels_with_voltage *clk_level_info)
 437{
 438        struct amdgpu_device *adev = ctx->driver_context;
 439        void *pp_handle = adev->powerplay.pp_handle;
 440        struct pp_clock_levels_with_voltage pp_clk_info = {0};
 441        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 442        int ret;
 443
 444        if (pp_funcs && pp_funcs->get_clock_by_type_with_voltage) {
 445                ret = pp_funcs->get_clock_by_type_with_voltage(pp_handle,
 446                                                dc_to_pp_clock_type(clk_type),
 447                                                &pp_clk_info);
 448                if (ret)
 449                        return false;
 450        } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_voltage) {
 451                if (smu_get_clock_by_type_with_voltage(&adev->smu,
 452                                                       dc_to_pp_clock_type(clk_type),
 453                                                       &pp_clk_info))
 454                        return false;
 455        }
 456
 457        pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);
 458
 459        return true;
 460}
 461
/*
 * Unimplemented stub: always returns false, signalling that the watermark
 * clock-range notification was not delivered.
 */
bool dm_pp_notify_wm_clock_changes(
	const struct dc_context *ctx,
	struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
{
	/* TODO: to be implemented */
	return false;
}
 469
/*
 * Unimplemented stub: always returns false, signalling that the power
 * level change request was not applied.
 */
bool dm_pp_apply_power_level_change_request(
	const struct dc_context *ctx,
	struct dm_pp_power_level_change_request *level_change_req)
{
	/* TODO: to be implemented */
	return false;
}
 477
 478bool dm_pp_apply_clock_for_voltage_request(
 479        const struct dc_context *ctx,
 480        struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
 481{
 482        struct amdgpu_device *adev = ctx->driver_context;
 483        struct pp_display_clock_request pp_clock_request = {0};
 484        int ret = 0;
 485
 486        pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
 487        pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;
 488
 489        if (!pp_clock_request.clock_type)
 490                return false;
 491
 492        if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_clock_voltage_request)
 493                ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
 494                        adev->powerplay.pp_handle,
 495                        &pp_clock_request);
 496        else if (adev->smu.funcs &&
 497                 adev->smu.funcs->display_clock_voltage_request)
 498                ret = smu_display_clock_voltage_request(&adev->smu,
 499                                                        &pp_clock_request);
 500        if (ret)
 501                return false;
 502        return true;
 503}
 504
 505bool dm_pp_get_static_clocks(
 506        const struct dc_context *ctx,
 507        struct dm_pp_static_clock_info *static_clk_info)
 508{
 509        struct amdgpu_device *adev = ctx->driver_context;
 510        struct amd_pp_clock_info pp_clk_info = {0};
 511        int ret = 0;
 512
 513        if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_current_clocks)
 514                ret = adev->powerplay.pp_funcs->get_current_clocks(
 515                        adev->powerplay.pp_handle,
 516                        &pp_clk_info);
 517        else if (adev->smu.funcs)
 518                ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
 519        if (ret)
 520                return false;
 521
 522        static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
 523        static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
 524        static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;
 525
 526        return true;
 527}
 528
/*
 * pp_rv_set_wm_ranges() - hand DC's watermark clock ranges to the power
 * backend.
 *
 * Translates @ranges (reader/DMIF and writer/MCIF watermark sets, clock
 * bounds in MHz) into the SOC15 powerplay structure (kHz) and submits it
 * via pp_funcs, falling back to the SMU interface when the powerplay hook
 * is absent.
 */
void pp_rv_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
	struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges;
	struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges;
	int32_t i;

	wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
	wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;

	for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
		/* Watermark instances above 3 collapse onto set A. */
		if (ranges->reader_wm_sets[i].wm_inst > 3)
			wm_dce_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_dce_clocks[i].wm_set_id =
					ranges->reader_wm_sets[i].wm_inst;
		/* MHz -> kHz; readers drain via DCFCLK, fill from MCLK. */
		wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_max_mem_clk_in_khz =
				ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_mem_clk_in_khz =
				ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
	}

	for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
		if (ranges->writer_wm_sets[i].wm_inst > 3)
			wm_soc_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_soc_clocks[i].wm_set_id =
					ranges->writer_wm_sets[i].wm_inst;
		/* MHz -> kHz; writers fill via SOCCLK, drain to MCLK. */
		wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_max_mem_clk_in_khz =
				ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_mem_clk_in_khz =
				ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
	}

	if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
		pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
							   &wm_with_clock_ranges);
	else if (adev->smu.funcs &&
		 adev->smu.funcs->set_watermarks_for_clock_ranges)
		smu_set_watermarks_for_clock_ranges(&adev->smu,
						    &wm_with_clock_ranges);
}
 584
 585void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
 586{
 587        const struct dc_context *ctx = pp->dm;
 588        struct amdgpu_device *adev = ctx->driver_context;
 589        void *pp_handle = adev->powerplay.pp_handle;
 590        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 591
 592        if (pp_funcs && pp_funcs->notify_smu_enable_pwe)
 593                pp_funcs->notify_smu_enable_pwe(pp_handle);
 594        else if (adev->smu.funcs)
 595                smu_notify_smu_enable_pwe(&adev->smu);
 596}
 597
 598void pp_rv_set_active_display_count(struct pp_smu *pp, int count)
 599{
 600        const struct dc_context *ctx = pp->dm;
 601        struct amdgpu_device *adev = ctx->driver_context;
 602        void *pp_handle = adev->powerplay.pp_handle;
 603        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 604
 605        if (!pp_funcs || !pp_funcs->set_active_display_count)
 606                return;
 607
 608        pp_funcs->set_active_display_count(pp_handle, count);
 609}
 610
 611void pp_rv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int clock)
 612{
 613        const struct dc_context *ctx = pp->dm;
 614        struct amdgpu_device *adev = ctx->driver_context;
 615        void *pp_handle = adev->powerplay.pp_handle;
 616        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 617
 618        if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk)
 619                return;
 620
 621        pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, clock);
 622}
 623
 624void pp_rv_set_hard_min_dcefclk_by_freq(struct pp_smu *pp, int clock)
 625{
 626        const struct dc_context *ctx = pp->dm;
 627        struct amdgpu_device *adev = ctx->driver_context;
 628        void *pp_handle = adev->powerplay.pp_handle;
 629        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 630
 631        if (!pp_funcs || !pp_funcs->set_hard_min_dcefclk_by_freq)
 632                return;
 633
 634        pp_funcs->set_hard_min_dcefclk_by_freq(pp_handle, clock);
 635}
 636
 637void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
 638{
 639        const struct dc_context *ctx = pp->dm;
 640        struct amdgpu_device *adev = ctx->driver_context;
 641        void *pp_handle = adev->powerplay.pp_handle;
 642        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 643
 644        if (!pp_funcs || !pp_funcs->set_hard_min_fclk_by_freq)
 645                return;
 646
 647        pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz);
 648}
 649
/*
 * pp_nv_set_wm_ranges() - hand DC's watermark clock ranges to the SMU.
 *
 * Translates @ranges (reader/DMIF and writer/MCIF watermark sets, clock
 * bounds in MHz) into the SOC15 structure (kHz) and submits it through the
 * SMU interface only (no powerplay path here, unlike pp_rv_set_wm_ranges()).
 *
 * Returns PP_SMU_RESULT_OK on success, PP_SMU_RESULT_UNSUPPORTED when the
 * SMU funcs are missing or the SMU call fails.
 */
enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
	struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
			wm_with_clock_ranges.wm_dmif_clocks_ranges;
	struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
			wm_with_clock_ranges.wm_mcif_clocks_ranges;
	int32_t i;

	wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
	wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;

	for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
		/* Watermark instances above 3 collapse onto set A. */
		if (ranges->reader_wm_sets[i].wm_inst > 3)
			wm_dce_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_dce_clocks[i].wm_set_id =
					ranges->reader_wm_sets[i].wm_inst;
		/* MHz -> kHz; readers drain via DCFCLK, fill from MCLK. */
		wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
			ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
			ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_max_mem_clk_in_khz =
			ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_mem_clk_in_khz =
			ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
	}

	for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
		if (ranges->writer_wm_sets[i].wm_inst > 3)
			wm_soc_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_soc_clocks[i].wm_set_id =
					ranges->writer_wm_sets[i].wm_inst;
		/* MHz -> kHz; writers fill via SOCCLK, drain to MCLK. */
		wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
			ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
			ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_max_mem_clk_in_khz =
			ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_mem_clk_in_khz =
			ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
	}

	if (!smu->funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	/* 0: successful or smu.funcs->set_watermarks_for_clock_ranges = NULL;
	 * 1: fail
	 */
	if (smu_set_watermarks_for_clock_ranges(&adev->smu,
			&wm_with_clock_ranges))
		return PP_SMU_RESULT_UNSUPPORTED;

	return PP_SMU_RESULT_OK;
}
 710
 711enum pp_smu_status pp_nv_set_pme_wa_enable(struct pp_smu *pp)
 712{
 713        const struct dc_context *ctx = pp->dm;
 714        struct amdgpu_device *adev = ctx->driver_context;
 715        struct smu_context *smu = &adev->smu;
 716
 717        if (!smu->funcs)
 718                return PP_SMU_RESULT_UNSUPPORTED;
 719
 720        /* 0: successful or smu.funcs->set_azalia_d3_pme = NULL;  1: fail */
 721        if (smu_set_azalia_d3_pme(smu))
 722                return PP_SMU_RESULT_FAIL;
 723
 724        return PP_SMU_RESULT_OK;
 725}
 726
 727enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
 728{
 729        const struct dc_context *ctx = pp->dm;
 730        struct amdgpu_device *adev = ctx->driver_context;
 731        struct smu_context *smu = &adev->smu;
 732
 733        if (!smu->funcs)
 734                return PP_SMU_RESULT_UNSUPPORTED;
 735
 736        /* 0: successful or smu.funcs->set_display_count = NULL;  1: fail */
 737        if (smu_set_display_count(smu, count))
 738                return PP_SMU_RESULT_FAIL;
 739
 740        return PP_SMU_RESULT_OK;
 741}
 742
 743enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
 744{
 745        const struct dc_context *ctx = pp->dm;
 746        struct amdgpu_device *adev = ctx->driver_context;
 747        struct smu_context *smu = &adev->smu;
 748
 749        if (!smu->funcs)
 750                return PP_SMU_RESULT_UNSUPPORTED;
 751
 752        /* 0: successful or smu.funcs->set_deep_sleep_dcefclk = NULL;1: fail */
 753        if (smu_set_deep_sleep_dcefclk(smu, mhz))
 754                return PP_SMU_RESULT_FAIL;
 755
 756        return PP_SMU_RESULT_OK;
 757}
 758
 759enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
 760                struct pp_smu *pp, int mhz)
 761{
 762        const struct dc_context *ctx = pp->dm;
 763        struct amdgpu_device *adev = ctx->driver_context;
 764        struct smu_context *smu = &adev->smu;
 765        struct pp_display_clock_request clock_req;
 766
 767        if (!smu->funcs)
 768                return PP_SMU_RESULT_UNSUPPORTED;
 769
 770        clock_req.clock_type = amd_pp_dcef_clock;
 771        clock_req.clock_freq_in_khz = mhz * 1000;
 772
 773        /* 0: successful or smu.funcs->display_clock_voltage_request = NULL
 774         * 1: fail
 775         */
 776        if (smu_display_clock_voltage_request(smu, &clock_req))
 777                return PP_SMU_RESULT_FAIL;
 778
 779        return PP_SMU_RESULT_OK;
 780}
 781
 782enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
 783{
 784        const struct dc_context *ctx = pp->dm;
 785        struct amdgpu_device *adev = ctx->driver_context;
 786        struct smu_context *smu = &adev->smu;
 787        struct pp_display_clock_request clock_req;
 788
 789        if (!smu->funcs)
 790                return PP_SMU_RESULT_UNSUPPORTED;
 791
 792        clock_req.clock_type = amd_pp_mem_clock;
 793        clock_req.clock_freq_in_khz = mhz * 1000;
 794
 795        /* 0: successful or smu.funcs->display_clock_voltage_request = NULL
 796         * 1: fail
 797         */
 798        if (smu_display_clock_voltage_request(smu, &clock_req))
 799                return PP_SMU_RESULT_FAIL;
 800
 801        return PP_SMU_RESULT_OK;
 802}
 803
 804enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
 805                enum pp_smu_nv_clock_id clock_id, int mhz)
 806{
 807        const struct dc_context *ctx = pp->dm;
 808        struct amdgpu_device *adev = ctx->driver_context;
 809        struct smu_context *smu = &adev->smu;
 810        struct pp_display_clock_request clock_req;
 811
 812        if (!smu->funcs)
 813                return PP_SMU_RESULT_UNSUPPORTED;
 814
 815        switch (clock_id) {
 816        case PP_SMU_NV_DISPCLK:
 817                clock_req.clock_type = amd_pp_disp_clock;
 818                break;
 819        case PP_SMU_NV_PHYCLK:
 820                clock_req.clock_type = amd_pp_phy_clock;
 821                break;
 822        case PP_SMU_NV_PIXELCLK:
 823                clock_req.clock_type = amd_pp_pixel_clock;
 824                break;
 825        default:
 826                break;
 827        }
 828        clock_req.clock_freq_in_khz = mhz * 1000;
 829
 830        /* 0: successful or smu.funcs->display_clock_voltage_request = NULL
 831         * 1: fail
 832         */
 833        if (smu_display_clock_voltage_request(smu, &clock_req))
 834                return PP_SMU_RESULT_FAIL;
 835
 836        return PP_SMU_RESULT_OK;
 837}
 838
 839enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
 840                struct pp_smu *pp, struct pp_smu_nv_clock_table *max_clocks)
 841{
 842        const struct dc_context *ctx = pp->dm;
 843        struct amdgpu_device *adev = ctx->driver_context;
 844        struct smu_context *smu = &adev->smu;
 845
 846        if (!smu->funcs)
 847                return PP_SMU_RESULT_UNSUPPORTED;
 848
 849        if (!smu->funcs->get_max_sustainable_clocks_by_dc)
 850                return PP_SMU_RESULT_UNSUPPORTED;
 851
 852        if (!smu->funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks))
 853                return PP_SMU_RESULT_OK;
 854
 855        return PP_SMU_RESULT_FAIL;
 856}
 857
 858enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
 859                unsigned int *clock_values_in_khz, unsigned int *num_states)
 860{
 861        const struct dc_context *ctx = pp->dm;
 862        struct amdgpu_device *adev = ctx->driver_context;
 863        struct smu_context *smu = &adev->smu;
 864
 865        if (!smu->ppt_funcs)
 866                return PP_SMU_RESULT_UNSUPPORTED;
 867
 868        if (!smu->ppt_funcs->get_uclk_dpm_states)
 869                return PP_SMU_RESULT_UNSUPPORTED;
 870
 871        if (!smu->ppt_funcs->get_uclk_dpm_states(smu,
 872                        clock_values_in_khz, num_states))
 873                return PP_SMU_RESULT_OK;
 874
 875        return PP_SMU_RESULT_FAIL;
 876}
 877
/*
 * dm_pp_get_funcs - populate the DM->power-management callback table for
 * the ASIC's DCN generation.
 *
 * Raven (DCN 1.x) routes through the pp_rv_* powerplay wrappers; Navi
 * (DCN 2.0, when built in) routes through the pp_nv_* SMU wrappers.
 * Unknown DCE versions log an error and leave @funcs unpopulated.
 */
void dm_pp_get_funcs(
                struct dc_context *ctx,
                struct pp_smu_funcs *funcs)
{
        switch (ctx->dce_version) {
        case DCN_VERSION_1_0:
        case DCN_VERSION_1_01:
                /* Raven: use the powerplay-backed rv callbacks. */
                funcs->ctx.ver = PP_SMU_VER_RV;
                funcs->rv_funcs.pp_smu.dm = ctx;
                funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
                funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
                funcs->rv_funcs.set_display_count =
                                pp_rv_set_active_display_count;
                funcs->rv_funcs.set_min_deep_sleep_dcfclk =
                                pp_rv_set_min_deep_sleep_dcfclk;
                funcs->rv_funcs.set_hard_min_dcfclk_by_freq =
                                pp_rv_set_hard_min_dcefclk_by_freq;
                funcs->rv_funcs.set_hard_min_fclk_by_freq =
                                pp_rv_set_hard_min_fclk_by_freq;
                break;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
        case DCN_VERSION_2_0:
                /* Navi: use the SMU-backed nv callbacks. */
                funcs->ctx.ver = PP_SMU_VER_NV;
                funcs->nv_funcs.pp_smu.dm = ctx;
                funcs->nv_funcs.set_display_count = pp_nv_set_display_count;
                funcs->nv_funcs.set_hard_min_dcfclk_by_freq =
                                pp_nv_set_hard_min_dcefclk_by_freq;
                funcs->nv_funcs.set_min_deep_sleep_dcfclk =
                                pp_nv_set_min_deep_sleep_dcfclk;
                funcs->nv_funcs.set_voltage_by_freq =
                                pp_nv_set_voltage_by_freq;
                funcs->nv_funcs.set_wm_ranges = pp_nv_set_wm_ranges;

                /* TODO: enabling set_pme_wa_enable keeps a 4k@60Hz display
                 * from lighting up; leave it NULL until that is root-caused.
                 */
                funcs->nv_funcs.set_pme_wa_enable = NULL;
                /* TODO: debug warning message */
                funcs->nv_funcs.set_hard_min_uclk_by_freq = pp_nv_set_hard_min_uclk_by_freq;
                /* TODO: compare data with the Windows driver */
                funcs->nv_funcs.get_maximum_sustainable_clocks = pp_nv_get_maximum_sustainable_clocks;
                /* TODO: compare data with the Windows driver */
                funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states;
                break;
#endif
        default:
                DRM_ERROR("smu version is not supported !\n");
                break;
        }
}
 926