linux/drivers/gpu/drm/radeon/kv_dpm.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>
#include <linux/seq_file.h>

#include "cikd.h"
#include "kv_dpm.h"
#include "r600_dpm.h"
#include "radeon.h"
#include "radeon_asic.h"

#define KV_MAX_DEEPSLEEP_DIVIDER_ID     5
#define KV_MINIMUM_ENGINE_CLOCK         800
#define SMC_RAM_END                     0x40000

static int kv_enable_nb_dpm(struct radeon_device *rdev,
                            bool enable);
static void kv_init_graphics_levels(struct radeon_device *rdev);
static int kv_calculate_ds_divider(struct radeon_device *rdev);
static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
static int kv_calculate_dpm_settings(struct radeon_device *rdev);
static void kv_enable_new_levels(struct radeon_device *rdev);
static void kv_program_nbps_index_settings(struct radeon_device *rdev,
                                           struct radeon_ps *new_rps);
static int kv_set_enabled_level(struct radeon_device *rdev, u32 level);
static int kv_set_enabled_levels(struct radeon_device *rdev);
static int kv_force_dpm_highest(struct radeon_device *rdev);
static int kv_force_dpm_lowest(struct radeon_device *rdev);
static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
                                        struct radeon_ps *new_rps,
                                        struct radeon_ps *old_rps);
static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
                                            int min_temp, int max_temp);
static int kv_init_fps_limits(struct radeon_device *rdev);

void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate);

extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_update_cg(struct radeon_device *rdev,
                          u32 block, bool enable);

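/*
 * DIDT (di/dt) configuration table.  Each entry is
 * { offset, mask, shift, value, type }: value is shifted into the masked
 * field of the register at offset, using the access method selected by
 * type (SMC indirect, DIDT indirect, or plain MMIO).  The table is
 * terminated by an entry with offset 0xFFFFFFFF and is applied by
 * kv_program_pt_config_registers().
 */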
static const struct kv_pt_config_reg didt_config_kv[] =
{
        { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
        { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
        { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
        { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
        { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
        { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
        { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
        { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
        { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
        { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
        { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
        { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
        { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
        { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
        { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
        { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
        { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
        { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
        { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
        { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
        { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0xFFFFFFFF }
};

static struct kv_ps *kv_get_ps(struct radeon_ps *rps)
{
        struct kv_ps *ps = rps->ps_priv;

        return ps;
}

static struct kv_power_info *kv_get_pi(struct radeon_device *rdev)
{
        struct kv_power_info *pi = rdev->pm.dpm.priv;

        return pi;
}

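/*
 * Walk a kv_pt_config_reg table and program each field.  Entries of
 * type KV_CONFIGREG_CACHE are accumulated into a shadow value that is
 * OR'd into the next non-cache register write; all other entries are
 * read-modify-written through the access method given by their type.
 */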
static int kv_program_pt_config_registers(struct radeon_device *rdev,
                                          const struct kv_pt_config_reg *cac_config_regs)
{
        const struct kv_pt_config_reg *config_regs = cac_config_regs;
        u32 data;
        u32 cache = 0;

        if (config_regs == NULL)
                return -EINVAL;

        while (config_regs->offset != 0xFFFFFFFF) {
                if (config_regs->type == KV_CONFIGREG_CACHE) {
                        cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
                } else {
                        switch (config_regs->type) {
                        case KV_CONFIGREG_SMC_IND:
                                data = RREG32_SMC(config_regs->offset);
                                break;
                        case KV_CONFIGREG_DIDT_IND:
                                data = RREG32_DIDT(config_regs->offset);
                                break;
                        default:
                                data = RREG32(config_regs->offset << 2);
                                break;
                        }

                        data &= ~config_regs->mask;
                        data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
                        data |= cache;
                        cache = 0;

                        switch (config_regs->type) {
                        case KV_CONFIGREG_SMC_IND:
                                WREG32_SMC(config_regs->offset, data);
                                break;
                        case KV_CONFIGREG_DIDT_IND:
                                WREG32_DIDT(config_regs->offset, data);
                                break;
                        default:
                                WREG32(config_regs->offset << 2, data);
                                break;
                        }
                }
                config_regs++;
        }

        return 0;
}

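/*
 * Toggle the DIDT_CTRL_EN bit in the SQ/DB/TD/TCP DIDT control
 * registers for whichever blocks have ramping enabled in the caps.
 */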
static void kv_do_enable_didt(struct radeon_device *rdev, bool enable)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        u32 data;

        if (pi->caps_sq_ramping) {
                data = RREG32_DIDT(DIDT_SQ_CTRL0);
                if (enable)
                        data |= DIDT_CTRL_EN;
                else
                        data &= ~DIDT_CTRL_EN;
                WREG32_DIDT(DIDT_SQ_CTRL0, data);
        }

        if (pi->caps_db_ramping) {
                data = RREG32_DIDT(DIDT_DB_CTRL0);
                if (enable)
                        data |= DIDT_CTRL_EN;
                else
                        data &= ~DIDT_CTRL_EN;
                WREG32_DIDT(DIDT_DB_CTRL0, data);
        }

        if (pi->caps_td_ramping) {
                data = RREG32_DIDT(DIDT_TD_CTRL0);
                if (enable)
                        data |= DIDT_CTRL_EN;
                else
                        data &= ~DIDT_CTRL_EN;
                WREG32_DIDT(DIDT_TD_CTRL0, data);
        }

        if (pi->caps_tcp_ramping) {
                data = RREG32_DIDT(DIDT_TCP_CTRL0);
                if (enable)
                        data |= DIDT_CTRL_EN;
                else
                        data &= ~DIDT_CTRL_EN;
                WREG32_DIDT(DIDT_TCP_CTRL0, data);
        }
}

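/*
 * Enable or disable di/dt power throttling.  The DIDT registers are
 * only touched with the RLC in safe mode; on enable, the config table
 * is programmed before the per-block enable bits are set.
 */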
static int kv_enable_didt(struct radeon_device *rdev, bool enable)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        int ret;

        if (pi->caps_sq_ramping ||
            pi->caps_db_ramping ||
            pi->caps_td_ramping ||
            pi->caps_tcp_ramping) {
                cik_enter_rlc_safe_mode(rdev);

                if (enable) {
                        ret = kv_program_pt_config_registers(rdev, didt_config_kv);
                        if (ret) {
                                cik_exit_rlc_safe_mode(rdev);
                                return ret;
                        }
                }

                kv_do_enable_didt(rdev, enable);

                cik_exit_rlc_safe_mode(rdev);
        }

        return 0;
}

static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        int ret = 0;

        if (pi->caps_cac) {
                if (enable) {
                        ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac);
                        if (ret)
                                pi->cac_enabled = false;
                        else
                                pi->cac_enabled = true;
                } else if (pi->cac_enabled) {
                        kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac);
                        pi->cac_enabled = false;
                }
        }

        return ret;
}

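/*
 * Read the DpmTable and SoftRegisters offsets from the SMU firmware
 * header in SMC SRAM; later table uploads are relative to these.
 */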
static int kv_process_firmware_header(struct radeon_device *rdev)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        u32 tmp;
        int ret;

        ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
                                     offsetof(SMU7_Firmware_Header, DpmTable),
                                     &tmp, pi->sram_end);

        if (ret == 0)
                pi->dpm_table_start = tmp;

        ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
                                     offsetof(SMU7_Firmware_Header, SoftRegisters),
                                     &tmp, pi->sram_end);

        if (ret == 0)
                pi->soft_regs_start = tmp;

        return ret;
}

static int kv_enable_dpm_voltage_scaling(struct radeon_device *rdev)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        int ret;

        pi->graphics_voltage_change_enable = 1;

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
                                   &pi->graphics_voltage_change_enable,
                                   sizeof(u8), pi->sram_end);

        return ret;
}

static int kv_set_dpm_interval(struct radeon_device *rdev)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        int ret;

        pi->graphics_interval = 1;

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
                                   &pi->graphics_interval,
                                   sizeof(u8), pi->sram_end);

        return ret;
}

static int kv_set_dpm_boot_state(struct radeon_device *rdev)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        int ret;

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
                                   &pi->graphics_boot_level,
                                   sizeof(u8), pi->sram_end);

        return ret;
}

static void kv_program_vc(struct radeon_device *rdev)
{
        WREG32_SMC(CG_FTV_0, 0x3FFFC100);
}

static void kv_clear_vc(struct radeon_device *rdev)
{
        WREG32_SMC(CG_FTV_0, 0);
}

static int kv_set_divider_value(struct radeon_device *rdev,
                                u32 index, u32 sclk)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        struct atom_clock_dividers dividers;
        int ret;

        ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
                                             sclk, false, &dividers);
        if (ret)
                return ret;

        pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
        pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);

        return 0;
}

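/*
 * Map a 2-bit VID index to a 7-bit VID.  The vddc-vs-sclk dependency
 * table is used when present; otherwise the VID mapping table from the
 * BIOS is searched, falling back to its last entry.
 */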
static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev,
                                   struct sumo_vid_mapping_table *vid_mapping_table,
                                   u32 vid_2bit)
{
        struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
                &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
        u32 i;

        if (vddc_sclk_table && vddc_sclk_table->count) {
                if (vid_2bit < vddc_sclk_table->count)
                        return vddc_sclk_table->entries[vid_2bit].v;
                else
                        return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
        } else {
                for (i = 0; i < vid_mapping_table->num_entries; i++) {
                        if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
                                return vid_mapping_table->entries[i].vid_7bit;
                }
                return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
        }
}

static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev,
                                   struct sumo_vid_mapping_table *vid_mapping_table,
                                   u32 vid_7bit)
{
        struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
                &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
        u32 i;

        if (vddc_sclk_table && vddc_sclk_table->count) {
                for (i = 0; i < vddc_sclk_table->count; i++) {
                        if (vddc_sclk_table->entries[i].v == vid_7bit)
                                return i;
                }
                return vddc_sclk_table->count - 1;
        } else {
                for (i = 0; i < vid_mapping_table->num_entries; i++) {
                        if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
                                return vid_mapping_table->entries[i].vid_2bit;
                }

                return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
        }
}

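/*
 * Convert an 8-bit VID index to a voltage value using the linear
 * encoding voltage = 6200 - 25 * index.
 */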
static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
                                            u16 voltage)
{
        return 6200 - (voltage * 25);
}

static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
                                            u32 vid_2bit)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        u32 vid_8bit = kv_convert_vid2_to_vid7(rdev,
                                               &pi->sys_info.vid_mapping_table,
                                               vid_2bit);

        return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
}


static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
{
        struct kv_power_info *pi = kv_get_pi(rdev);

        pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
        pi->graphics_level[index].MinVddNb =
                cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid));

        return 0;
}

static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at)
{
        struct kv_power_info *pi = kv_get_pi(rdev);

        pi->graphics_level[index].AT = cpu_to_be16((u16)at);

        return 0;
}

static void kv_dpm_power_level_enable(struct radeon_device *rdev,
                                      u32 index, bool enable)
{
        struct kv_power_info *pi = kv_get_pi(rdev);

        pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
}

static void kv_start_dpm(struct radeon_device *rdev)
{
        u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

        tmp |= GLOBAL_PWRMGT_EN;
        WREG32_SMC(GENERAL_PWRMGT, tmp);

        kv_smc_dpm_enable(rdev, true);
}

static void kv_stop_dpm(struct radeon_device *rdev)
{
        kv_smc_dpm_enable(rdev, false);
}

static void kv_start_am(struct radeon_device *rdev)
{
        u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);

        sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
        sclk_pwrmgt_cntl |= DYNAMIC_PM_EN;

        WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static void kv_reset_am(struct radeon_device *rdev)
{
        u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);

        sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT);

        WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze)
{
        return kv_notify_message_to_smu(rdev, freeze ?
                                        PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}

static int kv_force_lowest_valid(struct radeon_device *rdev)
{
        return kv_force_dpm_lowest(rdev);
}

static int kv_unforce_levels(struct radeon_device *rdev)
{
        if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
                return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
        else
                return kv_set_enabled_levels(rdev);
}

static int kv_update_sclk_t(struct radeon_device *rdev)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        u32 low_sclk_interrupt_t = 0;
        int ret = 0;

        if (pi->caps_sclk_throttle_low_notification) {
                low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

                ret = kv_copy_bytes_to_smc(rdev,
                                           pi->dpm_table_start +
                                           offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
                                           (u8 *)&low_sclk_interrupt_t,
                                           sizeof(u32), pi->sram_end);
        }
        return ret;
}

static int kv_program_bootup_state(struct radeon_device *rdev)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        u32 i;
        struct radeon_clock_voltage_dependency_table *table =
                &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

        if (table && table->count) {
                for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
                        if (table->entries[i].clk == pi->boot_pl.sclk)
                                break;
                }

                pi->graphics_boot_level = (u8)i;
                kv_dpm_power_level_enable(rdev, i, true);
        } else {
                struct sumo_sclk_voltage_mapping_table *table =
                        &pi->sys_info.sclk_voltage_mapping_table;

                if (table->num_max_dpm_entries == 0)
                        return -EINVAL;

                for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
                        if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
                                break;
                }

                pi->graphics_boot_level = (u8)i;
                kv_dpm_power_level_enable(rdev, i, true);
        }
        return 0;
}

static int kv_enable_auto_thermal_throttling(struct radeon_device *rdev)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        int ret;

        pi->graphics_therm_throttle_enable = 1;

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
                                   &pi->graphics_therm_throttle_enable,
                                   sizeof(u8), pi->sram_end);

        return ret;
}

static int kv_upload_dpm_settings(struct radeon_device *rdev)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        int ret;

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
                                   (u8 *)&pi->graphics_level,
                                   sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
                                   pi->sram_end);

        if (ret)
                return ret;

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
                                   &pi->graphics_dpm_level_count,
                                   sizeof(u8), pi->sram_end);

        return ret;
}

static u32 kv_get_clock_difference(u32 a, u32 b)
{
        return (a >= b) ? a - b : b - a;
}

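/*
 * Pick a DFS bypass divider setting for clocks that sit within 200
 * units of one of the reference frequencies (40000, 30000, 20000,
 * 15000, 10000); returns 0 when bypass is disabled or nothing matches.
 */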
static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        u32 value;

        if (pi->caps_enable_dfs_bypass) {
                if (kv_get_clock_difference(clk, 40000) < 200)
                        value = 3;
                else if (kv_get_clock_difference(clk, 30000) < 200)
                        value = 2;
                else if (kv_get_clock_difference(clk, 20000) < 200)
                        value = 7;
                else if (kv_get_clock_difference(clk, 15000) < 200)
                        value = 6;
                else if (kv_get_clock_difference(clk, 10000) < 200)
                        value = 8;
                else
                        value = 0;
        } else {
                value = 0;
        }

        return value;
}

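/*
 * Build the UVD level table (vclk/dclk frequencies, minimum voltage,
 * bypass and divider settings) from the UVD clock/voltage dependency
 * table and upload it, the level count and the UVD interval to the SMC.
 */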
static int kv_populate_uvd_table(struct radeon_device *rdev)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        struct radeon_uvd_clock_voltage_dependency_table *table =
                &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
        struct atom_clock_dividers dividers;
        int ret;
        u32 i;

        if (table == NULL || table->count == 0)
                return 0;

        pi->uvd_level_count = 0;
        for (i = 0; i < table->count; i++) {
                if (pi->high_voltage_t &&
                    (pi->high_voltage_t < table->entries[i].v))
                        break;

                pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
                pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
                pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);

                pi->uvd_level[i].VClkBypassCntl =
                        (u8)kv_get_clk_bypass(rdev, table->entries[i].vclk);
                pi->uvd_level[i].DClkBypassCntl =
                        (u8)kv_get_clk_bypass(rdev, table->entries[i].dclk);

                ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
                                                     table->entries[i].vclk, false, &dividers);
                if (ret)
                        return ret;
                pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;

                ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
                                                     table->entries[i].dclk, false, &dividers);
                if (ret)
                        return ret;
                pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;

                pi->uvd_level_count++;
        }

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
                                   (u8 *)&pi->uvd_level_count,
                                   sizeof(u8), pi->sram_end);
        if (ret)
                return ret;

        pi->uvd_interval = 1;

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, UVDInterval),
                                   &pi->uvd_interval,
                                   sizeof(u8), pi->sram_end);
        if (ret)
                return ret;

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, UvdLevel),
                                   (u8 *)&pi->uvd_level,
                                   sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
                                   pi->sram_end);

        return ret;

}

static int kv_populate_vce_table(struct radeon_device *rdev)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        int ret;
        u32 i;
        struct radeon_vce_clock_voltage_dependency_table *table =
                &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
        struct atom_clock_dividers dividers;

        if (table == NULL || table->count == 0)
                return 0;

        pi->vce_level_count = 0;
        for (i = 0; i < table->count; i++) {
                if (pi->high_voltage_t &&
                    pi->high_voltage_t < table->entries[i].v)
                        break;

                pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
                pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

                pi->vce_level[i].ClkBypassCntl =
                        (u8)kv_get_clk_bypass(rdev, table->entries[i].evclk);

                ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
                                                     table->entries[i].evclk, false, &dividers);
                if (ret)
                        return ret;
                pi->vce_level[i].Divider = (u8)dividers.post_div;

                pi->vce_level_count++;
        }

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
                                   (u8 *)&pi->vce_level_count,
                                   sizeof(u8),
                                   pi->sram_end);
        if (ret)
                return ret;

        pi->vce_interval = 1;

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, VCEInterval),
                                   (u8 *)&pi->vce_interval,
                                   sizeof(u8),
                                   pi->sram_end);
        if (ret)
                return ret;

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, VceLevel),
                                   (u8 *)&pi->vce_level,
                                   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
                                   pi->sram_end);

        return ret;
}

static int kv_populate_samu_table(struct radeon_device *rdev)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        struct radeon_clock_voltage_dependency_table *table =
                &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
        struct atom_clock_dividers dividers;
        int ret;
        u32 i;

        if (table == NULL || table->count == 0)
                return 0;

        pi->samu_level_count = 0;
        for (i = 0; i < table->count; i++) {
                if (pi->high_voltage_t &&
                    pi->high_voltage_t < table->entries[i].v)
                        break;

                pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
                pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

                pi->samu_level[i].ClkBypassCntl =
                        (u8)kv_get_clk_bypass(rdev, table->entries[i].clk);

                ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
                                                     table->entries[i].clk, false, &dividers);
                if (ret)
                        return ret;
                pi->samu_level[i].Divider = (u8)dividers.post_div;

                pi->samu_level_count++;
        }

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
                                   (u8 *)&pi->samu_level_count,
                                   sizeof(u8),
                                   pi->sram_end);
        if (ret)
                return ret;

        pi->samu_interval = 1;

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
                                   (u8 *)&pi->samu_interval,
                                   sizeof(u8),
                                   pi->sram_end);
        if (ret)
                return ret;

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, SamuLevel),
                                   (u8 *)&pi->samu_level,
                                   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
                                   pi->sram_end);
        if (ret)
                return ret;

        return ret;
}


static int kv_populate_acp_table(struct radeon_device *rdev)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        struct radeon_clock_voltage_dependency_table *table =
                &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
        struct atom_clock_dividers dividers;
        int ret;
        u32 i;

        if (table == NULL || table->count == 0)
                return 0;

        pi->acp_level_count = 0;
        for (i = 0; i < table->count; i++) {
                pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
                pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

                ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
                                                     table->entries[i].clk, false, &dividers);
                if (ret)
                        return ret;
                pi->acp_level[i].Divider = (u8)dividers.post_div;

                pi->acp_level_count++;
        }

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
                                   (u8 *)&pi->acp_level_count,
                                   sizeof(u8),
                                   pi->sram_end);
        if (ret)
                return ret;

        pi->acp_interval = 1;

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, ACPInterval),
                                   (u8 *)&pi->acp_interval,
                                   sizeof(u8),
                                   pi->sram_end);
        if (ret)
                return ret;

        ret = kv_copy_bytes_to_smc(rdev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Fusion_DpmTable, AcpLevel),
                                   (u8 *)&pi->acp_level,
                                   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
                                   pi->sram_end);
        if (ret)
                return ret;

        return ret;
}

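/*
 * Apply the same DFS bypass selection as kv_get_clk_bypass() to each
 * graphics DPM level, using whichever sclk table is available.
 */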
static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        u32 i;
        struct radeon_clock_voltage_dependency_table *table =
                &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

        if (table && table->count) {
                for (i = 0; i < pi->graphics_dpm_level_count; i++) {
                        if (pi->caps_enable_dfs_bypass) {
                                if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
                                        pi->graphics_level[i].ClkBypassCntl = 3;
                                else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
                                        pi->graphics_level[i].ClkBypassCntl = 2;
                                else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
                                        pi->graphics_level[i].ClkBypassCntl = 7;
                                else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
                                        pi->graphics_level[i].ClkBypassCntl = 6;
                                else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
                                        pi->graphics_level[i].ClkBypassCntl = 8;
                                else
                                        pi->graphics_level[i].ClkBypassCntl = 0;
                        } else {
                                pi->graphics_level[i].ClkBypassCntl = 0;
                        }
                }
        } else {
                struct sumo_sclk_voltage_mapping_table *table =
                        &pi->sys_info.sclk_voltage_mapping_table;
                for (i = 0; i < pi->graphics_dpm_level_count; i++) {
                        if (pi->caps_enable_dfs_bypass) {
                                if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
                                        pi->graphics_level[i].ClkBypassCntl = 3;
                                else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
                                        pi->graphics_level[i].ClkBypassCntl = 2;
                                else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
                                        pi->graphics_level[i].ClkBypassCntl = 7;
                                else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
                                        pi->graphics_level[i].ClkBypassCntl = 6;
                                else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
                                        pi->graphics_level[i].ClkBypassCntl = 8;
                                else
                                        pi->graphics_level[i].ClkBypassCntl = 0;
                        } else {
                                pi->graphics_level[i].ClkBypassCntl = 0;
                        }
                }
        }
}

static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
{
        return kv_notify_message_to_smu(rdev, enable ?
                                        PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}

static void kv_reset_acp_boot_level(struct radeon_device *rdev)
{
        struct kv_power_info *pi = kv_get_pi(rdev);

        pi->acp_boot_level = 0xff;
}

static void kv_update_current_ps(struct radeon_device *rdev,
                                 struct radeon_ps *rps)
{
        struct kv_ps *new_ps = kv_get_ps(rps);
        struct kv_power_info *pi = kv_get_pi(rdev);

        pi->current_rps = *rps;
        pi->current_ps = *new_ps;
        pi->current_rps.ps_priv = &pi->current_ps;
}

static void kv_update_requested_ps(struct radeon_device *rdev,
                                   struct radeon_ps *rps)
{
        struct kv_ps *new_ps = kv_get_ps(rps);
        struct kv_power_info *pi = kv_get_pi(rdev);

        pi->requested_rps = *rps;
        pi->requested_ps = *new_ps;
        pi->requested_rps.ps_priv = &pi->requested_ps;
}

void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        int ret;

        if (pi->bapm_enable) {
                ret = kv_smc_bapm_enable(rdev, enable);
                if (ret)
                        DRM_ERROR("kv_smc_bapm_enable failed\n");
        }
}

static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable)
{
        u32 thermal_int;

        thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL);
        if (enable)
                thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
        else
                thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK);
        WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
}

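/*
 * Bring up DPM: parse the firmware header, build and upload the
 * graphics/UVD/VCE/SAMU/ACP tables, program the bootup state, then
 * enable voltage scaling, ULV, DIDT and CAC and start the SMC DPM
 * state machine.
 */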
int kv_dpm_enable(struct radeon_device *rdev)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        int ret;

        ret = kv_process_firmware_header(rdev);
        if (ret) {
                DRM_ERROR("kv_process_firmware_header failed\n");
                return ret;
        }
        kv_init_fps_limits(rdev);
        kv_init_graphics_levels(rdev);
        ret = kv_program_bootup_state(rdev);
        if (ret) {
                DRM_ERROR("kv_program_bootup_state failed\n");
                return ret;
        }
        kv_calculate_dfs_bypass_settings(rdev);
        ret = kv_upload_dpm_settings(rdev);
        if (ret) {
                DRM_ERROR("kv_upload_dpm_settings failed\n");
                return ret;
        }
        ret = kv_populate_uvd_table(rdev);
        if (ret) {
                DRM_ERROR("kv_populate_uvd_table failed\n");
                return ret;
        }
        ret = kv_populate_vce_table(rdev);
        if (ret) {
                DRM_ERROR("kv_populate_vce_table failed\n");
                return ret;
        }
        ret = kv_populate_samu_table(rdev);
        if (ret) {
                DRM_ERROR("kv_populate_samu_table failed\n");
                return ret;
        }
        ret = kv_populate_acp_table(rdev);
        if (ret) {
                DRM_ERROR("kv_populate_acp_table failed\n");
                return ret;
        }
        kv_program_vc(rdev);

        kv_start_am(rdev);
        if (pi->enable_auto_thermal_throttling) {
                ret = kv_enable_auto_thermal_throttling(rdev);
                if (ret) {
                        DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
                        return ret;
                }
        }
        ret = kv_enable_dpm_voltage_scaling(rdev);
        if (ret) {
                DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
                return ret;
        }
        ret = kv_set_dpm_interval(rdev);
        if (ret) {
                DRM_ERROR("kv_set_dpm_interval failed\n");
                return ret;
        }
        ret = kv_set_dpm_boot_state(rdev);
        if (ret) {
                DRM_ERROR("kv_set_dpm_boot_state failed\n");
                return ret;
        }
        ret = kv_enable_ulv(rdev, true);
        if (ret) {
                DRM_ERROR("kv_enable_ulv failed\n");
                return ret;
        }
        kv_start_dpm(rdev);
        ret = kv_enable_didt(rdev, true);
        if (ret) {
                DRM_ERROR("kv_enable_didt failed\n");
                return ret;
        }
        ret = kv_enable_smc_cac(rdev, true);
        if (ret) {
                DRM_ERROR("kv_enable_smc_cac failed\n");
                return ret;
        }

        kv_reset_acp_boot_level(rdev);

        ret = kv_smc_bapm_enable(rdev, false);
        if (ret) {
                DRM_ERROR("kv_smc_bapm_enable failed\n");
                return ret;
        }

        kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);

        return ret;
}

int kv_dpm_late_enable(struct radeon_device *rdev)
{
        int ret = 0;

        if (rdev->irq.installed &&
            r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
                ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
                if (ret) {
                        DRM_ERROR("kv_set_thermal_temperature_range failed\n");
                        return ret;
                }
                kv_enable_thermal_int(rdev, true);
        }

        /* powerdown unused blocks for now */
        kv_dpm_powergate_acp(rdev, true);
        kv_dpm_powergate_samu(rdev, true);
        kv_dpm_powergate_vce(rdev, true);
        kv_dpm_powergate_uvd(rdev, true);

        return ret;
}

void kv_dpm_disable(struct radeon_device *rdev)
{
        kv_smc_bapm_enable(rdev, false);

        if (rdev->family == CHIP_MULLINS)
                kv_enable_nb_dpm(rdev, false);

        /* powerup blocks */
        kv_dpm_powergate_acp(rdev, false);
        kv_dpm_powergate_samu(rdev, false);
        kv_dpm_powergate_vce(rdev, false);
        kv_dpm_powergate_uvd(rdev, false);

        kv_enable_smc_cac(rdev, false);
        kv_enable_didt(rdev, false);
        kv_clear_vc(rdev);
        kv_stop_dpm(rdev);
        kv_enable_ulv(rdev, false);
        kv_reset_am(rdev);
        kv_enable_thermal_int(rdev, false);

        kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
}

static void kv_init_sclk_t(struct radeon_device *rdev)
{
        struct kv_power_info *pi = kv_get_pi(rdev);

        pi->low_sclk_interrupt_t = 0;
}

static int kv_init_fps_limits(struct radeon_device *rdev)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        int ret = 0;

        if (pi->caps_fps) {
                u16 tmp;

                tmp = 45;
                pi->fps_high_t = cpu_to_be16(tmp);
                ret = kv_copy_bytes_to_smc(rdev,
                                           pi->dpm_table_start +
                                           offsetof(SMU7_Fusion_DpmTable, FpsHighT),
                                           (u8 *)&pi->fps_high_t,
                                           sizeof(u16), pi->sram_end);

                tmp = 30;
                pi->fps_low_t = cpu_to_be16(tmp);

                ret = kv_copy_bytes_to_smc(rdev,
                                           pi->dpm_table_start +
                                           offsetof(SMU7_Fusion_DpmTable, FpsLowT),
                                           (u8 *)&pi->fps_low_t,
                                           sizeof(u16), pi->sram_end);

        }
        return ret;
}

static void kv_init_powergate_state(struct radeon_device *rdev)
{
        struct kv_power_info *pi = kv_get_pi(rdev);

        pi->uvd_power_gated = false;
        pi->vce_power_gated = false;
        pi->samu_power_gated = false;
        pi->acp_power_gated = false;
}

static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
{
        return kv_notify_message_to_smu(rdev, enable ?
                                        PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
}

static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
{
        return kv_notify_message_to_smu(rdev, enable ?
                                        PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
}

static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
{
        return kv_notify_message_to_smu(rdev, enable ?
                                        PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
}

static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable)
{
        return kv_notify_message_to_smu(rdev, enable ?
                                        PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
}

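/*
 * When ungating UVD, pick the UVD boot level and enabled-level mask
 * (a single level under stable-p-state or when UVD DPM is not
 * supported) and push them to the SMC before enabling UVD DPM.
 */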
static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
{
        struct kv_power_info *pi = kv_get_pi(rdev);
        struct radeon_uvd_clock_voltage_dependency_table *table =
                &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
        int ret;
        u32 mask;

        if (!gate) {
                if (table->count)
                        pi->uvd_boot_level = table->count - 1;
                else
                        pi->uvd_boot_level = 0;

                if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
                        mask = 1 << pi->uvd_boot_level;
                } else {
                        mask = 0x1f;
                }

                ret = kv_copy_bytes_to_smc(rdev,
                                           pi->dpm_table_start +
                                           offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
                                           (uint8_t *)&pi->uvd_boot_level,
                                           sizeof(u8), pi->sram_end);
                if (ret)
                        return ret;

                kv_send_msg_to_smc_with_parameter(rdev,
                                                  PPSMC_MSG_UVDDPM_SetEnabledMask,
                                                  mask);
        }

        return kv_enable_uvd_dpm(rdev, !gate);
}

static u8 kv_get_vce_boot_level(struct radeon_device *rdev, u32 evclk)
{
        u8 i;
        struct radeon_vce_clock_voltage_dependency_table *table =
                &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

        for (i = 0; i < table->count; i++) {
                if (table->entries[i].evclk >= evclk)
                        break;
        }

        return i;
}

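/*
 * Handle VCE DPM across a state change: when encoding starts
 * (evclk 0 -> non-zero) ungate VCE, program the VCE boot level and
 * enable VCE DPM; when encoding stops, disable VCE DPM and gate the
 * block again.
 */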
1294static int kv_update_vce_dpm(struct radeon_device *rdev,
1295                             struct radeon_ps *radeon_new_state,
1296                             struct radeon_ps *radeon_current_state)
1297{
1298        struct kv_power_info *pi = kv_get_pi(rdev);
1299        struct radeon_vce_clock_voltage_dependency_table *table =
1300                &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
1301        int ret;
1302
1303        if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
1304                kv_dpm_powergate_vce(rdev, false);
1305                /* turn the clocks on when encoding */
1306                cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
1307                if (pi->caps_stable_p_state)
1308                        pi->vce_boot_level = table->count - 1;
1309                else
1310                        pi->vce_boot_level = kv_get_vce_boot_level(rdev, radeon_new_state->evclk);
1311
1312                ret = kv_copy_bytes_to_smc(rdev,
1313                                           pi->dpm_table_start +
1314                                           offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
1315                                           (u8 *)&pi->vce_boot_level,
1316                                           sizeof(u8),
1317                                           pi->sram_end);
1318                if (ret)
1319                        return ret;
1320
1321                if (pi->caps_stable_p_state)
1322                        kv_send_msg_to_smc_with_parameter(rdev,
1323                                                          PPSMC_MSG_VCEDPM_SetEnabledMask,
1324                                                          (1 << pi->vce_boot_level));
1325
1326                kv_enable_vce_dpm(rdev, true);
1327        } else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
1328                kv_enable_vce_dpm(rdev, false);
1329                /* turn the clocks off when not encoding */
1330                cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
1331                kv_dpm_powergate_vce(rdev, true);
1332        }
1333
1334        return 0;
1335}
1336
1337static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
1338{
1339        struct kv_power_info *pi = kv_get_pi(rdev);
1340        struct radeon_clock_voltage_dependency_table *table =
1341                &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
1342        int ret;
1343
1344        if (!gate) {
1345                if (pi->caps_stable_p_state)
1346                        pi->samu_boot_level = table->count - 1;
1347                else
1348                        pi->samu_boot_level = 0;
1349
1350                ret = kv_copy_bytes_to_smc(rdev,
1351                                           pi->dpm_table_start +
1352                                           offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
1353                                           (u8 *)&pi->samu_boot_level,
1354                                           sizeof(u8),
1355                                           pi->sram_end);
1356                if (ret)
1357                        return ret;
1358
1359                if (pi->caps_stable_p_state)
1360                        kv_send_msg_to_smc_with_parameter(rdev,
1361                                                          PPSMC_MSG_SAMUDPM_SetEnabledMask,
1362                                                          (1 << pi->samu_boot_level));
1363        }
1364
1365        return kv_enable_samu_dpm(rdev, !gate);
1366}
1367
1368static u8 kv_get_acp_boot_level(struct radeon_device *rdev)
1369{
1370        u8 i;
1371        struct radeon_clock_voltage_dependency_table *table =
1372                &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
1373
1374        for (i = 0; i < table->count; i++) {
1375                if (table->entries[i].clk >= 0) /* XXX */
1376                        break;
1377        }
1378
1379        if (i >= table->count)
1380                i = table->count - 1;
1381
1382        return i;
1383}
1384
1385static void kv_update_acp_boot_level(struct radeon_device *rdev)
1386{
1387        struct kv_power_info *pi = kv_get_pi(rdev);
1388        u8 acp_boot_level;
1389
1390        if (!pi->caps_stable_p_state) {
1391                acp_boot_level = kv_get_acp_boot_level(rdev);
1392                if (acp_boot_level != pi->acp_boot_level) {
1393                        pi->acp_boot_level = acp_boot_level;
1394                        kv_send_msg_to_smc_with_parameter(rdev,
1395                                                          PPSMC_MSG_ACPDPM_SetEnabledMask,
1396                                                          (1 << pi->acp_boot_level));
1397                }
1398        }
1399}
1400
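/*
 * Program the ACP boot level in the SMU DPM table and enable or
 * disable ACP DPM depending on whether the block is being gated.
 */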
1401static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
1402{
1403        struct kv_power_info *pi = kv_get_pi(rdev);
1404        struct radeon_clock_voltage_dependency_table *table =
1405                &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
1406        int ret;
1407
1408        if (!gate) {
1409                if (pi->caps_stable_p_state)
1410                        pi->acp_boot_level = table->count - 1;
1411                else
1412                        pi->acp_boot_level = kv_get_acp_boot_level(rdev);
1413
1414                ret = kv_copy_bytes_to_smc(rdev,
1415                                           pi->dpm_table_start +
1416                                           offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
1417                                           (u8 *)&pi->acp_boot_level,
1418                                           sizeof(u8),
1419                                           pi->sram_end);
1420                if (ret)
1421                        return ret;
1422
1423                if (pi->caps_stable_p_state)
1424                        kv_send_msg_to_smc_with_parameter(rdev,
1425                                                          PPSMC_MSG_ACPDPM_SetEnabledMask,
1426                                                          (1 << pi->acp_boot_level));
1427        }
1428
1429        return kv_enable_acp_dpm(rdev, !gate);
1430}
1431
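/*
 * Power gate or ungate the UVD block.  Gating stops UVD and disables
 * its clock gating before asking the SMU to power the block off;
 * ungating powers it back on, resumes and restarts UVD and re-enables
 * clock gating.  UVD DPM is updated in both directions.
 */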
1432void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
1433{
1434        struct kv_power_info *pi = kv_get_pi(rdev);
1435
1436        if (pi->uvd_power_gated == gate)
1437                return;
1438
1439        pi->uvd_power_gated = gate;
1440
1441        if (gate) {
1442                if (pi->caps_uvd_pg) {
1443                        uvd_v1_0_stop(rdev);
1444                        cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
1445                }
1446                kv_update_uvd_dpm(rdev, gate);
1447                if (pi->caps_uvd_pg)
1448                        kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF);
1449        } else {
1450                if (pi->caps_uvd_pg) {
1451                        kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON);
1452                        uvd_v4_2_resume(rdev);
1453                        uvd_v1_0_start(rdev);
1454                        cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
1455                }
1456                kv_update_uvd_dpm(rdev, gate);
1457        }
1458}
1459
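/*
 * Power gate or ungate the VCE block via the SMU.  On ungate the VCE
 * firmware is resumed and the engine restarted.
 */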
1460static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
1461{
1462        struct kv_power_info *pi = kv_get_pi(rdev);
1463
1464        if (pi->vce_power_gated == gate)
1465                return;
1466
1467        pi->vce_power_gated = gate;
1468
1469        if (gate) {
1470                if (pi->caps_vce_pg) {
1471                        /* XXX do we need a vce_v1_0_stop()? */
1472                        kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
1473                }
1474        } else {
1475                if (pi->caps_vce_pg) {
1476                        kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
1477                        vce_v2_0_resume(rdev);
1478                        vce_v1_0_start(rdev);
1479                }
1480        }
1481}
1482
1483static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate)
1484{
1485        struct kv_power_info *pi = kv_get_pi(rdev);
1486
1487        if (pi->samu_power_gated == gate)
1488                return;
1489
1490        pi->samu_power_gated = gate;
1491
1492        if (gate) {
1493                kv_update_samu_dpm(rdev, true);
1494                if (pi->caps_samu_pg)
1495                        kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF);
1496        } else {
1497                if (pi->caps_samu_pg)
1498                        kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON);
1499                kv_update_samu_dpm(rdev, false);
1500        }
1501}
1502
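/* Power gate or ungate the ACP block; skipped on Kabini and Mullins. */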
1503static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
1504{
1505        struct kv_power_info *pi = kv_get_pi(rdev);
1506
1507        if (pi->acp_power_gated == gate)
1508                return;
1509
1510        if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
1511                return;
1512
1513        pi->acp_power_gated = gate;
1514
1515        if (gate) {
1516                kv_update_acp_dpm(rdev, true);
1517                if (pi->caps_acp_pg)
1518                        kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF);
1519        } else {
1520                if (pi->caps_acp_pg)
1521                        kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON);
1522                kv_update_acp_dpm(rdev, false);
1523        }
1524}
1525
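/*
 * Determine the lowest and highest graphics DPM levels that bracket the
 * sclk range of the new state, using the vddc/sclk dependency table if
 * present, otherwise the sclk voltage mapping table.
 */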
1526static void kv_set_valid_clock_range(struct radeon_device *rdev,
1527                                     struct radeon_ps *new_rps)
1528{
1529        struct kv_ps *new_ps = kv_get_ps(new_rps);
1530        struct kv_power_info *pi = kv_get_pi(rdev);
1531        u32 i;
1532        struct radeon_clock_voltage_dependency_table *table =
1533                &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1534
1535        if (table && table->count) {
1536                for (i = 0; i < pi->graphics_dpm_level_count; i++) {
1537                        if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
1538                            (i == (pi->graphics_dpm_level_count - 1))) {
1539                                pi->lowest_valid = i;
1540                                break;
1541                        }
1542                }
1543
1544                for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
1545                        if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
1546                                break;
1547                }
1548                pi->highest_valid = i;
1549
1550                if (pi->lowest_valid > pi->highest_valid) {
1551                        if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
1552                            (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
1553                                pi->highest_valid = pi->lowest_valid;
1554                        else
1555                                pi->lowest_valid = pi->highest_valid;
1556                }
1557        } else {
1558                struct sumo_sclk_voltage_mapping_table *table =
1559                        &pi->sys_info.sclk_voltage_mapping_table;
1560
1561                for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
1562                        if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
1563                            i == (int)(pi->graphics_dpm_level_count - 1)) {
1564                                pi->lowest_valid = i;
1565                                break;
1566                        }
1567                }
1568
1569                for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
1570                        if (table->entries[i].sclk_frequency <=
1571                            new_ps->levels[new_ps->num_levels - 1].sclk)
1572                                break;
1573                }
1574                pi->highest_valid = i;
1575
1576                if (pi->lowest_valid > pi->highest_valid) {
1577                        if ((new_ps->levels[0].sclk -
1578                             table->entries[pi->highest_valid].sclk_frequency) >
1579                            (table->entries[pi->lowest_valid].sclk_frequency -
1580                             new_ps->levels[new_ps->num_levels - 1].sclk))
1581                                pi->highest_valid = pi->lowest_valid;
1582                        else
1583                                pi->lowest_valid = pi->highest_valid;
1584                }
1585        }
1586}
1587
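/*
 * Update the DFS bypass control byte of the boot graphics level in the
 * SMU DPM table when DFS bypass is supported.
 */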
1588static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
1589                                         struct radeon_ps *new_rps)
1590{
1591        struct kv_ps *new_ps = kv_get_ps(new_rps);
1592        struct kv_power_info *pi = kv_get_pi(rdev);
1593        int ret = 0;
1594        u8 clk_bypass_cntl;
1595
1596        if (pi->caps_enable_dfs_bypass) {
1597                clk_bypass_cntl = new_ps->need_dfs_bypass ?
1598                        pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
1599                ret = kv_copy_bytes_to_smc(rdev,
1600                                           (pi->dpm_table_start +
1601                                            offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
1602                                            (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
1603                                            offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
1604                                           &clk_bypass_cntl,
1605                                           sizeof(u8), pi->sram_end);
1606        }
1607
1608        return ret;
1609}
1610
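/* Ask the SMU to enable or disable north bridge DPM and track the result. */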
1611static int kv_enable_nb_dpm(struct radeon_device *rdev,
1612                            bool enable)
1613{
1614        struct kv_power_info *pi = kv_get_pi(rdev);
1615        int ret = 0;
1616
1617        if (enable) {
1618                if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
1619                        ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
1620                        if (ret == 0)
1621                                pi->nb_dpm_enabled = true;
1622                }
1623        } else {
1624                if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
1625                        ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Disable);
1626                        if (ret == 0)
1627                                pi->nb_dpm_enabled = false;
1628                }
1629        }
1630
1631        return ret;
1632}
1633
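/*
 * Force the sclk DPM level to the highest or lowest enabled level, or
 * return control to the SMU for automatic selection.
 */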
1634int kv_dpm_force_performance_level(struct radeon_device *rdev,
1635                                   enum radeon_dpm_forced_level level)
1636{
1637        int ret;
1638
1639        if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
1640                ret = kv_force_dpm_highest(rdev);
1641                if (ret)
1642                        return ret;
1643        } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
1644                ret = kv_force_dpm_lowest(rdev);
1645                if (ret)
1646                        return ret;
1647        } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
1648                ret = kv_unforce_levels(rdev);
1649                if (ret)
1650                        return ret;
1651        }
1652
1653        rdev->pm.dpm.forced_level = level;
1654
1655        return 0;
1656}
1657
1658int kv_dpm_pre_set_power_state(struct radeon_device *rdev)
1659{
1660        struct kv_power_info *pi = kv_get_pi(rdev);
1661        struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
1662        struct radeon_ps *new_ps = &requested_ps;
1663
1664        kv_update_requested_ps(rdev, new_ps);
1665
1666        kv_apply_state_adjust_rules(rdev,
1667                                    &pi->requested_rps,
1668                                    &pi->current_rps);
1669
1670        return 0;
1671}
1672
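/*
 * Program the requested power state: recompute the valid clock range,
 * deep sleep dividers and NB p-state settings, upload the DPM table to
 * the SMU and update VCE/ACP/NB DPM as needed.  Kabini and Mullins
 * force the lowest level around the table upload instead of freezing
 * sclk DPM.
 */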
1673int kv_dpm_set_power_state(struct radeon_device *rdev)
1674{
1675        struct kv_power_info *pi = kv_get_pi(rdev);
1676        struct radeon_ps *new_ps = &pi->requested_rps;
1677        struct radeon_ps *old_ps = &pi->current_rps;
1678        int ret;
1679
1680        if (pi->bapm_enable) {
1681                ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
1682                if (ret) {
1683                        DRM_ERROR("kv_smc_bapm_enable failed\n");
1684                        return ret;
1685                }
1686        }
1687
1688        if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
1689                if (pi->enable_dpm) {
1690                        kv_set_valid_clock_range(rdev, new_ps);
1691                        kv_update_dfs_bypass_settings(rdev, new_ps);
1692                        ret = kv_calculate_ds_divider(rdev);
1693                        if (ret) {
1694                                DRM_ERROR("kv_calculate_ds_divider failed\n");
1695                                return ret;
1696                        }
1697                        kv_calculate_nbps_level_settings(rdev);
1698                        kv_calculate_dpm_settings(rdev);
1699                        kv_force_lowest_valid(rdev);
1700                        kv_enable_new_levels(rdev);
1701                        kv_upload_dpm_settings(rdev);
1702                        kv_program_nbps_index_settings(rdev, new_ps);
1703                        kv_unforce_levels(rdev);
1704                        kv_set_enabled_levels(rdev);
1705                        kv_force_lowest_valid(rdev);
1706                        kv_unforce_levels(rdev);
1707
1708                        ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
1709                        if (ret) {
1710                                DRM_ERROR("kv_update_vce_dpm failed\n");
1711                                return ret;
1712                        }
1713                        kv_update_sclk_t(rdev);
1714                        if (rdev->family == CHIP_MULLINS)
1715                                kv_enable_nb_dpm(rdev, true);
1716                }
1717        } else {
1718                if (pi->enable_dpm) {
1719                        kv_set_valid_clock_range(rdev, new_ps);
1720                        kv_update_dfs_bypass_settings(rdev, new_ps);
1721                        ret = kv_calculate_ds_divider(rdev);
1722                        if (ret) {
1723                                DRM_ERROR("kv_calculate_ds_divider failed\n");
1724                                return ret;
1725                        }
1726                        kv_calculate_nbps_level_settings(rdev);
1727                        kv_calculate_dpm_settings(rdev);
1728                        kv_freeze_sclk_dpm(rdev, true);
1729                        kv_upload_dpm_settings(rdev);
1730                        kv_program_nbps_index_settings(rdev, new_ps);
1731                        kv_freeze_sclk_dpm(rdev, false);
1732                        kv_set_enabled_levels(rdev);
1733                        ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
1734                        if (ret) {
1735                                DRM_ERROR("kv_update_vce_dpm failed\n");
1736                                return ret;
1737                        }
1738                        kv_update_acp_boot_level(rdev);
1739                        kv_update_sclk_t(rdev);
1740                        kv_enable_nb_dpm(rdev, true);
1741                }
1742        }
1743
1744        return 0;
1745}
1746
1747void kv_dpm_post_set_power_state(struct radeon_device *rdev)
1748{
1749        struct kv_power_info *pi = kv_get_pi(rdev);
1750        struct radeon_ps *new_ps = &pi->requested_rps;
1751
1752        kv_update_current_ps(rdev, new_ps);
1753}
1754
1755void kv_dpm_setup_asic(struct radeon_device *rdev)
1756{
1757        sumo_take_smu_control(rdev, true);
1758        kv_init_powergate_state(rdev);
1759        kv_init_sclk_t(rdev);
1760}
1761
1762//XXX use sumo_dpm_display_configuration_changed
1763
1764static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
1765                                                struct radeon_clock_and_voltage_limits *table)
1766{
1767        struct kv_power_info *pi = kv_get_pi(rdev);
1768
1769        if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
1770                int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
1771                table->sclk =
1772                        pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
1773                table->vddc =
1774                        kv_convert_2bit_index_to_voltage(rdev,
1775                                                         pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
1776        }
1777
1778        table->mclk = pi->sys_info.nbp_memory_clock[0];
1779}
1780
1781static void kv_patch_voltage_values(struct radeon_device *rdev)
1782{
1783        int i;
1784        struct radeon_uvd_clock_voltage_dependency_table *uvd_table =
1785                &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
1786        struct radeon_vce_clock_voltage_dependency_table *vce_table =
1787                &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
1788        struct radeon_clock_voltage_dependency_table *samu_table =
1789                &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
1790        struct radeon_clock_voltage_dependency_table *acp_table =
1791                &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
1792
1793        if (uvd_table->count) {
1794                for (i = 0; i < uvd_table->count; i++)
1795                        uvd_table->entries[i].v =
1796                                kv_convert_8bit_index_to_voltage(rdev,
1797                                                                 uvd_table->entries[i].v);
1798        }
1799
1800        if (vce_table->count) {
1801                for (i = 0; i < vce_table->count; i++)
1802                        vce_table->entries[i].v =
1803                                kv_convert_8bit_index_to_voltage(rdev,
1804                                                                 vce_table->entries[i].v);
1805        }
1806
1807        if (samu_table->count) {
1808                for (i = 0; i < samu_table->count; i++)
1809                        samu_table->entries[i].v =
1810                                kv_convert_8bit_index_to_voltage(rdev,
1811                                                                 samu_table->entries[i].v);
1812        }
1813
1814        if (acp_table->count) {
1815                for (i = 0; i < acp_table->count; i++)
1816                        acp_table->entries[i].v =
1817                                kv_convert_8bit_index_to_voltage(rdev,
1818                                                                 acp_table->entries[i].v);
1819        }
1820
1821}
1822
1823static void kv_construct_boot_state(struct radeon_device *rdev)
1824{
1825        struct kv_power_info *pi = kv_get_pi(rdev);
1826
1827        pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
1828        pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
1829        pi->boot_pl.ds_divider_index = 0;
1830        pi->boot_pl.ss_divider_index = 0;
1831        pi->boot_pl.allow_gnb_slow = 1;
1832        pi->boot_pl.force_nbp_state = 0;
1833        pi->boot_pl.display_wm = 0;
1834        pi->boot_pl.vce_wm = 0;
1835}
1836
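/* Force the highest enabled sclk DPM level. */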
1837static int kv_force_dpm_highest(struct radeon_device *rdev)
1838{
1839        int ret;
1840        u32 enable_mask, i;
1841
1842        ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
1843        if (ret)
1844                return ret;
1845
1846        for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
1847                if (enable_mask & (1 << i))
1848                        break;
1849        }
1850
1851        if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
1852                return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
1853        else
1854                return kv_set_enabled_level(rdev, i);
1855}
1856
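/* Force the lowest enabled sclk DPM level. */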
1857static int kv_force_dpm_lowest(struct radeon_device *rdev)
1858{
1859        int ret;
1860        u32 enable_mask, i;
1861
1862        ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
1863        if (ret)
1864                return ret;
1865
1866        for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
1867                if (enable_mask & (1 << i))
1868                        break;
1869        }
1870
1871        if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
1872                return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
1873        else
1874                return kv_set_enabled_level(rdev, i);
1875}
1876
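/*
 * Return the largest deep sleep divider id whose divided sclk stays at
 * or above the minimum engine clock.
 */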
1877static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
1878                                             u32 sclk, u32 min_sclk_in_sr)
1879{
1880        struct kv_power_info *pi = kv_get_pi(rdev);
1881        u32 i;
1882        u32 temp;
1883        u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
1884                min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;
1885
1886        if (sclk < min)
1887                return 0;
1888
1889        if (!pi->caps_sclk_ds)
1890                return 0;
1891
1892        for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
1893                temp = sclk / sumo_get_sleep_divider_from_id(i);
1894                if (temp >= min)
1895                        break;
1896        }
1897
1898        return (u8)i;
1899}
1900
1901static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit)
1902{
1903        struct kv_power_info *pi = kv_get_pi(rdev);
1904        struct radeon_clock_voltage_dependency_table *table =
1905                &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1906        int i;
1907
1908        if (table && table->count) {
1909                for (i = table->count - 1; i >= 0; i--) {
1910                        if (pi->high_voltage_t &&
1911                            (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <=
1912                             pi->high_voltage_t)) {
1913                                *limit = i;
1914                                return 0;
1915                        }
1916                }
1917        } else {
1918                struct sumo_sclk_voltage_mapping_table *table =
1919                        &pi->sys_info.sclk_voltage_mapping_table;
1920
1921                for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
1922                        if (pi->high_voltage_t &&
1923                            (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <=
1924                             pi->high_voltage_t)) {
1925                                *limit = i;
1926                                return 0;
1927                        }
1928                }
1929        }
1930
1931        *limit = 0;
1932        return 0;
1933}
1934
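/*
 * Adjust the requested power state: fill in the VCE clocks, raise each
 * level's sclk to the required minimum (or the stable p-state clock),
 * cap levels that exceed the high voltage limit, and choose the NB
 * p-state ranges based on family, display count and battery state.
 */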
1935static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
1936                                        struct radeon_ps *new_rps,
1937                                        struct radeon_ps *old_rps)
1938{
1939        struct kv_ps *ps = kv_get_ps(new_rps);
1940        struct kv_power_info *pi = kv_get_pi(rdev);
1941        u32 min_sclk = 10000; /* ??? */
1942        u32 sclk, mclk = 0;
1943        int i, limit;
1944        bool force_high;
1945        struct radeon_clock_voltage_dependency_table *table =
1946                &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1947        u32 stable_p_state_sclk = 0;
1948        struct radeon_clock_and_voltage_limits *max_limits =
1949                &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
1950
1951        if (new_rps->vce_active) {
1952                new_rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
1953                new_rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
1954        } else {
1955                new_rps->evclk = 0;
1956                new_rps->ecclk = 0;
1957        }
1958
1959        mclk = max_limits->mclk;
1960        sclk = min_sclk;
1961
1962        if (pi->caps_stable_p_state) {
1963                stable_p_state_sclk = (max_limits->sclk * 75) / 100;
1964
1965                for (i = table->count - 1; i >= 0; i--) {
1966                        if (stable_p_state_sclk >= table->entries[i].clk) {
1967                                stable_p_state_sclk = table->entries[i].clk;
1968                                break;
1969                        }
1970                }
1971
1972                if (i > 0)
1973                        stable_p_state_sclk = table->entries[0].clk;
1974
1975                sclk = stable_p_state_sclk;
1976        }
1977
1978        if (new_rps->vce_active) {
1979                if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
1980                        sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
1981        }
1982
1983        ps->need_dfs_bypass = true;
1984
1985        for (i = 0; i < ps->num_levels; i++) {
1986                if (ps->levels[i].sclk < sclk)
1987                        ps->levels[i].sclk = sclk;
1988        }
1989
1990        if (table && table->count) {
1991                for (i = 0; i < ps->num_levels; i++) {
1992                        if (pi->high_voltage_t &&
1993                            (pi->high_voltage_t <
1994                             kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
1995                                kv_get_high_voltage_limit(rdev, &limit);
1996                                ps->levels[i].sclk = table->entries[limit].clk;
1997                        }
1998                }
1999        } else {
2000                struct sumo_sclk_voltage_mapping_table *table =
2001                        &pi->sys_info.sclk_voltage_mapping_table;
2002
2003                for (i = 0; i < ps->num_levels; i++) {
2004                        if (pi->high_voltage_t &&
2005                            (pi->high_voltage_t <
2006                             kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
2007                                kv_get_high_voltage_limit(rdev, &limit);
2008                                ps->levels[i].sclk = table->entries[limit].sclk_frequency;
2009                        }
2010                }
2011        }
2012
2013        if (pi->caps_stable_p_state) {
2014                for (i = 0; i < ps->num_levels; i++) {
2015                        ps->levels[i].sclk = stable_p_state_sclk;
2016                }
2017        }
2018
2019        pi->video_start = new_rps->dclk || new_rps->vclk ||
2020                new_rps->evclk || new_rps->ecclk;
2021
2022        if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
2023            ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
2024                pi->battery_state = true;
2025        else
2026                pi->battery_state = false;
2027
2028        if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
2029                ps->dpm0_pg_nb_ps_lo = 0x1;
2030                ps->dpm0_pg_nb_ps_hi = 0x0;
2031                ps->dpmx_nb_ps_lo = 0x1;
2032                ps->dpmx_nb_ps_hi = 0x0;
2033        } else {
2034                ps->dpm0_pg_nb_ps_lo = 0x3;
2035                ps->dpm0_pg_nb_ps_hi = 0x0;
2036                ps->dpmx_nb_ps_lo = 0x3;
2037                ps->dpmx_nb_ps_hi = 0x0;
2038
2039                if (pi->sys_info.nb_dpm_enable) {
2040                        force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
2041                                pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
2042                                pi->disable_nb_ps3_in_battery;
2043                        ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
2044                        ps->dpm0_pg_nb_ps_hi = 0x2;
2045                        ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
2046                        ps->dpmx_nb_ps_hi = 0x2;
2047                }
2048        }
2049}
2050
2051static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev,
2052                                                    u32 index, bool enable)
2053{
2054        struct kv_power_info *pi = kv_get_pi(rdev);
2055
2056        pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
2057}
2058
2059static int kv_calculate_ds_divider(struct radeon_device *rdev)
2060{
2061        struct kv_power_info *pi = kv_get_pi(rdev);
2062        u32 sclk_in_sr = 10000; /* ??? */
2063        u32 i;
2064
2065        if (pi->lowest_valid > pi->highest_valid)
2066                return -EINVAL;
2067
2068        for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2069                pi->graphics_level[i].DeepSleepDivId =
2070                        kv_get_sleep_divider_id_from_clock(rdev,
2071                                                           be32_to_cpu(pi->graphics_level[i].SclkFrequency),
2072                                                           sclk_in_sr);
2073        }
2074        return 0;
2075}
2076
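/*
 * Set the GnbSlow, ForceNbPs1 and UpH fields of each valid graphics
 * level according to NB DPM capability, display load and battery state.
 */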
2077static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
2078{
2079        struct kv_power_info *pi = kv_get_pi(rdev);
2080        u32 i;
2081        bool force_high;
2082        struct radeon_clock_and_voltage_limits *max_limits =
2083                &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
2084        u32 mclk = max_limits->mclk;
2085
2086        if (pi->lowest_valid > pi->highest_valid)
2087                return -EINVAL;
2088
2089        if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
2090                for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2091                        pi->graphics_level[i].GnbSlow = 1;
2092                        pi->graphics_level[i].ForceNbPs1 = 0;
2093                        pi->graphics_level[i].UpH = 0;
2094                }
2095
2096                if (!pi->sys_info.nb_dpm_enable)
2097                        return 0;
2098
2099                force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
2100                              (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
2101
2102                if (force_high) {
2103                        for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2104                                pi->graphics_level[i].GnbSlow = 0;
2105                } else {
2106                        if (pi->battery_state)
2107                                pi->graphics_level[0].ForceNbPs1 = 1;
2108
2109                        pi->graphics_level[1].GnbSlow = 0;
2110                        pi->graphics_level[2].GnbSlow = 0;
2111                        pi->graphics_level[3].GnbSlow = 0;
2112                        pi->graphics_level[4].GnbSlow = 0;
2113                }
2114        } else {
2115                for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2116                        pi->graphics_level[i].GnbSlow = 1;
2117                        pi->graphics_level[i].ForceNbPs1 = 0;
2118                        pi->graphics_level[i].UpH = 0;
2119                }
2120
2121                if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
2122                        pi->graphics_level[pi->lowest_valid].UpH = 0x28;
2123                        pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
2124                        if (pi->lowest_valid != pi->highest_valid)
2125                                pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
2126                }
2127        }
2128        return 0;
2129}
2130
2131static int kv_calculate_dpm_settings(struct radeon_device *rdev)
2132{
2133        struct kv_power_info *pi = kv_get_pi(rdev);
2134        u32 i;
2135
2136        if (pi->lowest_valid > pi->highest_valid)
2137                return -EINVAL;
2138
2139        for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2140                pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;
2141
2142        return 0;
2143}
2144
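/*
 * Build the graphics DPM levels from the vddc/sclk dependency table (or
 * the sclk voltage mapping table) and leave them all disabled; the
 * valid ones are enabled later once the clock range is known.
 */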
2145static void kv_init_graphics_levels(struct radeon_device *rdev)
2146{
2147        struct kv_power_info *pi = kv_get_pi(rdev);
2148        u32 i;
2149        struct radeon_clock_voltage_dependency_table *table =
2150                &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2151
2152        if (table && table->count) {
2153                u32 vid_2bit;
2154
2155                pi->graphics_dpm_level_count = 0;
2156                for (i = 0; i < table->count; i++) {
2157                        if (pi->high_voltage_t &&
2158                            (pi->high_voltage_t <
2159                             kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v)))
2160                                break;
2161
2162                        kv_set_divider_value(rdev, i, table->entries[i].clk);
2163                        vid_2bit = kv_convert_vid7_to_vid2(rdev,
2164                                                           &pi->sys_info.vid_mapping_table,
2165                                                           table->entries[i].v);
2166                        kv_set_vid(rdev, i, vid_2bit);
2167                        kv_set_at(rdev, i, pi->at[i]);
2168                        kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
2169                        pi->graphics_dpm_level_count++;
2170                }
2171        } else {
2172                struct sumo_sclk_voltage_mapping_table *table =
2173                        &pi->sys_info.sclk_voltage_mapping_table;
2174
2175                pi->graphics_dpm_level_count = 0;
2176                for (i = 0; i < table->num_max_dpm_entries; i++) {
2177                        if (pi->high_voltage_t &&
2178                            pi->high_voltage_t <
2179                            kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit))
2180                                break;
2181
2182                        kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency);
2183                        kv_set_vid(rdev, i, table->entries[i].vid_2bit);
2184                        kv_set_at(rdev, i, pi->at[i]);
2185                        kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
2186                        pi->graphics_dpm_level_count++;
2187                }
2188        }
2189
2190        for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
2191                kv_dpm_power_level_enable(rdev, i, false);
2192}
2193
2194static void kv_enable_new_levels(struct radeon_device *rdev)
2195{
2196        struct kv_power_info *pi = kv_get_pi(rdev);
2197        u32 i;
2198
2199        for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
2200                if (i >= pi->lowest_valid && i <= pi->highest_valid)
2201                        kv_dpm_power_level_enable(rdev, i, true);
2202        }
2203}
2204
2205static int kv_set_enabled_level(struct radeon_device *rdev, u32 level)
2206{
2207        u32 new_mask = (1 << level);
2208
2209        return kv_send_msg_to_smc_with_parameter(rdev,
2210                                                 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2211                                                 new_mask);
2212}
2213
2214static int kv_set_enabled_levels(struct radeon_device *rdev)
2215{
2216        struct kv_power_info *pi = kv_get_pi(rdev);
2217        u32 i, new_mask = 0;
2218
2219        for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2220                new_mask |= (1 << i);
2221
2222        return kv_send_msg_to_smc_with_parameter(rdev,
2223                                                 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2224                                                 new_mask);
2225}
2226
2227static void kv_program_nbps_index_settings(struct radeon_device *rdev,
2228                                           struct radeon_ps *new_rps)
2229{
2230        struct kv_ps *new_ps = kv_get_ps(new_rps);
2231        struct kv_power_info *pi = kv_get_pi(rdev);
2232        u32 nbdpmconfig1;
2233
2234        if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
2235                return;
2236
2237        if (pi->sys_info.nb_dpm_enable) {
2238                nbdpmconfig1 = RREG32_SMC(NB_DPM_CONFIG_1);
2239                nbdpmconfig1 &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK |
2240                                  DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
2241                nbdpmconfig1 |= (Dpm0PgNbPsLo(new_ps->dpm0_pg_nb_ps_lo) |
2242                                 Dpm0PgNbPsHi(new_ps->dpm0_pg_nb_ps_hi) |
2243                                 DpmXNbPsLo(new_ps->dpmx_nb_ps_lo) |
2244                                 DpmXNbPsHi(new_ps->dpmx_nb_ps_hi));
2245                WREG32_SMC(NB_DPM_CONFIG_1, nbdpmconfig1);
2246        }
2247}
2248
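/*
 * Clamp the requested thermal range and program the thermal interrupt
 * high/low thresholds accordingly.
 */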
2249static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
2250                                            int min_temp, int max_temp)
2251{
2252        int low_temp = 0 * 1000;
2253        int high_temp = 255 * 1000;
2254        u32 tmp;
2255
2256        if (low_temp < min_temp)
2257                low_temp = min_temp;
2258        if (high_temp > max_temp)
2259                high_temp = max_temp;
2260        if (high_temp < low_temp) {
2261                DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
2262                return -EINVAL;
2263        }
2264
2265        tmp = RREG32_SMC(CG_THERMAL_INT_CTRL);
2266        tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK);
2267        tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) |
2268                DIG_THERM_INTL(49 + (low_temp / 1000)));
2269        WREG32_SMC(CG_THERMAL_INT_CTRL, tmp);
2270
2271        rdev->pm.dpm.thermal.min_temp = low_temp;
2272        rdev->pm.dpm.thermal.max_temp = high_temp;
2273
2274        return 0;
2275}
2276
2277union igp_info {
2278        struct _ATOM_INTEGRATED_SYSTEM_INFO info;
2279        struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
2280        struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
2281        struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
2282        struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
2283        struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
2284};
2285
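/*
 * Parse the IntegratedSystemInfo v1.8 table from the VBIOS: bootup
 * clocks, thermal limits, NB p-state clocks and the sclk/vid mapping
 * tables.
 */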
2286static int kv_parse_sys_info_table(struct radeon_device *rdev)
2287{
2288        struct kv_power_info *pi = kv_get_pi(rdev);
2289        struct radeon_mode_info *mode_info = &rdev->mode_info;
2290        int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
2291        union igp_info *igp_info;
2292        u8 frev, crev;
2293        u16 data_offset;
2294        int i;
2295
2296        if (atom_parse_data_header(mode_info->atom_context, index, NULL,
2297                                   &frev, &crev, &data_offset)) {
2298                igp_info = (union igp_info *)(mode_info->atom_context->bios +
2299                                              data_offset);
2300
2301                if (crev != 8) {
2302                        DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
2303                        return -EINVAL;
2304                }
2305                pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
2306                pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
2307                pi->sys_info.bootup_nb_voltage_index =
2308                        le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
2309                if (igp_info->info_8.ucHtcTmpLmt == 0)
2310                        pi->sys_info.htc_tmp_lmt = 203;
2311                else
2312                        pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
2313                if (igp_info->info_8.ucHtcHystLmt == 0)
2314                        pi->sys_info.htc_hyst_lmt = 5;
2315                else
2316                        pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
2317                if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
2318                        DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
2319                }
2320
2321                if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
2322                        pi->sys_info.nb_dpm_enable = true;
2323                else
2324                        pi->sys_info.nb_dpm_enable = false;
2325
2326                for (i = 0; i < KV_NUM_NBPSTATES; i++) {
2327                        pi->sys_info.nbp_memory_clock[i] =
2328                                le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
2329                        pi->sys_info.nbp_n_clock[i] =
2330                                le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
2331                }
2332                if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
2333                    SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
2334                        pi->caps_enable_dfs_bypass = true;
2335
2336                sumo_construct_sclk_voltage_mapping_table(rdev,
2337                                                          &pi->sys_info.sclk_voltage_mapping_table,
2338                                                          igp_info->info_8.sAvail_SCLK);
2339
2340                sumo_construct_vid_mapping_table(rdev,
2341                                                 &pi->sys_info.vid_mapping_table,
2342                                                 igp_info->info_8.sAvail_SCLK);
2343
2344                kv_construct_max_power_limits_table(rdev,
2345                                                    &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
2346        }
2347        return 0;
2348}
2349
2350union power_info {
2351        struct _ATOM_POWERPLAY_INFO info;
2352        struct _ATOM_POWERPLAY_INFO_V2 info_2;
2353        struct _ATOM_POWERPLAY_INFO_V3 info_3;
2354        struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
2355        struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
2356        struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
2357};
2358
2359union pplib_clock_info {
2360        struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
2361        struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
2362        struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
2363        struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
2364};
2365
2366union pplib_power_state {
2367        struct _ATOM_PPLIB_STATE v1;
2368        struct _ATOM_PPLIB_STATE_V2 v2;
2369};
2370
2371static void kv_patch_boot_state(struct radeon_device *rdev,
2372                                struct kv_ps *ps)
2373{
2374        struct kv_power_info *pi = kv_get_pi(rdev);
2375
2376        ps->num_levels = 1;
2377        ps->levels[0] = pi->boot_pl;
2378}
2379
2380static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
2381                                          struct radeon_ps *rps,
2382                                          struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
2383                                          u8 table_rev)
2384{
2385        struct kv_ps *ps = kv_get_ps(rps);
2386
2387        rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
2388        rps->class = le16_to_cpu(non_clock_info->usClassification);
2389        rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
2390
2391        if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
2392                rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
2393                rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
2394        } else {
2395                rps->vclk = 0;
2396                rps->dclk = 0;
2397        }
2398
2399        if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
2400                rdev->pm.dpm.boot_ps = rps;
2401                kv_patch_boot_state(rdev, ps);
2402        }
2403        if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
2404                rdev->pm.dpm.uvd_ps = rps;
2405}
2406
2407static void kv_parse_pplib_clock_info(struct radeon_device *rdev,
2408                                      struct radeon_ps *rps, int index,
2409                                      union pplib_clock_info *clock_info)
2410{
2411        struct kv_power_info *pi = kv_get_pi(rdev);
2412        struct kv_ps *ps = kv_get_ps(rps);
2413        struct kv_pl *pl = &ps->levels[index];
2414        u32 sclk;
2415
2416        sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
2417        sclk |= clock_info->sumo.ucEngineClockHigh << 16;
2418        pl->sclk = sclk;
2419        pl->vddc_index = clock_info->sumo.vddcIndex;
2420
2421        ps->num_levels = index + 1;
2422
2423        if (pi->caps_sclk_ds) {
2424                pl->ds_divider_index = 5;
2425                pl->ss_divider_index = 5;
2426        }
2427}
2428
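/*
 * Parse the PowerPlay state and clock info arrays from the VBIOS into
 * radeon_ps/kv_ps structures and fill in the VCE state clocks.
 */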
2429static int kv_parse_power_table(struct radeon_device *rdev)
2430{
2431        struct radeon_mode_info *mode_info = &rdev->mode_info;
2432        struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
2433        union pplib_power_state *power_state;
2434        int i, j, k, non_clock_array_index, clock_array_index;
2435        union pplib_clock_info *clock_info;
2436        struct _StateArray *state_array;
2437        struct _ClockInfoArray *clock_info_array;
2438        struct _NonClockInfoArray *non_clock_info_array;
2439        union power_info *power_info;
2440        int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
2441        u16 data_offset;
2442        u8 frev, crev;
2443        u8 *power_state_offset;
2444        struct kv_ps *ps;
2445
2446        if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
2447                                   &frev, &crev, &data_offset))
2448                return -EINVAL;
2449        power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2450
2451        state_array = (struct _StateArray *)
2452                (mode_info->atom_context->bios + data_offset +
2453                 le16_to_cpu(power_info->pplib.usStateArrayOffset));
2454        clock_info_array = (struct _ClockInfoArray *)
2455                (mode_info->atom_context->bios + data_offset +
2456                 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
2457        non_clock_info_array = (struct _NonClockInfoArray *)
2458                (mode_info->atom_context->bios + data_offset +
2459                 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
2460
2461        rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
2462                                  sizeof(struct radeon_ps),
2463                                  GFP_KERNEL);
2464        if (!rdev->pm.dpm.ps)
2465                return -ENOMEM;
2466        power_state_offset = (u8 *)state_array->states;
2467        for (i = 0; i < state_array->ucNumEntries; i++) {
2468                u8 *idx;
2469                power_state = (union pplib_power_state *)power_state_offset;
2470                non_clock_array_index = power_state->v2.nonClockInfoIndex;
2471                non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
2472                        &non_clock_info_array->nonClockInfo[non_clock_array_index];
2473                if (!rdev->pm.power_state[i].clock_info)
2474                        return -EINVAL;
2475                ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
2476                if (ps == NULL) {
2477                        kfree(rdev->pm.dpm.ps);
2478                        return -ENOMEM;
2479                }
2480                rdev->pm.dpm.ps[i].ps_priv = ps;
2481                k = 0;
2482                idx = (u8 *)&power_state->v2.clockInfoIndex[0];
2483                for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
2484                        clock_array_index = idx[j];
2485                        if (clock_array_index >= clock_info_array->ucNumEntries)
2486                                continue;
2487                        if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
2488                                break;
2489                        clock_info = (union pplib_clock_info *)
2490                                ((u8 *)&clock_info_array->clockInfo[0] +
2491                                 (clock_array_index * clock_info_array->ucEntrySize));
2492                        kv_parse_pplib_clock_info(rdev,
2493                                                  &rdev->pm.dpm.ps[i], k,
2494                                                  clock_info);
2495                        k++;
2496                }
2497                kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
2498                                              non_clock_info,
2499                                              non_clock_info_array->ucEntrySize);
2500                power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
2501        }
2502        rdev->pm.dpm.num_ps = state_array->ucNumEntries;
2503
2504        /* fill in the vce power states */
2505        for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
2506                u32 sclk;
2507                clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
2508                clock_info = (union pplib_clock_info *)
2509                        &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
2510                sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
2511                sclk |= clock_info->sumo.ucEngineClockHigh << 16;
2512                rdev->pm.dpm.vce_states[i].sclk = sclk;
2513                rdev->pm.dpm.vce_states[i].mclk = 0;
2514        }
2515
2516        return 0;
2517}
2518
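/*
 * One-time DPM initialization: allocate the private power info, parse
 * the extended power, system info and power state tables, and set the
 * default feature caps.
 */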
2519int kv_dpm_init(struct radeon_device *rdev)
2520{
2521        struct kv_power_info *pi;
2522        int ret, i;
2523
2524        pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
2525        if (pi == NULL)
2526                return -ENOMEM;
2527        rdev->pm.dpm.priv = pi;
2528
2529        ret = r600_get_platform_caps(rdev);
2530        if (ret)
2531                return ret;
2532
2533        ret = r600_parse_extended_power_table(rdev);
2534        if (ret)
2535                return ret;
2536
2537        for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
2538                pi->at[i] = TRINITY_AT_DFLT;
2539
2540        pi->sram_end = SMC_RAM_END;
2541
2542        /* Enabling nb dpm on an ASRock system prevents dpm from working */
2543        if (rdev->pdev->subsystem_vendor == 0x1849)
2544                pi->enable_nb_dpm = false;
2545        else
2546                pi->enable_nb_dpm = true;
2547
2548        pi->caps_power_containment = true;
2549        pi->caps_cac = true;
2550        pi->enable_didt = false;
2551        if (pi->enable_didt) {
2552                pi->caps_sq_ramping = true;
2553                pi->caps_db_ramping = true;
2554                pi->caps_td_ramping = true;
2555                pi->caps_tcp_ramping = true;
2556        }
2557
2558        pi->caps_sclk_ds = true;
2559        pi->enable_auto_thermal_throttling = true;
2560        pi->disable_nb_ps3_in_battery = false;
2561        if (radeon_bapm == -1) {
2562                /* only enable bapm on KB, ML by default */
2563                if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
2564                        pi->bapm_enable = true;
2565                else
2566                        pi->bapm_enable = false;
2567        } else if (radeon_bapm == 0) {
2568                pi->bapm_enable = false;
2569        } else {
2570                pi->bapm_enable = true;
2571        }
2572        pi->voltage_drop_t = 0;
2573        pi->caps_sclk_throttle_low_notification = false;
2574        pi->caps_fps = false; /* true? */
2575        pi->caps_uvd_pg = true;
2576        pi->caps_uvd_dpm = true;
2577        pi->caps_vce_pg = false; /* XXX true */
2578        pi->caps_samu_pg = false;
2579        pi->caps_acp_pg = false;
2580        pi->caps_stable_p_state = false;
2581
2582        ret = kv_parse_sys_info_table(rdev);
2583        if (ret)
2584                return ret;
2585
2586        kv_patch_voltage_values(rdev);
2587        kv_construct_boot_state(rdev);
2588
2589        ret = kv_parse_power_table(rdev);
2590        if (ret)
2591                return ret;
2592
2593        pi->enable_dpm = true;
2594
2595        return 0;
2596}
2597
2598void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
2599                                                    struct seq_file *m)
2600{
2601        struct kv_power_info *pi = kv_get_pi(rdev);
2602        u32 current_index =
2603                (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
2604                CURR_SCLK_INDEX_SHIFT;
2605        u32 sclk, tmp;
2606        u16 vddc;
2607
2608        if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
2609                seq_printf(m, "invalid dpm profile %d\n", current_index);
2610        } else {
2611                sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
2612                tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
2613                        SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
2614                vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
2615                seq_printf(m, "uvd    %sabled\n", pi->uvd_power_gated ? "dis" : "en");
2616                seq_printf(m, "vce    %sabled\n", pi->vce_power_gated ? "dis" : "en");
2617                seq_printf(m, "power level %d    sclk: %u vddc: %u\n",
2618                           current_index, sclk, vddc);
2619        }
2620}
2621
2622u32 kv_dpm_get_current_sclk(struct radeon_device *rdev)
2623{
2624        struct kv_power_info *pi = kv_get_pi(rdev);
2625        u32 current_index =
2626                (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
2627                CURR_SCLK_INDEX_SHIFT;
2628        u32 sclk;
2629
2630        if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
2631                return 0;
2632        } else {
2633                sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
2634                return sclk;
2635        }
2636}
2637
2638u32 kv_dpm_get_current_mclk(struct radeon_device *rdev)
2639{
2640        struct kv_power_info *pi = kv_get_pi(rdev);
2641
2642        return pi->sys_info.bootup_uma_clk;
2643}
2644
2645void kv_dpm_print_power_state(struct radeon_device *rdev,
2646                              struct radeon_ps *rps)
2647{
2648        int i;
2649        struct kv_ps *ps = kv_get_ps(rps);
2650
2651        r600_dpm_print_class_info(rps->class, rps->class2);
2652        r600_dpm_print_cap_info(rps->caps);
2653        printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
2654        for (i = 0; i < ps->num_levels; i++) {
2655                struct kv_pl *pl = &ps->levels[i];
2656                printk("\t\tpower level %d    sclk: %u vddc: %u\n",
2657                       i, pl->sclk,
2658                       kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index));
2659        }
2660        r600_dpm_print_ps_status(rdev, rps);
2661}
2662
2663void kv_dpm_fini(struct radeon_device *rdev)
2664{
2665        int i;
2666
2667        for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
2668                kfree(rdev->pm.dpm.ps[i].ps_priv);
2669        }
2670        kfree(rdev->pm.dpm.ps);
2671        kfree(rdev->pm.dpm.priv);
2672        r600_free_extended_power_table(rdev);
2673}
2674
2675void kv_dpm_display_configuration_changed(struct radeon_device *rdev)
2676{
2677
2678}
2679
2680u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low)
2681{
2682        struct kv_power_info *pi = kv_get_pi(rdev);
2683        struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
2684
2685        if (low)
2686                return requested_state->levels[0].sclk;
2687        else
2688                return requested_state->levels[requested_state->num_levels - 1].sclk;
2689}
2690
2691u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low)
2692{
2693        struct kv_power_info *pi = kv_get_pi(rdev);
2694
2695        return pi->sys_info.bootup_uma_clk;
2696}
2697
2698