/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
24#include <linux/firmware.h>
25#include <linux/pci.h>
26#include "amdgpu.h"
27#include "amdgpu_smu.h"
28#include "smu_internal.h"
29#include "atomfirmware.h"
30#include "amdgpu_atomfirmware.h"
31#include "soc15_common.h"
32#include "smu_v11_0.h"
33#include "smu11_driver_if_navi10.h"
34#include "atom.h"
35#include "navi10_ppt.h"
36#include "smu_v11_0_pptable.h"
37#include "smu_v11_0_ppsmc.h"
38#include "nbio/nbio_2_3_offset.h"
39#include "nbio/nbio_2_3_sh_mask.h"
40
41#include "asic_reg/mp/mp_11_0_sh_mask.h"
42
/*
 * Turn a FEATURE_*_BIT index into its 64-bit feature-mask bit.
 * The argument is parenthesized so that expressions such as
 * FEATURE_MASK(a | b) expand with the intended precedence (the
 * original expansion would have applied '<<' before '|').
 */
#define FEATURE_MASK(feature) (1ULL << (feature))

/* Features that together constitute SMC-managed DPM on Navi10. */
#define SMC_DPM_FEATURE ( \
	FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
	FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_GFX_PACE_BIT) | \
	FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))

/* Build one navi10_message_map[] entry and mark the mapping valid. */
#define MSG_MAP(msg, index) \
	[SMU_MSG_##msg] = {1, (index)}
56
/*
 * Driver-agnostic SMU_MSG_* id -> Navi10 PPSMC message index.
 * Slots not listed here keep valid_mapping == 0 and are rejected by
 * navi10_get_smu_msg_index().
 */
static struct smu_11_0_cmn2aisc_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,			PPSMC_MSG_TestMessage),
	MSG_MAP(GetSmuVersion,			PPSMC_MSG_GetSmuVersion),
	MSG_MAP(GetDriverIfVersion,		PPSMC_MSG_GetDriverIfVersion),
	MSG_MAP(SetAllowedFeaturesMaskLow,	PPSMC_MSG_SetAllowedFeaturesMaskLow),
	MSG_MAP(SetAllowedFeaturesMaskHigh,	PPSMC_MSG_SetAllowedFeaturesMaskHigh),
	MSG_MAP(EnableAllSmuFeatures,		PPSMC_MSG_EnableAllSmuFeatures),
	MSG_MAP(DisableAllSmuFeatures,		PPSMC_MSG_DisableAllSmuFeatures),
	MSG_MAP(EnableSmuFeaturesLow,		PPSMC_MSG_EnableSmuFeaturesLow),
	MSG_MAP(EnableSmuFeaturesHigh,		PPSMC_MSG_EnableSmuFeaturesHigh),
	MSG_MAP(DisableSmuFeaturesLow,		PPSMC_MSG_DisableSmuFeaturesLow),
	MSG_MAP(DisableSmuFeaturesHigh,		PPSMC_MSG_DisableSmuFeaturesHigh),
	MSG_MAP(GetEnabledSmuFeaturesLow,	PPSMC_MSG_GetEnabledSmuFeaturesLow),
	MSG_MAP(GetEnabledSmuFeaturesHigh,	PPSMC_MSG_GetEnabledSmuFeaturesHigh),
	MSG_MAP(SetWorkloadMask,		PPSMC_MSG_SetWorkloadMask),
	MSG_MAP(SetPptLimit,			PPSMC_MSG_SetPptLimit),
	MSG_MAP(SetDriverDramAddrHigh,		PPSMC_MSG_SetDriverDramAddrHigh),
	MSG_MAP(SetDriverDramAddrLow,		PPSMC_MSG_SetDriverDramAddrLow),
	MSG_MAP(SetToolsDramAddrHigh,		PPSMC_MSG_SetToolsDramAddrHigh),
	MSG_MAP(SetToolsDramAddrLow,		PPSMC_MSG_SetToolsDramAddrLow),
	MSG_MAP(TransferTableSmu2Dram,		PPSMC_MSG_TransferTableSmu2Dram),
	MSG_MAP(TransferTableDram2Smu,		PPSMC_MSG_TransferTableDram2Smu),
	MSG_MAP(UseDefaultPPTable,		PPSMC_MSG_UseDefaultPPTable),
	MSG_MAP(UseBackupPPTable,		PPSMC_MSG_UseBackupPPTable),
	MSG_MAP(RunBtc,				PPSMC_MSG_RunBtc),
	MSG_MAP(EnterBaco,			PPSMC_MSG_EnterBaco),
	MSG_MAP(SetSoftMinByFreq,		PPSMC_MSG_SetSoftMinByFreq),
	MSG_MAP(SetSoftMaxByFreq,		PPSMC_MSG_SetSoftMaxByFreq),
	MSG_MAP(SetHardMinByFreq,		PPSMC_MSG_SetHardMinByFreq),
	MSG_MAP(SetHardMaxByFreq,		PPSMC_MSG_SetHardMaxByFreq),
	MSG_MAP(GetMinDpmFreq,			PPSMC_MSG_GetMinDpmFreq),
	MSG_MAP(GetMaxDpmFreq,			PPSMC_MSG_GetMaxDpmFreq),
	MSG_MAP(GetDpmFreqByIndex,		PPSMC_MSG_GetDpmFreqByIndex),
	MSG_MAP(SetMemoryChannelConfig,		PPSMC_MSG_SetMemoryChannelConfig),
	MSG_MAP(SetGeminiMode,			PPSMC_MSG_SetGeminiMode),
	MSG_MAP(SetGeminiApertureHigh,		PPSMC_MSG_SetGeminiApertureHigh),
	MSG_MAP(SetGeminiApertureLow,		PPSMC_MSG_SetGeminiApertureLow),
	MSG_MAP(OverridePcieParameters,		PPSMC_MSG_OverridePcieParameters),
	MSG_MAP(SetMinDeepSleepDcefclk,		PPSMC_MSG_SetMinDeepSleepDcefclk),
	MSG_MAP(ReenableAcDcInterrupt,		PPSMC_MSG_ReenableAcDcInterrupt),
	MSG_MAP(NotifyPowerSource,		PPSMC_MSG_NotifyPowerSource),
	MSG_MAP(SetUclkFastSwitch,		PPSMC_MSG_SetUclkFastSwitch),
	MSG_MAP(SetVideoFps,			PPSMC_MSG_SetVideoFps),
	MSG_MAP(PrepareMp1ForUnload,		PPSMC_MSG_PrepareMp1ForUnload),
	MSG_MAP(DramLogSetDramAddrHigh,		PPSMC_MSG_DramLogSetDramAddrHigh),
	MSG_MAP(DramLogSetDramAddrLow,		PPSMC_MSG_DramLogSetDramAddrLow),
	MSG_MAP(DramLogSetDramSize,		PPSMC_MSG_DramLogSetDramSize),
	MSG_MAP(ConfigureGfxDidt,		PPSMC_MSG_ConfigureGfxDidt),
	MSG_MAP(NumOfDisplays,			PPSMC_MSG_NumOfDisplays),
	MSG_MAP(SetSystemVirtualDramAddrHigh,	PPSMC_MSG_SetSystemVirtualDramAddrHigh),
	MSG_MAP(SetSystemVirtualDramAddrLow,	PPSMC_MSG_SetSystemVirtualDramAddrLow),
	MSG_MAP(AllowGfxOff,			PPSMC_MSG_AllowGfxOff),
	MSG_MAP(DisallowGfxOff,			PPSMC_MSG_DisallowGfxOff),
	MSG_MAP(GetPptLimit,			PPSMC_MSG_GetPptLimit),
	MSG_MAP(GetDcModeMaxDpmFreq,		PPSMC_MSG_GetDcModeMaxDpmFreq),
	MSG_MAP(GetDebugData,			PPSMC_MSG_GetDebugData),
	MSG_MAP(ExitBaco,			PPSMC_MSG_ExitBaco),
	MSG_MAP(PrepareMp1ForReset,		PPSMC_MSG_PrepareMp1ForReset),
	MSG_MAP(PrepareMp1ForShutdown,		PPSMC_MSG_PrepareMp1ForShutdown),
	MSG_MAP(PowerUpVcn,			PPSMC_MSG_PowerUpVcn),
	MSG_MAP(PowerDownVcn,			PPSMC_MSG_PowerDownVcn),
	MSG_MAP(PowerUpJpeg,			PPSMC_MSG_PowerUpJpeg),
	MSG_MAP(PowerDownJpeg,			PPSMC_MSG_PowerDownJpeg),
	MSG_MAP(BacoAudioD3PME,			PPSMC_MSG_BacoAudioD3PME),
	MSG_MAP(ArmD3,				PPSMC_MSG_ArmD3),
	MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE,PPSMC_MSG_DALDisableDummyPstateChange),
	MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE,	PPSMC_MSG_DALEnableDummyPstateChange),
	MSG_MAP(GetVoltageByDpm,		PPSMC_MSG_GetVoltageByDpm),
	MSG_MAP(GetVoltageByDpmOverdrive,	PPSMC_MSG_GetVoltageByDpmOverdrive),
};
127
/*
 * Generic SMU_CLK_* id -> Navi10 PPCLK_* index.
 * SCLK aliases GFXCLK and MCLK aliases UCLK.  NOTE(review): FCLK is
 * deliberately mapped onto PPCLK_SOCCLK here — presumably Navi10 has
 * no independent FCLK domain; confirm against the PPSMC spec.
 */
static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(GFXCLK, PPCLK_GFXCLK),
	CLK_MAP(SCLK,	PPCLK_GFXCLK),
	CLK_MAP(SOCCLK, PPCLK_SOCCLK),
	CLK_MAP(FCLK, PPCLK_SOCCLK),
	CLK_MAP(UCLK, PPCLK_UCLK),
	CLK_MAP(MCLK, PPCLK_UCLK),
	CLK_MAP(DCLK, PPCLK_DCLK),
	CLK_MAP(VCLK, PPCLK_VCLK),
	CLK_MAP(DCEFCLK, PPCLK_DCEFCLK),
	CLK_MAP(DISPCLK, PPCLK_DISPCLK),
	CLK_MAP(PIXCLK, PPCLK_PIXCLK),
	CLK_MAP(PHYCLK, PPCLK_PHYCLK),
};
142
/*
 * Generic SMU_FEATURE_* id -> Navi10 FEATURE_*_BIT.  Entries are built
 * by FEA_MAP(); features not listed remain invalid mappings.
 */
static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(DPM_PREFETCHER),
	FEA_MAP(DPM_GFXCLK),
	FEA_MAP(DPM_GFX_PACE),
	FEA_MAP(DPM_UCLK),
	FEA_MAP(DPM_SOCCLK),
	FEA_MAP(DPM_MP0CLK),
	FEA_MAP(DPM_LINK),
	FEA_MAP(DPM_DCEFCLK),
	FEA_MAP(MEM_VDDCI_SCALING),
	FEA_MAP(MEM_MVDD_SCALING),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_DCEFCLK),
	FEA_MAP(DS_UCLK),
	FEA_MAP(GFX_ULV),
	FEA_MAP(FW_DSTATE),
	FEA_MAP(GFXOFF),
	FEA_MAP(BACO),
	FEA_MAP(VCN_PG),
	FEA_MAP(JPEG_PG),
	FEA_MAP(USB_PG),
	FEA_MAP(RSMU_SMN_CG),
	FEA_MAP(PPT),
	FEA_MAP(TDC),
	FEA_MAP(GFX_EDC),
	FEA_MAP(APCC_PLUS),
	FEA_MAP(GTHR),
	FEA_MAP(ACDC),
	FEA_MAP(VR0HOT),
	FEA_MAP(VR1HOT),
	FEA_MAP(FW_CTF),
	FEA_MAP(FAN_CONTROL),
	FEA_MAP(THERMAL),
	FEA_MAP(GFX_DCS),
	FEA_MAP(RM),
	FEA_MAP(LED_DISPLAY),
	FEA_MAP(GFX_SS),
	FEA_MAP(OUT_OF_BAND_MONITOR),
	FEA_MAP(TEMP_DEPENDENT_VMIN),
	FEA_MAP(MMHUB_PG),
	FEA_MAP(ATHUB_PG),
	FEA_MAP(APCC_DFLL),
};
188
/* Generic SMU_TABLE_* id -> Navi10 TABLE_* index (built by TAB_MAP()). */
static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(PPTABLE),
	TAB_MAP(WATERMARKS),
	TAB_MAP(AVFS),
	TAB_MAP(AVFS_PSM_DEBUG),
	TAB_MAP(AVFS_FUSE_OVERRIDE),
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(DRIVER_SMU_CONFIG),
	TAB_MAP(ACTIVITY_MONITOR_COEFF),
	TAB_MAP(OVERDRIVE),
	TAB_MAP(I2C_COMMANDS),
	TAB_MAP(PACE),
};
203
/* Generic power-source id -> Navi10 POWER_SOURCE_* index. */
static struct smu_11_0_cmn2aisc_mapping navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
	PWR_MAP(AC),
	PWR_MAP(DC),
};
208
/*
 * Power-profile id -> PPLIB workload bit, consumed via
 * SMU_MSG_SetWorkloadMask by navi10_get_workload_type().
 */
static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT,	WORKLOAD_PPLIB_DEFAULT_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,		WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,		WORKLOAD_PPLIB_POWER_SAVING_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,		WORKLOAD_PPLIB_VIDEO_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,			WORKLOAD_PPLIB_VR_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,		WORKLOAD_PPLIB_COMPUTE_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,		WORKLOAD_PPLIB_CUSTOM_BIT),
};
218
219static int navi10_get_smu_msg_index(struct smu_context *smc, uint32_t index)
220{
221 struct smu_11_0_cmn2aisc_mapping mapping;
222
223 if (index >= SMU_MSG_MAX_COUNT)
224 return -EINVAL;
225
226 mapping = navi10_message_map[index];
227 if (!(mapping.valid_mapping)) {
228 return -EINVAL;
229 }
230
231 return mapping.map_to;
232}
233
234static int navi10_get_smu_clk_index(struct smu_context *smc, uint32_t index)
235{
236 struct smu_11_0_cmn2aisc_mapping mapping;
237
238 if (index >= SMU_CLK_COUNT)
239 return -EINVAL;
240
241 mapping = navi10_clk_map[index];
242 if (!(mapping.valid_mapping)) {
243 return -EINVAL;
244 }
245
246 return mapping.map_to;
247}
248
249static int navi10_get_smu_feature_index(struct smu_context *smc, uint32_t index)
250{
251 struct smu_11_0_cmn2aisc_mapping mapping;
252
253 if (index >= SMU_FEATURE_COUNT)
254 return -EINVAL;
255
256 mapping = navi10_feature_mask_map[index];
257 if (!(mapping.valid_mapping)) {
258 return -EINVAL;
259 }
260
261 return mapping.map_to;
262}
263
264static int navi10_get_smu_table_index(struct smu_context *smc, uint32_t index)
265{
266 struct smu_11_0_cmn2aisc_mapping mapping;
267
268 if (index >= SMU_TABLE_COUNT)
269 return -EINVAL;
270
271 mapping = navi10_table_map[index];
272 if (!(mapping.valid_mapping)) {
273 return -EINVAL;
274 }
275
276 return mapping.map_to;
277}
278
279static int navi10_get_pwr_src_index(struct smu_context *smc, uint32_t index)
280{
281 struct smu_11_0_cmn2aisc_mapping mapping;
282
283 if (index >= SMU_POWER_SOURCE_COUNT)
284 return -EINVAL;
285
286 mapping = navi10_pwr_src_map[index];
287 if (!(mapping.valid_mapping)) {
288 return -EINVAL;
289 }
290
291 return mapping.map_to;
292}
293
294
295static int navi10_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
296{
297 struct smu_11_0_cmn2aisc_mapping mapping;
298
299 if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
300 return -EINVAL;
301
302 mapping = navi10_workload_map[profile];
303 if (!(mapping.valid_mapping)) {
304 return -EINVAL;
305 }
306
307 return mapping.map_to;
308}
309
310static bool is_asic_secure(struct smu_context *smu)
311{
312 struct amdgpu_device *adev = smu->adev;
313 bool is_secure = true;
314 uint32_t mp0_fw_intf;
315
316 mp0_fw_intf = RREG32_PCIE(MP0_Public |
317 (smnMP0_FW_INTF & 0xffffffff));
318
319 if (!(mp0_fw_intf & (1 << 19)))
320 is_secure = false;
321
322 return is_secure;
323}
324
325static int
326navi10_get_allowed_feature_mask(struct smu_context *smu,
327 uint32_t *feature_mask, uint32_t num)
328{
329 struct amdgpu_device *adev = smu->adev;
330
331 if (num > 2)
332 return -EINVAL;
333
334 memset(feature_mask, 0, sizeof(uint32_t) * num);
335
336 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT)
337 | FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)
338 | FEATURE_MASK(FEATURE_RSMU_SMN_CG_BIT)
339 | FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
340 | FEATURE_MASK(FEATURE_PPT_BIT)
341 | FEATURE_MASK(FEATURE_TDC_BIT)
342 | FEATURE_MASK(FEATURE_GFX_EDC_BIT)
343 | FEATURE_MASK(FEATURE_APCC_PLUS_BIT)
344 | FEATURE_MASK(FEATURE_VR0HOT_BIT)
345 | FEATURE_MASK(FEATURE_FAN_CONTROL_BIT)
346 | FEATURE_MASK(FEATURE_THERMAL_BIT)
347 | FEATURE_MASK(FEATURE_LED_DISPLAY_BIT)
348 | FEATURE_MASK(FEATURE_DS_LCLK_BIT)
349 | FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
350 | FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
351 | FEATURE_MASK(FEATURE_BACO_BIT)
352 | FEATURE_MASK(FEATURE_GFX_SS_BIT)
353 | FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
354 | FEATURE_MASK(FEATURE_FW_CTF_BIT)
355 | FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
356
357 if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
358 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
359
360 if (adev->pm.pp_feature & PP_SCLK_DPM_MASK)
361 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
362
363 if (adev->pm.pp_feature & PP_PCIE_DPM_MASK)
364 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);
365
366 if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
367 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT);
368
369 if (adev->pm.pp_feature & PP_MCLK_DPM_MASK)
370 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
371 | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
372 | FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);
373
374 if (adev->pm.pp_feature & PP_ULV_MASK)
375 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
376
377 if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
378 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
379
380 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
381 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
382
383 if (smu->adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
384 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MMHUB_PG_BIT);
385
386 if (smu->adev->pg_flags & AMD_PG_SUPPORT_ATHUB)
387 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT);
388
389 if (smu->adev->pg_flags & AMD_PG_SUPPORT_VCN)
390 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT);
391
392 if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
393 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT);
394
395 if (smu->dc_controlled_by_gpio)
396 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);
397
398
399 if (is_asic_secure(smu)) {
400
401 if ((adev->asic_type == CHIP_NAVI10) &&
402 (adev->rev_id == 0)) {
403 *(uint64_t *)feature_mask &=
404 ~(FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
405 | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
406 | FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT));
407 *(uint64_t *)feature_mask &=
408 ~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
409 }
410 }
411
412 return 0;
413}
414
/* No Navi10-specific pptable validation is required; accept as-is. */
static int navi10_check_powerplay_table(struct smu_context *smu)
{
	return 0;
}
419
/*
 * Merge board-specific SMC DPM settings from the VBIOS smc_dpm_info
 * data table into the driver PPTable.
 *
 * Only table format revision 4 is accepted, with content revision 5
 * (atom_smc_dpm_info_v4_5) or 7 (atom_smc_dpm_info_v4_7).  In both
 * cases everything after the atom table header — starting at the
 * I2cControllers member — is copied verbatim into the PPTable.
 *
 * Returns 0 on success, -EINVAL for unsupported revisions, or the
 * error from smu_get_atom_data_table().
 */
static int navi10_append_powerplay_table(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	struct atom_smc_dpm_info_v4_5 *smc_dpm_table;
	struct atom_smc_dpm_info_v4_7 *smc_dpm_table_v4_7;
	int index, ret;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smc_dpm_info);

	ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
				      (uint8_t **)&smc_dpm_table);
	if (ret)
		return ret;

	pr_info("smc_dpm_info table revision(format.content): %d.%d\n",
			smc_dpm_table->table_header.format_revision,
			smc_dpm_table->table_header.content_revision);

	if (smc_dpm_table->table_header.format_revision != 4) {
		pr_err("smc_dpm_info table format revision is not 4!\n");
		return -EINVAL;
	}

	switch (smc_dpm_table->table_header.content_revision) {
	case 5:
		/* Copy the v4_5 payload (everything after the header). */
		memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
			sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header));
		break;
	case 7:
		/* Re-fetch through the v4_7 layout, then copy its payload. */
		ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
					      (uint8_t **)&smc_dpm_table_v4_7);
		if (ret)
			return ret;
		memcpy(smc_pptable->I2cControllers, smc_dpm_table_v4_7->I2cControllers,
			sizeof(*smc_dpm_table_v4_7) - sizeof(smc_dpm_table_v4_7->table_header));
		break;
	default:
		pr_err("smc_dpm_info with unsupported content revision %d!\n",
				smc_dpm_table->table_header.content_revision);
		return -EINVAL;
	}

	if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
		/*
		 * NOTE(review): with GFXOFF enabled the DFLL PLL shutdown
		 * override is forced — presumably a GFXOFF/DFLL interaction
		 * workaround; rationale not recorded here, confirm.
		 */
		smc_pptable->DebugOverrides |= DPM_OVERRIDE_DISABLE_DFLL_PLL_SHUTDOWN;
	}

	return 0;
}
472
473static int navi10_store_powerplay_table(struct smu_context *smu)
474{
475 struct smu_11_0_powerplay_table *powerplay_table = NULL;
476 struct smu_table_context *table_context = &smu->smu_table;
477 struct smu_baco_context *smu_baco = &smu->smu_baco;
478
479 if (!table_context->power_play_table)
480 return -EINVAL;
481
482 powerplay_table = table_context->power_play_table;
483
484 memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
485 sizeof(PPTable_t));
486
487 table_context->thermal_controller_type = powerplay_table->thermal_controller_type;
488
489 if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC)
490 smu->dc_controlled_by_gpio = true;
491
492 mutex_lock(&smu_baco->mutex);
493 if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
494 powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
495 smu_baco->platform_support = true;
496 mutex_unlock(&smu_baco->mutex);
497
498 return 0;
499}
500
501static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
502{
503 struct smu_table_context *smu_table = &smu->smu_table;
504
505 SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
506 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
507 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
508 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
509 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
510 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
511 SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
512 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
513 SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
514 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
515 SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
516 sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
517 AMDGPU_GEM_DOMAIN_VRAM);
518
519 smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
520 if (!smu_table->metrics_table)
521 return -ENOMEM;
522 smu_table->metrics_time = 0;
523
524 smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
525 if (!smu_table->watermarks_table)
526 return -ENOMEM;
527
528 return 0;
529}
530
531static int navi10_get_metrics_table(struct smu_context *smu,
532 SmuMetrics_t *metrics_table)
533{
534 struct smu_table_context *smu_table= &smu->smu_table;
535 int ret = 0;
536
537 mutex_lock(&smu->metrics_lock);
538 if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
539 ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
540 (void *)smu_table->metrics_table, false);
541 if (ret) {
542 pr_info("Failed to export SMU metrics table!\n");
543 mutex_unlock(&smu->metrics_lock);
544 return ret;
545 }
546 smu_table->metrics_time = jiffies;
547 }
548
549 memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
550 mutex_unlock(&smu->metrics_lock);
551
552 return ret;
553}
554
555static int navi10_allocate_dpm_context(struct smu_context *smu)
556{
557 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
558
559 if (smu_dpm->dpm_context)
560 return -EINVAL;
561
562 smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
563 GFP_KERNEL);
564 if (!smu_dpm->dpm_context)
565 return -ENOMEM;
566
567 smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context);
568
569 return 0;
570}
571
/*
 * Seed the driver DPM context from the PPTable: for every clock domain
 * the min is frequency-table entry 0 and the max is the last entry;
 * the PCIe table records per-level gen speed and lane count.
 * Always returns 0.
 */
static int navi10_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	PPTable_t *driver_ppt = NULL;
	int i;

	driver_ppt = table_context->driver_pptable;

	dpm_context->dpm_tables.soc_table.min = driver_ppt->FreqTableSocclk[0];
	dpm_context->dpm_tables.soc_table.max = driver_ppt->FreqTableSocclk[NUM_SOCCLK_DPM_LEVELS - 1];

	dpm_context->dpm_tables.gfx_table.min = driver_ppt->FreqTableGfx[0];
	dpm_context->dpm_tables.gfx_table.max = driver_ppt->FreqTableGfx[NUM_GFXCLK_DPM_LEVELS - 1];

	dpm_context->dpm_tables.uclk_table.min = driver_ppt->FreqTableUclk[0];
	dpm_context->dpm_tables.uclk_table.max = driver_ppt->FreqTableUclk[NUM_UCLK_DPM_LEVELS - 1];

	dpm_context->dpm_tables.vclk_table.min = driver_ppt->FreqTableVclk[0];
	dpm_context->dpm_tables.vclk_table.max = driver_ppt->FreqTableVclk[NUM_VCLK_DPM_LEVELS - 1];

	dpm_context->dpm_tables.dclk_table.min = driver_ppt->FreqTableDclk[0];
	dpm_context->dpm_tables.dclk_table.max = driver_ppt->FreqTableDclk[NUM_DCLK_DPM_LEVELS - 1];

	dpm_context->dpm_tables.dcef_table.min = driver_ppt->FreqTableDcefclk[0];
	dpm_context->dpm_tables.dcef_table.max = driver_ppt->FreqTableDcefclk[NUM_DCEFCLK_DPM_LEVELS - 1];

	dpm_context->dpm_tables.pixel_table.min = driver_ppt->FreqTablePixclk[0];
	dpm_context->dpm_tables.pixel_table.max = driver_ppt->FreqTablePixclk[NUM_PIXCLK_DPM_LEVELS - 1];

	dpm_context->dpm_tables.display_table.min = driver_ppt->FreqTableDispclk[0];
	dpm_context->dpm_tables.display_table.max = driver_ppt->FreqTableDispclk[NUM_DISPCLK_DPM_LEVELS - 1];

	dpm_context->dpm_tables.phy_table.min = driver_ppt->FreqTablePhyclk[0];
	dpm_context->dpm_tables.phy_table.max = driver_ppt->FreqTablePhyclk[NUM_PHYCLK_DPM_LEVELS - 1];

	for (i = 0; i < MAX_PCIE_CONF; i++) {
		dpm_context->dpm_tables.pcie_table.pcie_gen[i] = driver_ppt->PcieGenSpeed[i];
		dpm_context->dpm_tables.pcie_table.pcie_lane[i] = driver_ppt->PcieLaneCount[i];
	}

	return 0;
}
616
617static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
618{
619 struct smu_power_context *smu_power = &smu->smu_power;
620 struct smu_power_gate *power_gate = &smu_power->power_gate;
621 int ret = 0;
622
623 if (enable) {
624
625 if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
626 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL);
627 if (ret)
628 return ret;
629 }
630 power_gate->vcn_gated = false;
631 } else {
632 if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
633 ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
634 if (ret)
635 return ret;
636 }
637 power_gate->vcn_gated = true;
638 }
639
640 return ret;
641}
642
643static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
644{
645 struct smu_power_context *smu_power = &smu->smu_power;
646 struct smu_power_gate *power_gate = &smu_power->power_gate;
647 int ret = 0;
648
649 if (enable) {
650 if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
651 ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL);
652 if (ret)
653 return ret;
654 }
655 power_gate->jpeg_gated = false;
656 } else {
657 if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
658 ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
659 if (ret)
660 return ret;
661 }
662 power_gate->jpeg_gated = true;
663 }
664
665 return ret;
666}
667
668static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
669 enum smu_clk_type clk_type,
670 uint32_t *value)
671{
672 int ret = 0, clk_id = 0;
673 SmuMetrics_t metrics;
674
675 ret = navi10_get_metrics_table(smu, &metrics);
676 if (ret)
677 return ret;
678
679 clk_id = smu_clk_get_index(smu, clk_type);
680 if (clk_id < 0)
681 return clk_id;
682
683 *value = metrics.CurrClock[clk_id];
684
685 return ret;
686}
687
688static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
689{
690 PPTable_t *pptable = smu->smu_table.driver_pptable;
691 DpmDescriptor_t *dpm_desc = NULL;
692 uint32_t clk_index = 0;
693
694 clk_index = smu_clk_get_index(smu, clk_type);
695 dpm_desc = &pptable->DpmDescriptor[clk_index];
696
697
698 return dpm_desc->SnapToDiscrete == 0 ? true : false;
699}
700
701static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
702{
703 return od_table->cap[cap];
704}
705
706static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_table,
707 enum SMU_11_0_ODSETTING_ID setting,
708 uint32_t *min, uint32_t *max)
709{
710 if (min)
711 *min = od_table->min[setting];
712 if (max)
713 *max = od_table->max[setting];
714}
715
/*
 * Format the DPM levels of @clk_type into @buf for sysfs consumption.
 * Returns the number of bytes written (0 on any internal failure —
 * errors are deliberately not propagated here).
 *
 * Regular clocks print one "index: freqMhz [*]" line per level, with
 * '*' marking the current level.  Fine-grained clocks print a
 * synthetic 3-entry min/current/max view.  SMU_PCIE prints gen speed,
 * lane width and LCLK per link level.  The OD_* cases print the
 * overdrive settings and their allowed ranges when overdrive is
 * enabled and the corresponding capability bit is set.
 */
static int navi10_print_clk_levels(struct smu_context *smu,
			enum smu_clk_type clk_type, char *buf)
{
	uint16_t *curve_settings;
	int i, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	uint32_t freq_values[3] = {0};
	uint32_t mark_index = 0;
	struct smu_table_context *table_context = &smu->smu_table;
	uint32_t gen_speed, lane_width;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	/* NOTE(review): adev appears unused directly but is presumably
	 * referenced by the RREG32_PCIE() macro below — confirm. */
	struct amdgpu_device *adev = smu->adev;
	PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
	OverDriveTable_t *od_table =
		(OverDriveTable_t *)table_context->overdrive_table;
	struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
	uint32_t min_value, max_value;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
	case SMU_SOCCLK:
	case SMU_MCLK:
	case SMU_UCLK:
	case SMU_FCLK:
	case SMU_DCEFCLK:
		ret = smu_get_current_clk_freq(smu, clk_type, &cur_value);
		if (ret)
			return size;

		/* 10 kHz units -> MHz */
		cur_value = cur_value / 100;

		ret = smu_get_dpm_level_count(smu, clk_type, &count);
		if (ret)
			return size;

		if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
			/* Discrete DPM: one line per level. */
			for (i = 0; i < count; i++) {
				ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value);
				if (ret)
					return size;

				size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
						cur_value == value ? "*" : "");
			}
		} else {
			/* Fine-grained DPM: synthesize min/current/max. */
			ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
			if (ret)
				return size;
			ret = smu_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
			if (ret)
				return size;

			freq_values[1] = cur_value;
			mark_index = cur_value == freq_values[0] ? 0 :
				     cur_value == freq_values[2] ? 2 : 1;
			/* When current sits at an endpoint, show the midpoint
			 * as the middle entry instead. */
			if (mark_index != 1)
				freq_values[1] = (freq_values[0] + freq_values[2]) / 2;

			for (i = 0; i < 3; i++) {
				size += sprintf(buf + size, "%d: %uMhz %s\n", i, freq_values[i],
						i == mark_index ? "*" : "");
			}

		}
		break;
	case SMU_PCIE:
		/* Read the live link state to mark the active level. */
		gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
			     PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
			>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
		lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
			      PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
			>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
		for (i = 0; i < NUM_LINK_LEVELS; i++)
			size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 3) ? "16.0GT/s," : "",
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 1) ? "x1" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 2) ? "x2" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 3) ? "x4" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 4) ? "x8" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 5) ? "x12" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 6) ? "x16" : "",
					pptable->LclkFreq[i],
					(gen_speed == dpm_context->dpm_tables.pcie_table.pcie_gen[i]) &&
					(lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
					"*" : "");
		break;
	case SMU_OD_SCLK:
		if (!smu->od_enabled || !od_table || !od_settings)
			break;
		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS))
			break;
		size += sprintf(buf + size, "OD_SCLK:\n");
		size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
		break;
	case SMU_OD_MCLK:
		if (!smu->od_enabled || !od_table || !od_settings)
			break;
		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX))
			break;
		size += sprintf(buf + size, "OD_MCLK:\n");
		size += sprintf(buf + size, "1: %uMHz\n", od_table->UclkFmax);
		break;
	case SMU_OD_VDDC_CURVE:
		if (!smu->od_enabled || !od_table || !od_settings)
			break;
		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE))
			break;
		size += sprintf(buf + size, "OD_VDDC_CURVE:\n");
		for (i = 0; i < 3; i++) {
			/* NOTE(review): curve_settings[0]/[1] assumes each
			 * GfxclkFreqN field is immediately followed in memory
			 * by its voltage — confirm against OverDriveTable_t. */
			switch (i) {
			case 0:
				curve_settings = &od_table->GfxclkFreq1;
				break;
			case 1:
				curve_settings = &od_table->GfxclkFreq2;
				break;
			case 2:
				curve_settings = &od_table->GfxclkFreq3;
				break;
			default:
				break;
			}
			size += sprintf(buf + size, "%d: %uMHz @ %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE);
		}
		break;
	case SMU_OD_RANGE:
		if (!smu->od_enabled || !od_table || !od_settings)
			break;
		size = sprintf(buf, "%s:\n", "OD_RANGE");

		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
						    &min_value, NULL);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
						    NULL, &max_value);
			size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
					min_value, max_value);
		}

		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
						    &min_value, &max_value);
			size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
					min_value, max_value);
		}

		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
					min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
					min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
					min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
					min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
					min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
					min_value, max_value);
		}

		break;
	default:
		break;
	}

	return size;
}
902
903static int navi10_force_clk_levels(struct smu_context *smu,
904 enum smu_clk_type clk_type, uint32_t mask)
905{
906
907 int ret = 0, size = 0;
908 uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;
909
910 soft_min_level = mask ? (ffs(mask) - 1) : 0;
911 soft_max_level = mask ? (fls(mask) - 1) : 0;
912
913 switch (clk_type) {
914 case SMU_GFXCLK:
915 case SMU_SCLK:
916 case SMU_SOCCLK:
917 case SMU_MCLK:
918 case SMU_UCLK:
919 case SMU_DCEFCLK:
920 case SMU_FCLK:
921
922 if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
923 soft_max_level = (soft_max_level >= 1 ? 1 : 0);
924 soft_min_level = (soft_min_level >= 1 ? 1 : 0);
925 }
926
927 ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
928 if (ret)
929 return size;
930
931 ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
932 if (ret)
933 return size;
934
935 ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
936 if (ret)
937 return size;
938 break;
939 default:
940 break;
941 }
942
943 return size;
944}
945
946static int navi10_populate_umd_state_clk(struct smu_context *smu)
947{
948 int ret = 0;
949 uint32_t min_sclk_freq = 0, min_mclk_freq = 0;
950
951 ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL, false);
952 if (ret)
953 return ret;
954
955 smu->pstate_sclk = min_sclk_freq * 100;
956
957 ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL, false);
958 if (ret)
959 return ret;
960
961 smu->pstate_mclk = min_mclk_freq * 100;
962
963 return ret;
964}
965
966static int navi10_get_clock_by_type_with_latency(struct smu_context *smu,
967 enum smu_clk_type clk_type,
968 struct pp_clock_levels_with_latency *clocks)
969{
970 int ret = 0, i = 0;
971 uint32_t level_count = 0, freq = 0;
972
973 switch (clk_type) {
974 case SMU_GFXCLK:
975 case SMU_DCEFCLK:
976 case SMU_SOCCLK:
977 case SMU_MCLK:
978 case SMU_UCLK:
979 ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
980 if (ret)
981 return ret;
982
983 level_count = min(level_count, (uint32_t)MAX_NUM_CLOCKS);
984 clocks->num_levels = level_count;
985
986 for (i = 0; i < level_count; i++) {
987 ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &freq);
988 if (ret)
989 return ret;
990
991 clocks->data[i].clocks_in_khz = freq * 1000;
992 clocks->data[i].latency_in_us = 0;
993 }
994 break;
995 default:
996 break;
997 }
998
999 return ret;
1000}
1001
1002static int navi10_pre_display_config_changed(struct smu_context *smu)
1003{
1004 int ret = 0;
1005 uint32_t max_freq = 0;
1006
1007 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
1008 if (ret)
1009 return ret;
1010
1011 if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
1012 ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq, false);
1013 if (ret)
1014 return ret;
1015 ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, max_freq);
1016 if (ret)
1017 return ret;
1018 }
1019
1020 return ret;
1021}
1022
1023static int navi10_display_config_changed(struct smu_context *smu)
1024{
1025 int ret = 0;
1026
1027 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1028 smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
1029 smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
1030 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
1031 smu->display_config->num_display,
1032 NULL);
1033 if (ret)
1034 return ret;
1035 }
1036
1037 return ret;
1038}
1039
1040static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest)
1041{
1042 int ret = 0, i = 0;
1043 uint32_t min_freq, max_freq, force_freq;
1044 enum smu_clk_type clk_type;
1045
1046 enum smu_clk_type clks[] = {
1047 SMU_GFXCLK,
1048 SMU_MCLK,
1049 SMU_SOCCLK,
1050 };
1051
1052 for (i = 0; i < ARRAY_SIZE(clks); i++) {
1053 clk_type = clks[i];
1054 ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
1055 if (ret)
1056 return ret;
1057
1058 force_freq = highest ? max_freq : min_freq;
1059 ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false);
1060 if (ret)
1061 return ret;
1062 }
1063
1064 return ret;
1065}
1066
1067static int navi10_unforce_dpm_levels(struct smu_context *smu)
1068{
1069 int ret = 0, i = 0;
1070 uint32_t min_freq, max_freq;
1071 enum smu_clk_type clk_type;
1072
1073 enum smu_clk_type clks[] = {
1074 SMU_GFXCLK,
1075 SMU_MCLK,
1076 SMU_SOCCLK,
1077 };
1078
1079 for (i = 0; i < ARRAY_SIZE(clks); i++) {
1080 clk_type = clks[i];
1081 ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
1082 if (ret)
1083 return ret;
1084
1085 ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
1086 if (ret)
1087 return ret;
1088 }
1089
1090 return ret;
1091}
1092
1093static int navi10_get_gpu_power(struct smu_context *smu, uint32_t *value)
1094{
1095 int ret = 0;
1096 SmuMetrics_t metrics;
1097
1098 if (!value)
1099 return -EINVAL;
1100
1101 ret = navi10_get_metrics_table(smu, &metrics);
1102 if (ret)
1103 return ret;
1104
1105 *value = metrics.AverageSocketPower << 8;
1106
1107 return 0;
1108}
1109
1110static int navi10_get_current_activity_percent(struct smu_context *smu,
1111 enum amd_pp_sensors sensor,
1112 uint32_t *value)
1113{
1114 int ret = 0;
1115 SmuMetrics_t metrics;
1116
1117 if (!value)
1118 return -EINVAL;
1119
1120 ret = navi10_get_metrics_table(smu, &metrics);
1121 if (ret)
1122 return ret;
1123
1124 switch (sensor) {
1125 case AMDGPU_PP_SENSOR_GPU_LOAD:
1126 *value = metrics.AverageGfxActivity;
1127 break;
1128 case AMDGPU_PP_SENSOR_MEM_LOAD:
1129 *value = metrics.AverageUclkActivity;
1130 break;
1131 default:
1132 pr_err("Invalid sensor for retrieving clock activity\n");
1133 return -EINVAL;
1134 }
1135
1136 return 0;
1137}
1138
1139static bool navi10_is_dpm_running(struct smu_context *smu)
1140{
1141 int ret = 0;
1142 uint32_t feature_mask[2];
1143 unsigned long feature_enabled;
1144 ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
1145 feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
1146 ((uint64_t)feature_mask[1] << 32));
1147 return !!(feature_enabled & SMC_DPM_FEATURE);
1148}
1149
1150static int navi10_get_fan_speed_rpm(struct smu_context *smu,
1151 uint32_t *speed)
1152{
1153 SmuMetrics_t metrics;
1154 int ret = 0;
1155
1156 if (!speed)
1157 return -EINVAL;
1158
1159 ret = navi10_get_metrics_table(smu, &metrics);
1160 if (ret)
1161 return ret;
1162
1163 *speed = metrics.CurrFanSpeed;
1164
1165 return ret;
1166}
1167
1168static int navi10_get_fan_speed_percent(struct smu_context *smu,
1169 uint32_t *speed)
1170{
1171 int ret = 0;
1172 uint32_t percent = 0;
1173 uint32_t current_rpm;
1174 PPTable_t *pptable = smu->smu_table.driver_pptable;
1175
1176 ret = navi10_get_fan_speed_rpm(smu, ¤t_rpm);
1177 if (ret)
1178 return ret;
1179
1180 percent = current_rpm * 100 / pptable->FanMaximumRpm;
1181 *speed = percent > 100 ? 100 : percent;
1182
1183 return ret;
1184}
1185
/*
 * navi10_get_power_profile_mode - print every power profile and its activity
 * monitor coefficients into @buf (sysfs pp_power_profile_mode read path).
 *
 * For each profile from BOOTUP_DEFAULT through CUSTOM, the matching
 * SMU_TABLE_ACTIVITY_MONITOR_COEFF table is fetched and one row per clock
 * domain (GFXCLK, SOCCLK, MEMLK) is printed.  The currently active profile
 * is marked with '*'.  Returns the number of bytes written, or a negative
 * errno on failure.
 */
static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
{
	DpmActivityMonitorCoeffInt_t activity_monitor;
	uint32_t i, size = 0;
	int16_t workload_type = 0;
	/* Indexed by PP_SMC_POWER_PROFILE_*; order must match that enum. */
	static const char *profile_name[] = {
					"BOOTUP_DEFAULT",
					"3D_FULL_SCREEN",
					"POWER_SAVING",
					"VIDEO",
					"VR",
					"COMPUTE",
					"CUSTOM"};
	static const char *title[] = {
			"PROFILE_INDEX(NAME)",
			"CLOCK_TYPE(NAME)",
			"FPS",
			"MinFreqType",
			"MinActiveFreqType",
			"MinActiveFreq",
			"BoosterFreqType",
			"BoosterFreq",
			"PD_Data_limit_c",
			"PD_Data_error_coeff",
			"PD_Data_error_rate_coeff"};
	int result = 0;

	if (!buf)
		return -EINVAL;

	/* Header line for the table below. */
	size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
			title[0], title[1], title[2], title[3], title[4], title[5],
			title[6], title[7], title[8], title[9], title[10]);

	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
		/* Map the generic workload enum to this ASIC's workload bit. */
		workload_type = smu_workload_get_type(smu, i);
		if (workload_type < 0)
			return -EINVAL;

		/* false = read the coefficients from the SMU, not write them. */
		result = smu_update_table(smu,
					  SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
					  (void *)(&activity_monitor), false);
		if (result) {
			pr_err("[%s] Failed to get activity monitor!", __func__);
			return result;
		}

		size += sprintf(buf + size, "%2d %14s%s:\n",
			i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");

		/* One row per clock domain: GFXCLK (0), SOCCLK (1), MEMLK (2). */
		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			0,
			"GFXCLK",
			activity_monitor.Gfx_FPS,
			activity_monitor.Gfx_MinFreqStep,
			activity_monitor.Gfx_MinActiveFreqType,
			activity_monitor.Gfx_MinActiveFreq,
			activity_monitor.Gfx_BoosterFreqType,
			activity_monitor.Gfx_BoosterFreq,
			activity_monitor.Gfx_PD_Data_limit_c,
			activity_monitor.Gfx_PD_Data_error_coeff,
			activity_monitor.Gfx_PD_Data_error_rate_coeff);

		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			1,
			"SOCCLK",
			activity_monitor.Soc_FPS,
			activity_monitor.Soc_MinFreqStep,
			activity_monitor.Soc_MinActiveFreqType,
			activity_monitor.Soc_MinActiveFreq,
			activity_monitor.Soc_BoosterFreqType,
			activity_monitor.Soc_BoosterFreq,
			activity_monitor.Soc_PD_Data_limit_c,
			activity_monitor.Soc_PD_Data_error_coeff,
			activity_monitor.Soc_PD_Data_error_rate_coeff);

		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			2,
			"MEMLK",
			activity_monitor.Mem_FPS,
			activity_monitor.Mem_MinFreqStep,
			activity_monitor.Mem_MinActiveFreqType,
			activity_monitor.Mem_MinActiveFreq,
			activity_monitor.Mem_BoosterFreqType,
			activity_monitor.Mem_BoosterFreq,
			activity_monitor.Mem_PD_Data_limit_c,
			activity_monitor.Mem_PD_Data_error_coeff,
			activity_monitor.Mem_PD_Data_error_rate_coeff);
	}

	return size;
}
1282
1283static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
1284{
1285 DpmActivityMonitorCoeffInt_t activity_monitor;
1286 int workload_type, ret = 0;
1287
1288 smu->power_profile_mode = input[size];
1289
1290 if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
1291 pr_err("Invalid power profile mode %d\n", smu->power_profile_mode);
1292 return -EINVAL;
1293 }
1294
1295 if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1296
1297 ret = smu_update_table(smu,
1298 SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
1299 (void *)(&activity_monitor), false);
1300 if (ret) {
1301 pr_err("[%s] Failed to get activity monitor!", __func__);
1302 return ret;
1303 }
1304
1305 switch (input[0]) {
1306 case 0:
1307 activity_monitor.Gfx_FPS = input[1];
1308 activity_monitor.Gfx_MinFreqStep = input[2];
1309 activity_monitor.Gfx_MinActiveFreqType = input[3];
1310 activity_monitor.Gfx_MinActiveFreq = input[4];
1311 activity_monitor.Gfx_BoosterFreqType = input[5];
1312 activity_monitor.Gfx_BoosterFreq = input[6];
1313 activity_monitor.Gfx_PD_Data_limit_c = input[7];
1314 activity_monitor.Gfx_PD_Data_error_coeff = input[8];
1315 activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
1316 break;
1317 case 1:
1318 activity_monitor.Soc_FPS = input[1];
1319 activity_monitor.Soc_MinFreqStep = input[2];
1320 activity_monitor.Soc_MinActiveFreqType = input[3];
1321 activity_monitor.Soc_MinActiveFreq = input[4];
1322 activity_monitor.Soc_BoosterFreqType = input[5];
1323 activity_monitor.Soc_BoosterFreq = input[6];
1324 activity_monitor.Soc_PD_Data_limit_c = input[7];
1325 activity_monitor.Soc_PD_Data_error_coeff = input[8];
1326 activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
1327 break;
1328 case 2:
1329 activity_monitor.Mem_FPS = input[1];
1330 activity_monitor.Mem_MinFreqStep = input[2];
1331 activity_monitor.Mem_MinActiveFreqType = input[3];
1332 activity_monitor.Mem_MinActiveFreq = input[4];
1333 activity_monitor.Mem_BoosterFreqType = input[5];
1334 activity_monitor.Mem_BoosterFreq = input[6];
1335 activity_monitor.Mem_PD_Data_limit_c = input[7];
1336 activity_monitor.Mem_PD_Data_error_coeff = input[8];
1337 activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
1338 break;
1339 }
1340
1341 ret = smu_update_table(smu,
1342 SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
1343 (void *)(&activity_monitor), true);
1344 if (ret) {
1345 pr_err("[%s] Failed to set activity monitor!", __func__);
1346 return ret;
1347 }
1348 }
1349
1350
1351 workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
1352 if (workload_type < 0)
1353 return -EINVAL;
1354 smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
1355 1 << workload_type, NULL);
1356
1357 return ret;
1358}
1359
1360static int navi10_get_profiling_clk_mask(struct smu_context *smu,
1361 enum amd_dpm_forced_level level,
1362 uint32_t *sclk_mask,
1363 uint32_t *mclk_mask,
1364 uint32_t *soc_mask)
1365{
1366 int ret = 0;
1367 uint32_t level_count = 0;
1368
1369 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
1370 if (sclk_mask)
1371 *sclk_mask = 0;
1372 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
1373 if (mclk_mask)
1374 *mclk_mask = 0;
1375 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
1376 if(sclk_mask) {
1377 ret = smu_get_dpm_level_count(smu, SMU_SCLK, &level_count);
1378 if (ret)
1379 return ret;
1380 *sclk_mask = level_count - 1;
1381 }
1382
1383 if(mclk_mask) {
1384 ret = smu_get_dpm_level_count(smu, SMU_MCLK, &level_count);
1385 if (ret)
1386 return ret;
1387 *mclk_mask = level_count - 1;
1388 }
1389
1390 if(soc_mask) {
1391 ret = smu_get_dpm_level_count(smu, SMU_SOCCLK, &level_count);
1392 if (ret)
1393 return ret;
1394 *soc_mask = level_count - 1;
1395 }
1396 }
1397
1398 return ret;
1399}
1400
1401static int navi10_notify_smc_display_config(struct smu_context *smu)
1402{
1403 struct smu_clocks min_clocks = {0};
1404 struct pp_display_clock_request clock_req;
1405 int ret = 0;
1406
1407 min_clocks.dcef_clock = smu->display_config->min_dcef_set_clk;
1408 min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
1409 min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
1410
1411 if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
1412 clock_req.clock_type = amd_pp_dcef_clock;
1413 clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
1414
1415 ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
1416 if (!ret) {
1417 if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
1418 ret = smu_send_smc_msg_with_param(smu,
1419 SMU_MSG_SetMinDeepSleepDcefclk,
1420 min_clocks.dcef_clock_in_sr/100,
1421 NULL);
1422 if (ret) {
1423 pr_err("Attempt to set divider for DCEFCLK Failed!");
1424 return ret;
1425 }
1426 }
1427 } else {
1428 pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
1429 }
1430 }
1431
1432 if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
1433 ret = smu_set_hard_freq_range(smu, SMU_UCLK, min_clocks.memory_clock/100, 0);
1434 if (ret) {
1435 pr_err("[%s] Set hard min uclk failed!", __func__);
1436 return ret;
1437 }
1438 }
1439
1440 return 0;
1441}
1442
1443static int navi10_set_watermarks_table(struct smu_context *smu,
1444 void *watermarks, struct
1445 dm_pp_wm_sets_with_clock_ranges_soc15
1446 *clock_ranges)
1447{
1448 int i;
1449 int ret = 0;
1450 Watermarks_t *table = watermarks;
1451
1452 if (!table || !clock_ranges)
1453 return -EINVAL;
1454
1455 if (clock_ranges->num_wm_dmif_sets > 4 ||
1456 clock_ranges->num_wm_mcif_sets > 4)
1457 return -EINVAL;
1458
1459 for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
1460 table->WatermarkRow[1][i].MinClock =
1461 cpu_to_le16((uint16_t)
1462 (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
1463 1000));
1464 table->WatermarkRow[1][i].MaxClock =
1465 cpu_to_le16((uint16_t)
1466 (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
1467 1000));
1468 table->WatermarkRow[1][i].MinUclk =
1469 cpu_to_le16((uint16_t)
1470 (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
1471 1000));
1472 table->WatermarkRow[1][i].MaxUclk =
1473 cpu_to_le16((uint16_t)
1474 (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
1475 1000));
1476 table->WatermarkRow[1][i].WmSetting = (uint8_t)
1477 clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
1478 }
1479
1480 for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
1481 table->WatermarkRow[0][i].MinClock =
1482 cpu_to_le16((uint16_t)
1483 (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
1484 1000));
1485 table->WatermarkRow[0][i].MaxClock =
1486 cpu_to_le16((uint16_t)
1487 (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
1488 1000));
1489 table->WatermarkRow[0][i].MinUclk =
1490 cpu_to_le16((uint16_t)
1491 (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
1492 1000));
1493 table->WatermarkRow[0][i].MaxUclk =
1494 cpu_to_le16((uint16_t)
1495 (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
1496 1000));
1497 table->WatermarkRow[0][i].WmSetting = (uint8_t)
1498 clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
1499 }
1500
1501 smu->watermarks_bitmap |= WATERMARKS_EXIST;
1502
1503
1504 if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1505 ret = smu_write_watermarks_table(smu);
1506 if (ret) {
1507 pr_err("Failed to update WMTABLE!");
1508 return ret;
1509 }
1510 smu->watermarks_bitmap |= WATERMARKS_LOADED;
1511 }
1512
1513 return 0;
1514}
1515
1516static int navi10_thermal_get_temperature(struct smu_context *smu,
1517 enum amd_pp_sensors sensor,
1518 uint32_t *value)
1519{
1520 SmuMetrics_t metrics;
1521 int ret = 0;
1522
1523 if (!value)
1524 return -EINVAL;
1525
1526 ret = navi10_get_metrics_table(smu, &metrics);
1527 if (ret)
1528 return ret;
1529
1530 switch (sensor) {
1531 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
1532 *value = metrics.TemperatureHotspot *
1533 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1534 break;
1535 case AMDGPU_PP_SENSOR_EDGE_TEMP:
1536 *value = metrics.TemperatureEdge *
1537 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1538 break;
1539 case AMDGPU_PP_SENSOR_MEM_TEMP:
1540 *value = metrics.TemperatureMem *
1541 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1542 break;
1543 default:
1544 pr_err("Invalid sensor for retrieving temp\n");
1545 return -EINVAL;
1546 }
1547
1548 return 0;
1549}
1550
1551static int navi10_read_sensor(struct smu_context *smu,
1552 enum amd_pp_sensors sensor,
1553 void *data, uint32_t *size)
1554{
1555 int ret = 0;
1556 struct smu_table_context *table_context = &smu->smu_table;
1557 PPTable_t *pptable = table_context->driver_pptable;
1558
1559 if(!data || !size)
1560 return -EINVAL;
1561
1562 mutex_lock(&smu->sensor_lock);
1563 switch (sensor) {
1564 case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
1565 *(uint32_t *)data = pptable->FanMaximumRpm;
1566 *size = 4;
1567 break;
1568 case AMDGPU_PP_SENSOR_MEM_LOAD:
1569 case AMDGPU_PP_SENSOR_GPU_LOAD:
1570 ret = navi10_get_current_activity_percent(smu, sensor, (uint32_t *)data);
1571 *size = 4;
1572 break;
1573 case AMDGPU_PP_SENSOR_GPU_POWER:
1574 ret = navi10_get_gpu_power(smu, (uint32_t *)data);
1575 *size = 4;
1576 break;
1577 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
1578 case AMDGPU_PP_SENSOR_EDGE_TEMP:
1579 case AMDGPU_PP_SENSOR_MEM_TEMP:
1580 ret = navi10_thermal_get_temperature(smu, sensor, (uint32_t *)data);
1581 *size = 4;
1582 break;
1583 default:
1584 ret = smu_v11_0_read_sensor(smu, sensor, data, size);
1585 }
1586 mutex_unlock(&smu->sensor_lock);
1587
1588 return ret;
1589}
1590
1591static int navi10_get_uclk_dpm_states(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states)
1592{
1593 uint32_t num_discrete_levels = 0;
1594 uint16_t *dpm_levels = NULL;
1595 uint16_t i = 0;
1596 struct smu_table_context *table_context = &smu->smu_table;
1597 PPTable_t *driver_ppt = NULL;
1598
1599 if (!clocks_in_khz || !num_states || !table_context->driver_pptable)
1600 return -EINVAL;
1601
1602 driver_ppt = table_context->driver_pptable;
1603 num_discrete_levels = driver_ppt->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels;
1604 dpm_levels = driver_ppt->FreqTableUclk;
1605
1606 if (num_discrete_levels == 0 || dpm_levels == NULL)
1607 return -EINVAL;
1608
1609 *num_states = num_discrete_levels;
1610 for (i = 0; i < num_discrete_levels; i++) {
1611
1612 *clocks_in_khz = (*dpm_levels) * 1000;
1613 clocks_in_khz++;
1614 dpm_levels++;
1615 }
1616
1617 return 0;
1618}
1619
1620static int navi10_set_performance_level(struct smu_context *smu,
1621 enum amd_dpm_forced_level level);
1622
1623static int navi10_set_standard_performance_level(struct smu_context *smu)
1624{
1625 struct amdgpu_device *adev = smu->adev;
1626 int ret = 0;
1627 uint32_t sclk_freq = 0, uclk_freq = 0;
1628
1629 switch (adev->asic_type) {
1630 case CHIP_NAVI10:
1631 sclk_freq = NAVI10_UMD_PSTATE_PROFILING_GFXCLK;
1632 uclk_freq = NAVI10_UMD_PSTATE_PROFILING_MEMCLK;
1633 break;
1634 case CHIP_NAVI14:
1635 sclk_freq = NAVI14_UMD_PSTATE_PROFILING_GFXCLK;
1636 uclk_freq = NAVI14_UMD_PSTATE_PROFILING_MEMCLK;
1637 break;
1638 default:
1639
1640 return navi10_set_performance_level(smu, AMD_DPM_FORCED_LEVEL_AUTO);
1641 }
1642
1643 ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
1644 if (ret)
1645 return ret;
1646 ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
1647 if (ret)
1648 return ret;
1649
1650 return ret;
1651}
1652
1653static int navi10_set_peak_performance_level(struct smu_context *smu)
1654{
1655 struct amdgpu_device *adev = smu->adev;
1656 int ret = 0;
1657 uint32_t sclk_freq = 0, uclk_freq = 0;
1658
1659 switch (adev->asic_type) {
1660 case CHIP_NAVI10:
1661 switch (adev->pdev->revision) {
1662 case 0xf0:
1663 case 0xc0:
1664 sclk_freq = NAVI10_PEAK_SCLK_XTX;
1665 break;
1666 case 0xf1:
1667 case 0xc1:
1668 sclk_freq = NAVI10_PEAK_SCLK_XT;
1669 break;
1670 default:
1671 sclk_freq = NAVI10_PEAK_SCLK_XL;
1672 break;
1673 }
1674 break;
1675 case CHIP_NAVI14:
1676 switch (adev->pdev->revision) {
1677 case 0xc7:
1678 case 0xf4:
1679 sclk_freq = NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK;
1680 break;
1681 case 0xc1:
1682 case 0xf2:
1683 sclk_freq = NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK;
1684 break;
1685 case 0xc3:
1686 case 0xf3:
1687 sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK;
1688 break;
1689 case 0xc5:
1690 case 0xf6:
1691 sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK;
1692 break;
1693 default:
1694 sclk_freq = NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK;
1695 break;
1696 }
1697 break;
1698 case CHIP_NAVI12:
1699 sclk_freq = NAVI12_UMD_PSTATE_PEAK_GFXCLK;
1700 break;
1701 default:
1702 ret = smu_get_dpm_level_range(smu, SMU_SCLK, NULL, &sclk_freq);
1703 if (ret)
1704 return ret;
1705 }
1706
1707 ret = smu_get_dpm_level_range(smu, SMU_UCLK, NULL, &uclk_freq);
1708 if (ret)
1709 return ret;
1710
1711 ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
1712 if (ret)
1713 return ret;
1714 ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
1715 if (ret)
1716 return ret;
1717
1718 return ret;
1719}
1720
1721static int navi10_set_performance_level(struct smu_context *smu,
1722 enum amd_dpm_forced_level level)
1723{
1724 int ret = 0;
1725 uint32_t sclk_mask, mclk_mask, soc_mask;
1726
1727 switch (level) {
1728 case AMD_DPM_FORCED_LEVEL_HIGH:
1729 ret = smu_force_dpm_limit_value(smu, true);
1730 break;
1731 case AMD_DPM_FORCED_LEVEL_LOW:
1732 ret = smu_force_dpm_limit_value(smu, false);
1733 break;
1734 case AMD_DPM_FORCED_LEVEL_AUTO:
1735 ret = smu_unforce_dpm_levels(smu);
1736 break;
1737 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1738 ret = navi10_set_standard_performance_level(smu);
1739 break;
1740 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1741 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1742 ret = smu_get_profiling_clk_mask(smu, level,
1743 &sclk_mask,
1744 &mclk_mask,
1745 &soc_mask);
1746 if (ret)
1747 return ret;
1748 smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
1749 smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
1750 smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
1751 break;
1752 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1753 ret = navi10_set_peak_performance_level(smu);
1754 break;
1755 case AMD_DPM_FORCED_LEVEL_MANUAL:
1756 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1757 default:
1758 break;
1759 }
1760 return ret;
1761}
1762
1763static int navi10_get_thermal_temperature_range(struct smu_context *smu,
1764 struct smu_temperature_range *range)
1765{
1766 struct smu_table_context *table_context = &smu->smu_table;
1767 struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table;
1768
1769 if (!range || !powerplay_table)
1770 return -EINVAL;
1771
1772 range->max = powerplay_table->software_shutdown_temp *
1773 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1774
1775 return 0;
1776}
1777
1778static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
1779 bool disable_memory_clock_switch)
1780{
1781 int ret = 0;
1782 struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks =
1783 (struct smu_11_0_max_sustainable_clocks *)
1784 smu->smu_table.max_sustainable_clocks;
1785 uint32_t min_memory_clock = smu->hard_min_uclk_req_from_dal;
1786 uint32_t max_memory_clock = max_sustainable_clocks->uclock;
1787
1788 if(smu->disable_uclk_switch == disable_memory_clock_switch)
1789 return 0;
1790
1791 if(disable_memory_clock_switch)
1792 ret = smu_set_hard_freq_range(smu, SMU_UCLK, max_memory_clock, 0);
1793 else
1794 ret = smu_set_hard_freq_range(smu, SMU_UCLK, min_memory_clock, 0);
1795
1796 if(!ret)
1797 smu->disable_uclk_switch = disable_memory_clock_switch;
1798
1799 return ret;
1800}
1801
1802static uint32_t navi10_get_pptable_power_limit(struct smu_context *smu)
1803{
1804 PPTable_t *pptable = smu->smu_table.driver_pptable;
1805 return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
1806}
1807
1808static int navi10_get_power_limit(struct smu_context *smu,
1809 uint32_t *limit,
1810 bool cap)
1811{
1812 PPTable_t *pptable = smu->smu_table.driver_pptable;
1813 uint32_t asic_default_power_limit = 0;
1814 int ret = 0;
1815 int power_src;
1816
1817 if (!smu->power_limit) {
1818 if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT) &&
1819 !amdgpu_sriov_vf(smu->adev)) {
1820 power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
1821 if (power_src < 0)
1822 return -EINVAL;
1823
1824 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
1825 power_src << 16, &asic_default_power_limit);
1826 if (ret) {
1827 pr_err("[%s] get PPT limit failed!", __func__);
1828 return ret;
1829 }
1830 } else {
1831
1832 if (!pptable) {
1833 pr_err("Cannot get PPT limit due to pptable missing!");
1834 return -EINVAL;
1835 }
1836 asic_default_power_limit =
1837 pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
1838 }
1839
1840 smu->power_limit = asic_default_power_limit;
1841 }
1842
1843 if (cap)
1844 *limit = smu_v11_0_get_max_power_limit(smu);
1845 else
1846 *limit = smu->power_limit;
1847
1848 return 0;
1849}
1850
1851static int navi10_update_pcie_parameters(struct smu_context *smu,
1852 uint32_t pcie_gen_cap,
1853 uint32_t pcie_width_cap)
1854{
1855 PPTable_t *pptable = smu->smu_table.driver_pptable;
1856 int ret, i;
1857 uint32_t smu_pcie_arg;
1858
1859 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
1860 struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
1861
1862 for (i = 0; i < NUM_LINK_LEVELS; i++) {
1863 smu_pcie_arg = (i << 16) |
1864 ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
1865 (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
1866 pptable->PcieLaneCount[i] : pcie_width_cap);
1867 ret = smu_send_smc_msg_with_param(smu,
1868 SMU_MSG_OverridePcieParameters,
1869 smu_pcie_arg,
1870 NULL);
1871
1872 if (ret)
1873 return ret;
1874
1875 if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
1876 dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
1877 if (pptable->PcieLaneCount[i] > pcie_width_cap)
1878 dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
1879 }
1880
1881 return 0;
1882}
1883
/* Dump the current overdrive table at debug log level; no side effects. */
static inline void navi10_dump_od_table(OverDriveTable_t *od_table) {
	pr_debug("OD: Gfxclk: (%d, %d)\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
	/* The three (freq, volt) points of the custom voltage curve. */
	pr_debug("OD: Gfx1: (%d, %d)\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1);
	pr_debug("OD: Gfx2: (%d, %d)\n", od_table->GfxclkFreq2, od_table->GfxclkVolt2);
	pr_debug("OD: Gfx3: (%d, %d)\n", od_table->GfxclkFreq3, od_table->GfxclkVolt3);
	pr_debug("OD: UclkFmax: %d\n", od_table->UclkFmax);
	pr_debug("OD: OverDrivePct: %d\n", od_table->OverDrivePct);
}
1892
1893static int navi10_od_setting_check_range(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODSETTING_ID setting, uint32_t value)
1894{
1895 if (value < od_table->min[setting]) {
1896 pr_warn("OD setting (%d, %d) is less than the minimum allowed (%d)\n", setting, value, od_table->min[setting]);
1897 return -EINVAL;
1898 }
1899 if (value > od_table->max[setting]) {
1900 pr_warn("OD setting (%d, %d) is greater than the maximum allowed (%d)\n", setting, value, od_table->max[setting]);
1901 return -EINVAL;
1902 }
1903 return 0;
1904}
1905
1906static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
1907 uint16_t *voltage,
1908 uint32_t freq)
1909{
1910 uint32_t param = (freq & 0xFFFF) | (PPCLK_GFXCLK << 16);
1911 uint32_t value = 0;
1912 int ret;
1913
1914 ret = smu_send_smc_msg_with_param(smu,
1915 SMU_MSG_GetVoltageByDpm,
1916 param,
1917 &value);
1918 if (ret) {
1919 pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
1920 return ret;
1921 }
1922
1923 *voltage = (uint16_t)value;
1924
1925 return 0;
1926}
1927
1928static int navi10_setup_od_limits(struct smu_context *smu) {
1929 struct smu_11_0_overdrive_table *overdrive_table = NULL;
1930 struct smu_11_0_powerplay_table *powerplay_table = NULL;
1931
1932 if (!smu->smu_table.power_play_table) {
1933 pr_err("powerplay table uninitialized!\n");
1934 return -ENOENT;
1935 }
1936 powerplay_table = (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
1937 overdrive_table = &powerplay_table->overdrive_table;
1938 if (!smu->od_settings) {
1939 smu->od_settings = kmemdup(overdrive_table, sizeof(struct smu_11_0_overdrive_table), GFP_KERNEL);
1940 } else {
1941 memcpy(smu->od_settings, overdrive_table, sizeof(struct smu_11_0_overdrive_table));
1942 }
1943 return 0;
1944}
1945
1946static bool navi10_is_baco_supported(struct smu_context *smu)
1947{
1948 struct amdgpu_device *adev = smu->adev;
1949 uint32_t val;
1950
1951 if (!smu_v11_0_baco_is_support(smu))
1952 return false;
1953
1954 val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
1955 return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false;
1956}
1957
1958static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize) {
1959 OverDriveTable_t *od_table, *boot_od_table;
1960 int ret = 0;
1961
1962 if (amdgpu_sriov_vf(smu->adev))
1963 return 0;
1964
1965 ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
1966 if (ret)
1967 return ret;
1968
1969 od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table;
1970 boot_od_table = (OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
1971 if (initialize) {
1972 ret = navi10_setup_od_limits(smu);
1973 if (ret) {
1974 pr_err("Failed to retrieve board OD limits\n");
1975 return ret;
1976 }
1977 if (od_table) {
1978 if (!od_table->GfxclkVolt1) {
1979 ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
1980 &od_table->GfxclkVolt1,
1981 od_table->GfxclkFreq1);
1982 if (ret)
1983 od_table->GfxclkVolt1 = 0;
1984 if (boot_od_table)
1985 boot_od_table->GfxclkVolt1 = od_table->GfxclkVolt1;
1986 }
1987
1988 if (!od_table->GfxclkVolt2) {
1989 ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
1990 &od_table->GfxclkVolt2,
1991 od_table->GfxclkFreq2);
1992 if (ret)
1993 od_table->GfxclkVolt2 = 0;
1994 if (boot_od_table)
1995 boot_od_table->GfxclkVolt2 = od_table->GfxclkVolt2;
1996 }
1997
1998 if (!od_table->GfxclkVolt3) {
1999 ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
2000 &od_table->GfxclkVolt3,
2001 od_table->GfxclkFreq3);
2002 if (ret)
2003 od_table->GfxclkVolt3 = 0;
2004 if (boot_od_table)
2005 boot_od_table->GfxclkVolt3 = od_table->GfxclkVolt3;
2006 }
2007 }
2008 }
2009
2010 if (od_table) {
2011 navi10_dump_od_table(od_table);
2012 }
2013
2014 return ret;
2015}
2016
2017static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) {
2018 int i;
2019 int ret = 0;
2020 struct smu_table_context *table_context = &smu->smu_table;
2021 OverDriveTable_t *od_table;
2022 struct smu_11_0_overdrive_table *od_settings;
2023 enum SMU_11_0_ODSETTING_ID freq_setting, voltage_setting;
2024 uint16_t *freq_ptr, *voltage_ptr;
2025 od_table = (OverDriveTable_t *)table_context->overdrive_table;
2026
2027 if (!smu->od_enabled) {
2028 pr_warn("OverDrive is not enabled!\n");
2029 return -EINVAL;
2030 }
2031
2032 if (!smu->od_settings) {
2033 pr_err("OD board limits are not set!\n");
2034 return -ENOENT;
2035 }
2036
2037 od_settings = smu->od_settings;
2038
2039 switch (type) {
2040 case PP_OD_EDIT_SCLK_VDDC_TABLE:
2041 if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
2042 pr_warn("GFXCLK_LIMITS not supported!\n");
2043 return -ENOTSUPP;
2044 }
2045 if (!table_context->overdrive_table) {
2046 pr_err("Overdrive is not initialized\n");
2047 return -EINVAL;
2048 }
2049 for (i = 0; i < size; i += 2) {
2050 if (i + 2 > size) {
2051 pr_info("invalid number of input parameters %d\n", size);
2052 return -EINVAL;
2053 }
2054 switch (input[i]) {
2055 case 0:
2056 freq_setting = SMU_11_0_ODSETTING_GFXCLKFMIN;
2057 freq_ptr = &od_table->GfxclkFmin;
2058 if (input[i + 1] > od_table->GfxclkFmax) {
2059 pr_info("GfxclkFmin (%ld) must be <= GfxclkFmax (%u)!\n",
2060 input[i + 1],
2061 od_table->GfxclkFmin);
2062 return -EINVAL;
2063 }
2064 break;
2065 case 1:
2066 freq_setting = SMU_11_0_ODSETTING_GFXCLKFMAX;
2067 freq_ptr = &od_table->GfxclkFmax;
2068 if (input[i + 1] < od_table->GfxclkFmin) {
2069 pr_info("GfxclkFmax (%ld) must be >= GfxclkFmin (%u)!\n",
2070 input[i + 1],
2071 od_table->GfxclkFmax);
2072 return -EINVAL;
2073 }
2074 break;
2075 default:
2076 pr_info("Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
2077 pr_info("Supported indices: [0:min,1:max]\n");
2078 return -EINVAL;
2079 }
2080 ret = navi10_od_setting_check_range(od_settings, freq_setting, input[i + 1]);
2081 if (ret)
2082 return ret;
2083 *freq_ptr = input[i + 1];
2084 }
2085 break;
2086 case PP_OD_EDIT_MCLK_VDDC_TABLE:
2087 if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
2088 pr_warn("UCLK_MAX not supported!\n");
2089 return -ENOTSUPP;
2090 }
2091 if (size < 2) {
2092 pr_info("invalid number of parameters: %d\n", size);
2093 return -EINVAL;
2094 }
2095 if (input[0] != 1) {
2096 pr_info("Invalid MCLK_VDDC_TABLE index: %ld\n", input[0]);
2097 pr_info("Supported indices: [1:max]\n");
2098 return -EINVAL;
2099 }
2100 ret = navi10_od_setting_check_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX, input[1]);
2101 if (ret)
2102 return ret;
2103 od_table->UclkFmax = input[1];
2104 break;
2105 case PP_OD_RESTORE_DEFAULT_TABLE:
2106 if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) {
2107 pr_err("Overdrive table was not initialized!\n");
2108 return -EINVAL;
2109 }
2110 memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t));
2111 break;
2112 case PP_OD_COMMIT_DPM_TABLE:
2113 navi10_dump_od_table(od_table);
2114 ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
2115 if (ret) {
2116 pr_err("Failed to import overdrive table!\n");
2117 return ret;
2118 }
2119
2120 ret = smu_handle_task(smu, smu->smu_dpm.dpm_level,
2121 AMD_PP_TASK_READJUST_POWER_STATE,
2122 false);
2123 if (ret) {
2124 return ret;
2125 }
2126 break;
2127 case PP_OD_EDIT_VDDC_CURVE:
2128 if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
2129 pr_warn("GFXCLK_CURVE not supported!\n");
2130 return -ENOTSUPP;
2131 }
2132 if (size < 3) {
2133 pr_info("invalid number of parameters: %d\n", size);
2134 return -EINVAL;
2135 }
2136 if (!od_table) {
2137 pr_info("Overdrive is not initialized\n");
2138 return -EINVAL;
2139 }
2140
2141 switch (input[0]) {
2142 case 0:
2143 freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1;
2144 voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1;
2145 freq_ptr = &od_table->GfxclkFreq1;
2146 voltage_ptr = &od_table->GfxclkVolt1;
2147 break;
2148 case 1:
2149 freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2;
2150 voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2;
2151 freq_ptr = &od_table->GfxclkFreq2;
2152 voltage_ptr = &od_table->GfxclkVolt2;
2153 break;
2154 case 2:
2155 freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3;
2156 voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3;
2157 freq_ptr = &od_table->GfxclkFreq3;
2158 voltage_ptr = &od_table->GfxclkVolt3;
2159 break;
2160 default:
2161 pr_info("Invalid VDDC_CURVE index: %ld\n", input[0]);
2162 pr_info("Supported indices: [0, 1, 2]\n");
2163 return -EINVAL;
2164 }
2165 ret = navi10_od_setting_check_range(od_settings, freq_setting, input[1]);
2166 if (ret)
2167 return ret;
2168
2169 if (input[2] != 0) {
2170 ret = navi10_od_setting_check_range(od_settings, voltage_setting, input[2]);
2171 if (ret)
2172 return ret;
2173 *freq_ptr = input[1];
2174 *voltage_ptr = ((uint16_t)input[2]) * NAVI10_VOLTAGE_SCALE;
2175 pr_debug("OD: set curve %ld: (%d, %d)\n", input[0], *freq_ptr, *voltage_ptr);
2176 } else {
2177
2178 od_table->GfxclkVolt1 = 0;
2179 od_table->GfxclkVolt2 = 0;
2180 od_table->GfxclkVolt3 = 0;
2181 }
2182 navi10_dump_od_table(od_table);
2183 break;
2184 default:
2185 return -ENOSYS;
2186 }
2187 return ret;
2188}
2189
2190static int navi10_run_btc(struct smu_context *smu)
2191{
2192 int ret = 0;
2193
2194 ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
2195 if (ret)
2196 pr_err("RunBtc failed!\n");
2197
2198 return ret;
2199}
2200
2201static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable)
2202{
2203 int result = 0;
2204
2205 if (!enable)
2206 result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL);
2207 else
2208 result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
2209
2210 return result;
2211}
2212
2213static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
2214{
2215 uint32_t uclk_count, uclk_min, uclk_max;
2216 uint32_t smu_version;
2217 int ret = 0;
2218
2219 ret = smu_get_smc_version(smu, NULL, &smu_version);
2220 if (ret)
2221 return ret;
2222
2223
2224 if (smu_version < 0x2A3200)
2225 return 0;
2226
2227 ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_count);
2228 if (ret)
2229 return ret;
2230
2231 ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min);
2232 if (ret)
2233 return ret;
2234
2235 ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max);
2236 if (ret)
2237 return ret;
2238
2239
2240 ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_min);
2241 if (ret)
2242 return ret;
2243
2244
2245 ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_max);
2246 if (ret)
2247 return ret;
2248
2249
2250
2251
2252
2253 return navi10_dummy_pstate_control(smu, true);
2254}
2255
2256static const struct pptable_funcs navi10_ppt_funcs = {
2257 .tables_init = navi10_tables_init,
2258 .alloc_dpm_context = navi10_allocate_dpm_context,
2259 .store_powerplay_table = navi10_store_powerplay_table,
2260 .check_powerplay_table = navi10_check_powerplay_table,
2261 .append_powerplay_table = navi10_append_powerplay_table,
2262 .get_smu_msg_index = navi10_get_smu_msg_index,
2263 .get_smu_clk_index = navi10_get_smu_clk_index,
2264 .get_smu_feature_index = navi10_get_smu_feature_index,
2265 .get_smu_table_index = navi10_get_smu_table_index,
2266 .get_smu_power_index = navi10_get_pwr_src_index,
2267 .get_workload_type = navi10_get_workload_type,
2268 .get_allowed_feature_mask = navi10_get_allowed_feature_mask,
2269 .set_default_dpm_table = navi10_set_default_dpm_table,
2270 .dpm_set_uvd_enable = navi10_dpm_set_uvd_enable,
2271 .dpm_set_jpeg_enable = navi10_dpm_set_jpeg_enable,
2272 .get_current_clk_freq_by_table = navi10_get_current_clk_freq_by_table,
2273 .print_clk_levels = navi10_print_clk_levels,
2274 .force_clk_levels = navi10_force_clk_levels,
2275 .populate_umd_state_clk = navi10_populate_umd_state_clk,
2276 .get_clock_by_type_with_latency = navi10_get_clock_by_type_with_latency,
2277 .pre_display_config_changed = navi10_pre_display_config_changed,
2278 .display_config_changed = navi10_display_config_changed,
2279 .notify_smc_display_config = navi10_notify_smc_display_config,
2280 .force_dpm_limit_value = navi10_force_dpm_limit_value,
2281 .unforce_dpm_levels = navi10_unforce_dpm_levels,
2282 .is_dpm_running = navi10_is_dpm_running,
2283 .get_fan_speed_percent = navi10_get_fan_speed_percent,
2284 .get_fan_speed_rpm = navi10_get_fan_speed_rpm,
2285 .get_power_profile_mode = navi10_get_power_profile_mode,
2286 .set_power_profile_mode = navi10_set_power_profile_mode,
2287 .get_profiling_clk_mask = navi10_get_profiling_clk_mask,
2288 .set_watermarks_table = navi10_set_watermarks_table,
2289 .read_sensor = navi10_read_sensor,
2290 .get_uclk_dpm_states = navi10_get_uclk_dpm_states,
2291 .set_performance_level = navi10_set_performance_level,
2292 .get_thermal_temperature_range = navi10_get_thermal_temperature_range,
2293 .display_disable_memory_clock_switch = navi10_display_disable_memory_clock_switch,
2294 .get_power_limit = navi10_get_power_limit,
2295 .update_pcie_parameters = navi10_update_pcie_parameters,
2296 .init_microcode = smu_v11_0_init_microcode,
2297 .load_microcode = smu_v11_0_load_microcode,
2298 .init_smc_tables = smu_v11_0_init_smc_tables,
2299 .fini_smc_tables = smu_v11_0_fini_smc_tables,
2300 .init_power = smu_v11_0_init_power,
2301 .fini_power = smu_v11_0_fini_power,
2302 .check_fw_status = smu_v11_0_check_fw_status,
2303 .setup_pptable = smu_v11_0_setup_pptable,
2304 .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
2305 .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
2306 .check_pptable = smu_v11_0_check_pptable,
2307 .parse_pptable = smu_v11_0_parse_pptable,
2308 .populate_smc_tables = smu_v11_0_populate_smc_pptable,
2309 .check_fw_version = smu_v11_0_check_fw_version,
2310 .write_pptable = smu_v11_0_write_pptable,
2311 .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
2312 .set_driver_table_location = smu_v11_0_set_driver_table_location,
2313 .set_tool_table_location = smu_v11_0_set_tool_table_location,
2314 .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
2315 .system_features_control = smu_v11_0_system_features_control,
2316 .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
2317 .init_display_count = smu_v11_0_init_display_count,
2318 .set_allowed_mask = smu_v11_0_set_allowed_mask,
2319 .get_enabled_mask = smu_v11_0_get_enabled_mask,
2320 .notify_display_change = smu_v11_0_notify_display_change,
2321 .set_power_limit = smu_v11_0_set_power_limit,
2322 .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
2323 .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
2324 .start_thermal_control = smu_v11_0_start_thermal_control,
2325 .stop_thermal_control = smu_v11_0_stop_thermal_control,
2326 .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
2327 .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
2328 .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
2329 .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
2330 .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
2331 .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
2332 .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
2333 .gfx_off_control = smu_v11_0_gfx_off_control,
2334 .register_irq_handler = smu_v11_0_register_irq_handler,
2335 .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
2336 .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
2337 .baco_is_support= navi10_is_baco_supported,
2338 .baco_get_state = smu_v11_0_baco_get_state,
2339 .baco_set_state = smu_v11_0_baco_set_state,
2340 .baco_enter = smu_v11_0_baco_enter,
2341 .baco_exit = smu_v11_0_baco_exit,
2342 .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
2343 .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
2344 .override_pcie_parameters = smu_v11_0_override_pcie_parameters,
2345 .set_default_od_settings = navi10_set_default_od_settings,
2346 .od_edit_dpm_table = navi10_od_edit_dpm_table,
2347 .get_pptable_power_limit = navi10_get_pptable_power_limit,
2348 .run_btc = navi10_run_btc,
2349 .disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround,
2350 .set_power_source = smu_v11_0_set_power_source,
2351};
2352
/* Install the Navi10 powerplay-table callback vtable on the SMU context. */
void navi10_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &navi10_ppt_funcs;
}
2357