#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "atom-types.h"
#include "atombios.h"
#include "processpptables.h"
#include "cgs_common.h"
#include "smumgr.h"
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "rv_ppsmc.h"
#include "smu10_hwmgr.h"
#include "power_state.h"
#include "soc15_common.h"
#include "smu10.h"
#include "asic_reg/pwr/pwr_10_0_offset.h"
#include "asic_reg/pwr/pwr_10_0_sh_mask.h"

#define SMU10_MAX_DEEPSLEEP_DIVIDER_ID     5
#define SMU10_MINIMUM_ENGINE_CLOCK         800
#define SCLK_MIN_DIV_INTV_SHIFT            12
#define SMU10_DISPCLK_BYPASS_THRESHOLD     10000
#define SMC_RAM_END                        0x40000

static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;

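/*
 * Translate a display clock request into the matching SMC hard-minimum
 * message. The request arrives in kHz and is divided down to MHz before
 * being sent; the last DCEF/FCLK minimums are cached so that unchanged
 * requests do not generate redundant SMC traffic.
 */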
static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
			struct pp_display_clock_request *clock_req)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
	PPSMC_Msg msg;

	switch (clk_type) {
	case amd_pp_dcf_clock:
		if (clk_freq == smu10_data->dcf_actual_hard_min_freq)
			return 0;
		msg = PPSMC_MSG_SetHardMinDcefclkByFreq;
		smu10_data->dcf_actual_hard_min_freq = clk_freq;
		break;
	case amd_pp_soc_clock:
		msg = PPSMC_MSG_SetHardMinSocclkByFreq;
		break;
	case amd_pp_f_clock:
		if (clk_freq == smu10_data->f_actual_hard_min_freq)
			return 0;
		smu10_data->f_actual_hard_min_freq = clk_freq;
		msg = PPSMC_MSG_SetHardMinFclkByFreq;
		break;
	default:
		pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
		return -EINVAL;
	}
	smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq, NULL);

	return 0;
}

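/*
 * Power-state casting helpers: the generic pp_hw_power_state is only
 * downcast to a smu10_power_state after its magic number has been
 * validated, so a mismatched state yields NULL rather than a bogus
 * pointer.
 */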
static struct smu10_power_state *cast_smu10_ps(struct pp_hw_power_state *hw_ps)
{
	if (SMU10_Magic != hw_ps->magic)
		return NULL;

	return (struct smu10_power_state *)hw_ps;
}

static const struct smu10_power_state *cast_const_smu10_ps(
				const struct pp_hw_power_state *hw_ps)
{
	if (SMU10_Magic != hw_ps->magic)
		return NULL;

	return (const struct smu10_power_state *)hw_ps;
}

static int smu10_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	smu10_data->dce_slow_sclk_threshold = 30000;
	smu10_data->thermal_auto_throttling_treshold = 0;
	smu10_data->is_nb_dpm_enabled = 1;
	smu10_data->dpm_flags = 1;
	smu10_data->need_min_deep_sleep_dcefclk = true;
	smu10_data->num_active_display = 0;
	smu10_data->deep_sleep_dcefclk = 0;

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_SclkDeepSleep);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SclkThrottleLowNotification);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_PowerPlaySupport);
	return 0;
}

static int smu10_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
			struct phm_clock_and_voltage_limits *table)
{
	return 0;
}

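/*
 * Build the fixed DAL power-level to voltage dependency table. Each DAL
 * power level (0-7) is paired with a matching index in the voltage
 * field; the values are plain indices used for ranking, not millivolts.
 */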
static int smu10_init_dynamic_state_adjustment_rule_settings(
							struct pp_hwmgr *hwmgr)
{
	struct phm_clock_voltage_dependency_table *table_clk_vlt;

	/* the table below is filled with 8 entries, so allocate all 8 */
	table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 8),
				GFP_KERNEL);
	if (!table_clk_vlt) {
		pr_err("Cannot allocate memory!\n");
		return -ENOMEM;
	}

	table_clk_vlt->count = 8;
	table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
	table_clk_vlt->entries[0].v = 0;
	table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
	table_clk_vlt->entries[1].v = 1;
	table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
	table_clk_vlt->entries[2].v = 2;
	table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
	table_clk_vlt->entries[3].v = 3;
	table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
	table_clk_vlt->entries[4].v = 4;
	table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
	table_clk_vlt->entries[5].v = 5;
	table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
	table_clk_vlt->entries[6].v = 6;
	table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
	table_clk_vlt->entries[7].v = 7;
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;

	return 0;
}

static int smu10_get_system_info_data(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)hwmgr->backend;

	smu10_data->sys_info.htc_hyst_lmt = 5;
	smu10_data->sys_info.htc_tmp_lmt = 203;

	if (smu10_data->thermal_auto_throttling_treshold == 0)
		smu10_data->thermal_auto_throttling_treshold = 203;

	smu10_construct_max_power_limits_table(hwmgr,
				    &hwmgr->dyn_state.max_clock_voltage_on_ac);

	smu10_init_dynamic_state_adjustment_rule_settings(hwmgr);

	return 0;
}

static int smu10_construct_boot_state(struct pp_hwmgr *hwmgr)
{
	return 0;
}

static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
{
	struct PP_Clocks clocks = {0};
	struct pp_display_clock_request clock_req;

	clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
	clock_req.clock_type = amd_pp_dcf_clock;
	clock_req.clock_freq_in_khz = clocks.dcefClock * 10;

	PP_ASSERT_WITH_CODE(!smu10_display_clock_voltage_request(hwmgr, &clock_req),
				"Attempt to set DCF Clock Failed!", return -EINVAL);

	return 0;
}

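/*
 * The helpers below program hard-minimum / soft-maximum clock limits.
 * Each one caches the last value written and skips the SMC message when
 * nothing changed; a clock of 0 is treated as "no request". Note that
 * smu10_set_soft_max_gfxclk_by_freq keeps gfx_max_freq_limit scaled by
 * 100 (apparently 10 kHz units) while sending the raw value to the SMC.
 */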
static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (clock && smu10_data->deep_sleep_dcefclk != clock) {
		smu10_data->deep_sleep_dcefclk = clock;
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetMinDeepSleepDcefclk,
					smu10_data->deep_sleep_dcefclk,
					NULL);
	}
	return 0;
}

static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (clock && smu10_data->dcf_actual_hard_min_freq != clock) {
		smu10_data->dcf_actual_hard_min_freq = clock;
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetHardMinDcefclkByFreq,
					smu10_data->dcf_actual_hard_min_freq,
					NULL);
	}
	return 0;
}

static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (clock && smu10_data->f_actual_hard_min_freq != clock) {
		smu10_data->f_actual_hard_min_freq = clock;
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetHardMinFclkByFreq,
					smu10_data->f_actual_hard_min_freq,
					NULL);
	}
	return 0;
}

static int smu10_set_hard_min_gfxclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (clock && smu10_data->gfx_actual_soft_min_freq != clock) {
		smu10_data->gfx_actual_soft_min_freq = clock;
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetHardMinGfxClk,
					smu10_data->gfx_actual_soft_min_freq,
					NULL);
	}
	return 0;
}

static int smu10_set_soft_max_gfxclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (clock && smu10_data->gfx_max_freq_limit != (clock * 100)) {
		smu10_data->gfx_max_freq_limit = clock * 100;
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSoftMaxGfxClk,
					clock,
					NULL);
	}
	return 0;
}

static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (smu10_data->num_active_display != count) {
		smu10_data->num_active_display = count;
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetDisplayCount,
				smu10_data->num_active_display,
				NULL);
	}

	return 0;
}

static int smu10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	return smu10_set_clock_limit(hwmgr, input);
}

static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;

	smu10_data->vcn_power_gated = true;
	smu10_data->isp_tileA_power_gated = true;
	smu10_data->isp_tileB_power_gated = true;

	if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
		return smum_send_msg_to_smc_with_parameter(hwmgr,
							   PPSMC_MSG_SetGfxCGPG,
							   true,
							   NULL);
	else
		return 0;
}

static int smu10_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	return smu10_init_power_gate_state(hwmgr);
}

static int smu10_reset_cc6_data(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	smu10_data->separation_time = 0;
	smu10_data->cc6_disable = false;
	smu10_data->pstate_disable = false;
	smu10_data->cc6_setting_changed = false;

	return 0;
}

static int smu10_power_off_asic(struct pp_hwmgr *hwmgr)
{
	return smu10_reset_cc6_data(hwmgr);
}

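/*
 * GFXOFF status lives in PWR_MISC_CNTL_STATUS; the driver treats a
 * PWR_GFXOFF_STATUS field value of 0x2 as "GFX block is on".
 */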
static bool smu10_is_gfx_on(struct pp_hwmgr *hwmgr)
{
	uint32_t reg;
	struct amdgpu_device *adev = hwmgr->adev;

	reg = RREG32_SOC15(PWR, 0, mmPWR_MISC_CNTL_STATUS);
	if ((reg & PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK) ==
	    (0x2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT))
		return true;

	return false;
}

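/*
 * GFXOFF enable/disable. Disabling is synchronous: after sending
 * PPSMC_MSG_DisableGfxOff the driver polls until the GFX block reports
 * it is back on, likely so that callers can safely touch GFX registers
 * afterwards.
 */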
static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff, NULL);

		/* confirm gfx is back to "on" state */
		while (!smu10_is_gfx_on(hwmgr))
			msleep(1);
	}

	return 0;
}

static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	return 0;
}

static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	if (adev->pm.pp_feature & PP_GFXOFF_MASK)
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff, NULL);

	return 0;
}

static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	return 0;
}

static int smu10_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
{
	if (enable)
		return smu10_enable_gfx_off(hwmgr);
	else
		return smu10_disable_gfx_off(hwmgr);
}

static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				struct pp_power_state *prequest_ps,
			const struct pp_power_state *pcurrent_ps)
{
	return 0;
}

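/*
 * Fallback clock/voltage dependency tables, used when the clock table
 * read back from the SMU is empty. Each entry pairs a clock in MHz with
 * a voltage value (consumed as voltage_in_mv by the clock-levels
 * interface below).
 */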
static const DpmClock_t VddDcfClk[] = {
	{ 300, 2600},
	{ 600, 3200},
	{ 600, 3600},
};

static const DpmClock_t VddSocClk[] = {
	{ 478, 2600},
	{ 722, 3200},
	{ 722, 3600},
};

static const DpmClock_t VddFClk[] = {
	{ 400, 2600},
	{1200, 3200},
	{1200, 3600},
};

static const DpmClock_t VddDispClk[] = {
	{ 435, 2600},
	{ 661, 3200},
	{1086, 3600},
};

static const DpmClock_t VddDppClk[] = {
	{ 435, 2600},
	{ 661, 3200},
	{ 661, 3600},
};

static const DpmClock_t VddPhyClk[] = {
	{ 540, 2600},
	{ 810, 3200},
	{ 810, 3600},
};

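/*
 * Copy one SMU clock/voltage table into a freshly allocated driver-side
 * dependency table. Clocks are converted from MHz to the driver's
 * 10 kHz units (hence the * 100).
 */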
static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
			struct smu10_voltage_dependency_table **pptable,
			uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
{
	uint32_t i;
	struct smu10_voltage_dependency_table *ptable;

	ptable = kzalloc(struct_size(ptable, entries, num_entry), GFP_KERNEL);
	if (!ptable)
		return -ENOMEM;

	ptable->count = num_entry;

	for (i = 0; i < ptable->count; i++) {
		ptable->entries[i].clk = pclk_dependency_table->Freq * 100;
		ptable->entries[i].vol = pclk_dependency_table->Vol;
		pclk_dependency_table++;
	}

	*pptable = ptable;

	return 0;
}

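/*
 * Read the DPM clock table from the SMU and build the per-domain
 * dependency tables. If the SMU table is unusable (DCEF level 0 reads
 * back as 0 MHz) the hardcoded fallback tables above are used instead.
 * The min/max GFX clock limits are then queried and cached in 10 kHz
 * units (MHz * 100).
 */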
static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
{
	uint32_t result;

	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	DpmClocks_t *table = &(smu10_data->clock_table);
	struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);

	result = smum_smc_table_manager(hwmgr, (uint8_t *)table, SMU10_CLOCKTABLE, true);

	PP_ASSERT_WITH_CODE((0 == result),
			"Attempt to copy clock table from smc failed",
			return result);

	if (0 == result && table->DcefClocks[0].Freq != 0) {
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
						NUM_DCEFCLK_DPM_LEVELS,
						&smu10_data->clock_table.DcefClocks[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
						NUM_SOCCLK_DPM_LEVELS,
						&smu10_data->clock_table.SocClocks[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
						NUM_FCLK_DPM_LEVELS,
						&smu10_data->clock_table.FClocks[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_mclk,
						NUM_MEMCLK_DPM_LEVELS,
						&smu10_data->clock_table.MemClocks[0]);
	} else {
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
						ARRAY_SIZE(VddDcfClk),
						&VddDcfClk[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
						ARRAY_SIZE(VddSocClk),
						&VddSocClk[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
						ARRAY_SIZE(VddFClk),
						&VddFClk[0]);
	}
	smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dispclk,
					ARRAY_SIZE(VddDispClk),
					&VddDispClk[0]);
	smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dppclk,
					ARRAY_SIZE(VddDppClk), &VddDppClk[0]);
	smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
					ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &result);
	smu10_data->gfx_min_freq_limit = result / 10 * 1000;

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &result);
	smu10_data->gfx_max_freq_limit = result / 10 * 1000;

	return 0;
}

static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	struct smu10_hwmgr *data;

	data = kzalloc(sizeof(struct smu10_hwmgr), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	hwmgr->backend = data;

	result = smu10_initialize_dpm_defaults(hwmgr);
	if (result != 0) {
		pr_err("smu10_initialize_dpm_defaults failed\n");
		return result;
	}

	smu10_populate_clock_table(hwmgr);

	result = smu10_get_system_info_data(hwmgr);
	if (result != 0) {
		pr_err("smu10_get_system_info_data failed\n");
		return result;
	}

	smu10_construct_boot_state(hwmgr);

	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
						SMU10_MAX_HARDWARE_POWERLEVELS;

	hwmgr->platform_descriptor.hardwarePerformanceLevels =
						SMU10_MAX_HARDWARE_POWERLEVELS;

	hwmgr->platform_descriptor.vbiosInterruptId = 0;

	hwmgr->platform_descriptor.clockStep.engineClock = 500;

	hwmgr->platform_descriptor.clockStep.memoryClock = 500;

	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

	hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100;
	hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100;

	/* enable the pp_od_clk_voltage sysfs file */
	hwmgr->od_enabled = 1;

	return result;
}

static int smu10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);

	kfree(pinfo->vdd_dep_on_dcefclk);
	pinfo->vdd_dep_on_dcefclk = NULL;
	kfree(pinfo->vdd_dep_on_socclk);
	pinfo->vdd_dep_on_socclk = NULL;
	kfree(pinfo->vdd_dep_on_fclk);
	pinfo->vdd_dep_on_fclk = NULL;
	kfree(pinfo->vdd_dep_on_dispclk);
	pinfo->vdd_dep_on_dispclk = NULL;
	kfree(pinfo->vdd_dep_on_dppclk);
	pinfo->vdd_dep_on_dppclk = NULL;
	kfree(pinfo->vdd_dep_on_phyclk);
	pinfo->vdd_dep_on_phyclk = NULL;

	kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;

	kfree(hwmgr->backend);
	hwmgr->backend = NULL;

	return 0;
}

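/*
 * Map an amd_dpm_forced_level onto hard-min/soft-max clock limits. The
 * "profile" levels pin GFX/FCLK/SOC/VCN clocks to fixed pstates, LOW and
 * HIGH pin them to the min/max limits, and AUTO restores the full range.
 * Requires SMU firmware >= 0x1E3700; older firmware only gets a log
 * message.
 */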
static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
				enum amd_dpm_forced_level level)
{
	struct smu10_hwmgr *data = hwmgr->backend;
	uint32_t min_sclk = hwmgr->display_config->min_core_set_clock;
	uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;
	uint32_t index_fclk = data->clock_vol_info.vdd_dep_on_fclk->count - 1;
	uint32_t index_socclk = data->clock_vol_info.vdd_dep_on_socclk->count - 1;

	if (hwmgr->smu_version < 0x1E3700) {
		pr_info("smu firmware version too old, cannot set dpm level\n");
		return 0;
	}

	if (min_sclk < data->gfx_min_freq_limit)
		min_sclk = data->gfx_min_freq_limit;

	min_sclk /= 100; /* convert 10 kHz units to MHz */
	if (min_mclk < data->clock_table.FClocks[0].Freq)
		min_mclk = data->clock_table.FClocks[0].Freq;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						data->gfx_max_freq_limit/100,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						SMU10_UMD_PSTATE_PEAK_FCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinSocclkByFreq,
						SMU10_UMD_PSTATE_PEAK_SOCCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinVcn,
						SMU10_UMD_PSTATE_VCE,
						NULL);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						data->gfx_max_freq_limit/100,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						SMU10_UMD_PSTATE_PEAK_FCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxSocclkByFreq,
						SMU10_UMD_PSTATE_PEAK_SOCCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxVcn,
						SMU10_UMD_PSTATE_VCE,
						NULL);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						min_sclk,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						min_sclk,
						NULL);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						min_mclk,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						min_mclk,
						NULL);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						SMU10_UMD_PSTATE_GFXCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						SMU10_UMD_PSTATE_FCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinSocclkByFreq,
						SMU10_UMD_PSTATE_SOCCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinVcn,
						SMU10_UMD_PSTATE_PROFILE_VCE,
						NULL);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						SMU10_UMD_PSTATE_GFXCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						SMU10_UMD_PSTATE_FCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxSocclkByFreq,
						SMU10_UMD_PSTATE_SOCCLK,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxVcn,
						SMU10_UMD_PSTATE_PROFILE_VCE,
						NULL);
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						min_sclk,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						hwmgr->display_config->num_display > 3 ?
						data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk :
						min_mclk,
						NULL);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinSocclkByFreq,
						data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinVcn,
						SMU10_UMD_PSTATE_MIN_VCE,
						NULL);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						data->gfx_max_freq_limit/100,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxSocclkByFreq,
						data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxVcn,
						SMU10_UMD_PSTATE_VCE,
						NULL);
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						data->gfx_min_freq_limit/100,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						data->gfx_min_freq_limit/100,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						min_mclk,
						NULL);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						min_mclk,
						NULL);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}
	return 0;
}

static uint32_t smu10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct smu10_hwmgr *data;

	if (hwmgr == NULL)
		return -EINVAL;

	data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (low)
		return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
	else
		return data->clock_vol_info.vdd_dep_on_fclk->entries[
			data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
}

static uint32_t smu10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct smu10_hwmgr *data;

	if (hwmgr == NULL)
		return -EINVAL;

	data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (low)
		return data->gfx_min_freq_limit;
	else
		return data->gfx_max_freq_limit;
}

static int smu10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *hw_ps)
{
	return 0;
}

static int smu10_dpm_get_pp_table_entry_callback(
					struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *hw_ps,
					unsigned int index,
					const void *clock_info)
{
	struct smu10_power_state *smu10_ps = cast_smu10_ps(hw_ps);

	smu10_ps->levels[index].engine_clock = 0;

	smu10_ps->levels[index].vddc_index = 0;
	smu10_ps->level = index + 1;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
		smu10_ps->levels[index].ds_divider_index = 5;
		smu10_ps->levels[index].ss_divider_index = 5;
	}

	return 0;
}

static int smu10_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
{
	int result;
	unsigned long ret = 0;

	result = pp_tables_get_num_of_entries(hwmgr, &ret);

	return result ? 0 : ret;
}

static int smu10_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
		    unsigned long entry, struct pp_power_state *ps)
{
	int result;
	struct smu10_power_state *smu10_ps;

	ps->hardware.magic = SMU10_Magic;

	smu10_ps = cast_smu10_ps(&(ps->hardware));

	result = pp_tables_get_entry(hwmgr, entry, ps,
			smu10_dpm_get_pp_table_entry_callback);

	smu10_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
	smu10_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;

	return result;
}

static int smu10_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct smu10_power_state);
}

static int smu10_set_cpu_power_state(struct pp_hwmgr *hwmgr)
{
	return 0;
}

static int smu10_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
			bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
{
	struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (separation_time != data->separation_time ||
	    cc6_disable != data->cc6_disable ||
	    pstate_disable != data->pstate_disable) {
		data->separation_time = separation_time;
		data->cc6_disable = cc6_disable;
		data->pstate_disable = pstate_disable;
		data->cc6_setting_changed = true;
	}
	return 0;
}

static int smu10_get_dal_power_level(struct pp_hwmgr *hwmgr,
		struct amd_pp_simple_clock_info *info)
{
	return -EINVAL;
}

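/*
 * Force specific SCLK/MCLK levels from the sysfs mask. SCLK exposes
 * exactly three levels on this ASIC (min, intermediate pstate, max);
 * MCLK levels come straight from the FCLK dependency table.
 */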
static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask)
{
	struct smu10_hwmgr *data = hwmgr->backend;
	struct smu10_voltage_dependency_table *mclk_table =
					data->clock_vol_info.vdd_dep_on_fclk;
	uint32_t low, high;

	low = mask ? (ffs(mask) - 1) : 0;
	high = mask ? (fls(mask) - 1) : 0;

	switch (type) {
	case PP_SCLK:
		if (low > 2 || high > 2) {
			pr_info("Currently sclk only supports 3 levels on RV\n");
			return -EINVAL;
		}

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						low == 2 ? data->gfx_max_freq_limit/100 :
						low == 1 ? SMU10_UMD_PSTATE_GFXCLK :
						data->gfx_min_freq_limit/100,
						NULL);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						high == 0 ? data->gfx_min_freq_limit/100 :
						high == 1 ? SMU10_UMD_PSTATE_GFXCLK :
						data->gfx_max_freq_limit/100,
						NULL);
		break;

	case PP_MCLK:
		if (low > mclk_table->count - 1 || high > mclk_table->count - 1)
			return -EINVAL;

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						mclk_table->entries[low].clk/100,
						NULL);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						mclk_table->entries[high].clk/100,
						NULL);
		break;

	case PP_PCIE:
	default:
		break;
	}
	return 0;
}

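/*
 * Print the available clock levels, marking the currently active one
 * with '*'. OD_SCLK/OD_RANGE output is only produced when overdrive
 * (hwmgr->od_enabled) is enabled.
 */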
static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct smu10_voltage_dependency_table *mclk_table =
			data->clock_vol_info.vdd_dep_on_fclk;
	uint32_t i, now, size = 0;

	switch (type) {
	case PP_SCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);

		/* driver only knows the min/max gfx clocks; report anything
		 * in between as the intermediate level 1 */
		if (now == data->gfx_max_freq_limit/100)
			i = 2;
		else if (now == data->gfx_min_freq_limit/100)
			i = 0;
		else
			i = 1;

		size += sprintf(buf + size, "0: %uMhz %s\n",
					data->gfx_min_freq_limit/100,
					i == 0 ? "*" : "");
		size += sprintf(buf + size, "1: %uMhz %s\n",
					i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK,
					i == 1 ? "*" : "");
		size += sprintf(buf + size, "2: %uMhz %s\n",
					data->gfx_max_freq_limit/100,
					i == 2 ? "*" : "");
		break;
	case PP_MCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i,
					mclk_table->entries[i].clk / 100,
					((mclk_table->entries[i].clk / 100)
					 == now) ? "*" : "");
		break;
	case OD_SCLK:
		if (hwmgr->od_enabled) {
			size = sprintf(buf, "%s:\n", "OD_SCLK");

			size += sprintf(buf + size, "0: %10uMhz\n",
			(data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : data->gfx_min_freq_limit/100);
			size += sprintf(buf + size, "1: %10uMhz\n", data->gfx_max_freq_limit/100);
		}
		break;
	case OD_RANGE:
		if (hwmgr->od_enabled) {
			uint32_t min_freq, max_freq = 0;

			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);

			size = sprintf(buf, "%s:\n", "OD_RANGE");
			size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
					min_freq, max_freq);
		}
		break;
	default:
		break;
	}

	return size;
}

static int smu10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
				PHM_PerformanceLevelDesignation designation, uint32_t index,
				PHM_PerformanceLevel *level)
{
	struct smu10_hwmgr *data;

	if (level == NULL || hwmgr == NULL || state == NULL)
		return -EINVAL;

	data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (index == 0) {
		level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
		level->coreClock = data->gfx_min_freq_limit;
	} else {
		level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[
			data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
		level->coreClock = data->gfx_max_freq_limit;
	}

	level->nonLocalMemoryFreq = 0;
	level->nonLocalMemoryWidth = 0;

	return 0;
}

static int smu10_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
	const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
{
	const struct smu10_power_state *ps = cast_const_smu10_ps(state);

	clock_info->min_eng_clk = ps->levels[0].engine_clock / (1 << (ps->levels[0].ss_divider_index));
	clock_info->max_eng_clk = ps->levels[ps->level - 1].engine_clock / (1 << (ps->levels[ps->level - 1].ss_divider_index));

	return 0;
}

#define MEM_FREQ_LOW_LATENCY        25000
#define MEM_FREQ_HIGH_LATENCY       80000
#define MEM_LATENCY_HIGH            245
#define MEM_LATENCY_LOW             35
#define MEM_LATENCY_ERR             0xFFFF

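/*
 * Memory latency lookup. The clock argument is in the driver's 10 kHz
 * units, so the 25000/80000 thresholds correspond to 250 MHz and
 * 800 MHz; the returned latency is consumed as microseconds
 * (latency_in_us) by the caller.
 */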
static uint32_t smu10_get_mem_latency(struct pp_hwmgr *hwmgr,
		uint32_t clock)
{
	if (clock >= MEM_FREQ_LOW_LATENCY &&
			clock < MEM_FREQ_HIGH_LATENCY)
		return MEM_LATENCY_HIGH;
	else if (clock >= MEM_FREQ_HIGH_LATENCY)
		return MEM_LATENCY_LOW;
	else
		return MEM_LATENCY_ERR;
}

static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_latency *clocks)
{
	uint32_t i;
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
	struct smu10_voltage_dependency_table *pclk_vol_table;
	bool latency_required = false;

	if (pinfo == NULL)
		return -EINVAL;

	switch (type) {
	case amd_pp_mem_clock:
		pclk_vol_table = pinfo->vdd_dep_on_mclk;
		latency_required = true;
		break;
	case amd_pp_f_clock:
		pclk_vol_table = pinfo->vdd_dep_on_fclk;
		latency_required = true;
		break;
	case amd_pp_dcf_clock:
		pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
		break;
	case amd_pp_disp_clock:
		pclk_vol_table = pinfo->vdd_dep_on_dispclk;
		break;
	case amd_pp_phy_clock:
		pclk_vol_table = pinfo->vdd_dep_on_phyclk;
		break;
	case amd_pp_dpp_clock:
		pclk_vol_table = pinfo->vdd_dep_on_dppclk;
		break;
	default:
		return -EINVAL;
	}

	if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
		return -EINVAL;

	clocks->num_levels = 0;
	for (i = 0; i < pclk_vol_table->count; i++) {
		if (pclk_vol_table->entries[i].clk) {
			clocks->data[clocks->num_levels].clocks_in_khz =
				pclk_vol_table->entries[i].clk * 10;
			clocks->data[clocks->num_levels].latency_in_us = latency_required ?
				smu10_get_mem_latency(hwmgr,
						      pclk_vol_table->entries[i].clk) :
				0;
			clocks->num_levels++;
		}
	}

	return 0;
}

static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_voltage *clocks)
{
	uint32_t i;
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
	struct smu10_voltage_dependency_table *pclk_vol_table = NULL;

	if (pinfo == NULL)
		return -EINVAL;

	switch (type) {
	case amd_pp_mem_clock:
		pclk_vol_table = pinfo->vdd_dep_on_mclk;
		break;
	case amd_pp_f_clock:
		pclk_vol_table = pinfo->vdd_dep_on_fclk;
		break;
	case amd_pp_dcf_clock:
		pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
		break;
	case amd_pp_soc_clock:
		pclk_vol_table = pinfo->vdd_dep_on_socclk;
		break;
	case amd_pp_disp_clock:
		pclk_vol_table = pinfo->vdd_dep_on_dispclk;
		break;
	case amd_pp_phy_clock:
		pclk_vol_table = pinfo->vdd_dep_on_phyclk;
		break;
	default:
		return -EINVAL;
	}

	if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
		return -EINVAL;

	clocks->num_levels = 0;
	for (i = 0; i < pclk_vol_table->count; i++) {
		if (pclk_vol_table->entries[i].clk) {
			clocks->data[clocks->num_levels].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
			clocks->data[clocks->num_levels].voltage_in_mv = pclk_vol_table->entries[i].vol;
			clocks->num_levels++;
		}
	}

	return 0;
}

static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
{
	clocks->engine_max_clock = 80000;
	return 0;
}

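/*
 * Read the current temperature from THM_TCON_CUR_TMP. The raw value is
 * in 1/8 degC steps; when the range-select bit is set the reading is
 * biased by -49 degC. The result is scaled by
 * PP_TEMPERATURE_UNITS_PER_CENTIGRADES for the sensor interface.
 */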
static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t reg_value = RREG32_SOC15(THM, 0, mmTHM_TCON_CUR_TMP);
	int cur_temp =
		(reg_value & THM_TCON_CUR_TMP__CUR_TEMP_MASK) >> THM_TCON_CUR_TMP__CUR_TEMP__SHIFT;

	if (cur_temp & THM_TCON_CUR_TMP__CUR_TEMP_RANGE_SEL_MASK)
		cur_temp = ((cur_temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	else
		cur_temp = (cur_temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return cur_temp;
}

static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
			     void *value, int *size)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	uint32_t sclk, mclk;
	int ret = 0;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &sclk);
		/* in units of 10KHZ */
		*((uint32_t *)value) = sclk * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &mclk);
		/* in units of 10KHZ */
		*((uint32_t *)value) = mclk * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr);
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)value = smu10_data->vcn_power_gated ? 0 : 1;
		*size = 4;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
		void *clock_ranges)
{
	struct smu10_hwmgr *data = hwmgr->backend;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
	Watermarks_t *table = &(data->water_marks_table);
	struct amdgpu_device *adev = hwmgr->adev;
	int i;

	smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
		for (i = 0; i < NUM_WM_RANGES; i++)
			table->WatermarkRow[WM_DCFCLK][i].WmType = (uint8_t)0;

		for (i = 0; i < NUM_WM_RANGES; i++)
			table->WatermarkRow[WM_SOCCLK][i].WmType = (uint8_t)0;
	}

	smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false);
	data->water_marks_exist = true;
	return 0;
}

static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister, NULL);
}

static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub, NULL);
}

static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
{
	if (gate)
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma, NULL);
	else
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma, NULL);
}

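/*
 * VCN power gating. Note the ordering: the IP block is gated before the
 * SMU powers the domain down, and the domain is powered up before the
 * IP block is ungated, presumably so the block is never accessed while
 * unpowered.
 */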
static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (bgate) {
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_VCN,
						AMD_PG_STATE_GATE);
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_PowerDownVcn, 0, NULL);
		smu10_data->vcn_power_gated = true;
	} else {
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_PowerUpVcn, 0, NULL);
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_VCN,
						AMD_PG_STATE_UNGATE);
		smu10_data->vcn_power_gated = false;
	}
}

static int conv_power_profile_to_pplib_workload(int power_profile)
{
	int pplib_workload = 0;

	switch (power_profile) {
	case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
		pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
		break;
	case PP_SMC_POWER_PROFILE_VIDEO:
		pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
		break;
	case PP_SMC_POWER_PROFILE_VR:
		pplib_workload = WORKLOAD_PPLIB_VR_BIT;
		break;
	case PP_SMC_POWER_PROFILE_COMPUTE:
		pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
		break;
	case PP_SMC_POWER_PROFILE_CUSTOM:
		pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
		break;
	}

	return pplib_workload;
}

static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
	uint32_t i, size = 0;
	static const uint8_t
		profile_mode_setting[6][4] = {{70, 60, 0, 0,},
						{70, 60, 1, 3,},
						{90, 60, 0, 0,},
						{70, 60, 0, 0,},
						{70, 90, 0, 0,},
						{30, 60, 0, 6,},
						};
	static const char *profile_name[6] = {
					"BOOTUP_DEFAULT",
					"3D_FULL_SCREEN",
					"POWER_SAVING",
					"VIDEO",
					"VR",
					"COMPUTE"};
	static const char *title[6] = {"NUM",
			"MODE_NAME",
			"BUSY_SET_POINT",
			"FPS",
			"USE_RLC_BUSY",
			"MIN_ACTIVE_LEVEL"};

	if (!buf)
		return -EINVAL;

	size += sprintf(buf + size, "%s %16s %s %s %s %s\n", title[0],
			title[1], title[2], title[3], title[4], title[5]);

	for (i = 0; i <= PP_SMC_POWER_PROFILE_COMPUTE; i++)
		size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n",
			i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
			profile_mode_setting[i][0], profile_mode_setting[i][1],
			profile_mode_setting[i][2], profile_mode_setting[i][3]);

	return size;
}

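/*
 * Raven1 "refresh" parts are RAVEN APUs running SMU firmware 0x41e2b or
 * newer; smu10_set_power_profile_mode below apparently needs GFXOFF
 * temporarily disabled on these parts while switching to a non-default
 * workload profile.
 */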
static bool smu10_is_raven1_refresh(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
	    (hwmgr->smu_version >= 0x41e2b))
		return true;
	else
		return false;
}

static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
	int workload_type = 0;
	int result = 0;

	if (input[size] > PP_SMC_POWER_PROFILE_COMPUTE) {
		pr_err("Invalid power profile mode %ld\n", input[size]);
		return -EINVAL;
	}
	if (hwmgr->power_profile_mode == input[size])
		return 0;

	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
	workload_type =
		conv_power_profile_to_pplib_workload(input[size]);
	if (workload_type &&
	    smu10_is_raven1_refresh(hwmgr) &&
	    !hwmgr->gfxoff_state_changed_by_workload) {
		smu10_gfx_off_control(hwmgr, false);
		hwmgr->gfxoff_state_changed_by_workload = true;
	}
	result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify,
						1 << workload_type,
						NULL);
	if (!result)
		hwmgr->power_profile_mode = input[size];
	if (workload_type && hwmgr->gfxoff_state_changed_by_workload) {
		smu10_gfx_off_control(hwmgr, true);
		hwmgr->gfxoff_state_changed_by_workload = false;
	}

	return 0;
}

static int smu10_asic_reset(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode)
{
	return smum_send_msg_to_smc_with_parameter(hwmgr,
						   PPSMC_MSG_DeviceDriverReset,
						   mode,
						   NULL);
}

static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
					enum PP_OD_DPM_TABLE_COMMAND type,
					long *input, uint32_t size)
{
	if (!hwmgr->od_enabled) {
		pr_err("Fine grain is not supported\n");
		return -EINVAL;
	}

	if (size != 2) {
		pr_err("Invalid number of input parameters\n");
		return -EINVAL;
	}

	if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
		if (input[0] == 0)
			smu10_set_hard_min_gfxclk_by_freq(hwmgr, input[1]);
		else if (input[0] == 1)
			smu10_set_soft_max_gfxclk_by_freq(hwmgr, input[1]);
		else
			return -EINVAL;
	}

	return 0;
}

static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
	.backend_init = smu10_hwmgr_backend_init,
	.backend_fini = smu10_hwmgr_backend_fini,
	.apply_state_adjust_rules = smu10_apply_state_adjust_rules,
	.force_dpm_level = smu10_dpm_force_dpm_level,
	.get_power_state_size = smu10_get_power_state_size,
	.powerdown_uvd = NULL,
	.powergate_uvd = smu10_powergate_vcn,
	.powergate_vce = NULL,
	.get_mclk = smu10_dpm_get_mclk,
	.get_sclk = smu10_dpm_get_sclk,
	.patch_boot_state = smu10_dpm_patch_boot_state,
	.get_pp_table_entry = smu10_dpm_get_pp_table_entry,
	.get_num_of_pp_table_entries = smu10_dpm_get_num_of_pp_table_entries,
	.set_cpu_power_state = smu10_set_cpu_power_state,
	.store_cc6_data = smu10_store_cc6_data,
	.force_clock_level = smu10_force_clock_level,
	.print_clock_levels = smu10_print_clock_levels,
	.get_dal_power_level = smu10_get_dal_power_level,
	.get_performance_level = smu10_get_performance_level,
	.get_current_shallow_sleep_clocks = smu10_get_current_shallow_sleep_clocks,
	.get_clock_by_type_with_latency = smu10_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = smu10_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = smu10_set_watermarks_for_clocks_ranges,
	.get_max_high_clocks = smu10_get_max_high_clocks,
	.read_sensor = smu10_read_sensor,
	.set_active_display_count = smu10_set_active_display_count,
	.set_min_deep_sleep_dcefclk = smu10_set_min_deep_sleep_dcefclk,
	.dynamic_state_management_enable = smu10_enable_dpm_tasks,
	.power_off_asic = smu10_power_off_asic,
	.asic_setup = smu10_setup_asic_task,
	.power_state_set = smu10_set_power_state_tasks,
	.dynamic_state_management_disable = smu10_disable_dpm_tasks,
	.powergate_mmhub = smu10_powergate_mmhub,
	.smus_notify_pwe = smu10_smus_notify_pwe,
	.display_clock_voltage_request = smu10_display_clock_voltage_request,
	.powergate_gfx = smu10_gfx_off_control,
	.powergate_sdma = smu10_powergate_sdma,
	.set_hard_min_dcefclk_by_freq = smu10_set_hard_min_dcefclk_by_freq,
	.set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq,
	.set_hard_min_gfxclk_by_freq = smu10_set_hard_min_gfxclk_by_freq,
	.set_soft_max_gfxclk_by_freq = smu10_set_soft_max_gfxclk_by_freq,
	.get_power_profile_mode = smu10_get_power_profile_mode,
	.set_power_profile_mode = smu10_set_power_profile_mode,
	.asic_reset = smu10_asic_reset,
	.set_fine_grain_clk_vol = smu10_set_fine_grain_clk_vol,
};

int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &smu10_hwmgr_funcs;
	hwmgr->pptable_func = &pptable_funcs;
	return 0;
}