#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
26#include "linux/delay.h"
#include <linux/types.h>

#include "smumgr.h"
#include "pp_debug.h"
#include "ci_smumgr.h"
#include "ppsmc.h"
#include "smu7_hwmgr.h"
#include "hardwaremanager.h"
#include "ppatomctrl.h"
#include "cgs_common.h"
#include "atombios.h"
#include "pppcielanes.h"

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "processpptables.h"

#define MC_CG_ARB_FREQ_F0	0x0a
#define MC_CG_ARB_FREQ_F1	0x0b
#define MC_CG_ARB_FREQ_F2	0x0c
#define MC_CG_ARB_FREQ_F3	0x0d

#define SMC_RAM_END	0x40000

#define VOLTAGE_SCALE	4
#define VOLTAGE_VID_OFFSET_SCALE1	625
#define VOLTAGE_VID_OFFSET_SCALE2	100
#define CISLAND_MINIMUM_ENGINE_CLOCK	800
#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID	5

static const struct ci_pt_defaults defaults_hawaii_xt = {
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro = {
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt = {
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

static const struct ci_pt_defaults defaults_saturn_xt = {
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

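/*
 * Select a dword in SMC SRAM through the SMC_IND_INDEX_0/SMC_IND_DATA_0
 * indirect register pair. The address must be dword aligned, and the
 * addressed dword must lie entirely below 'limit'. Auto-increment is
 * disabled, so repeated data accesses hit the same address.
 */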
static int ci_set_smc_sram_address(struct pp_hwmgr *hwmgr,
					uint32_t smc_addr, uint32_t limit)
{
	if ((0 != (3 & smc_addr))
		|| ((smc_addr + 3) >= limit)) {
		pr_err("smc_addr invalid\n");
		return -EINVAL;
	}

	cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, smc_addr);
	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
	return 0;
}

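/*
 * Copy a buffer into SMC SRAM. Whole dwords are packed most significant
 * byte first; a trailing partial dword is merged into the existing SRAM
 * contents with a read-modify-write so the bytes beyond the end of 'src'
 * are preserved.
 */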
static int ci_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
				const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
	int result;
	uint32_t data = 0;
	uint32_t original_data;
	uint32_t addr = 0;
	uint32_t extra_shift;

	if ((3 & smc_start_address)
		|| ((smc_start_address + byte_count) >= limit)) {
		pr_err("smc_start_address invalid\n");
		return -EINVAL;
	}

	addr = smc_start_address;

	while (byte_count >= 4) {
		data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];

		result = ci_set_smc_sram_address(hwmgr, addr, limit);
		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	if (0 != byte_count) {
		data = 0;

		result = ci_set_smc_sram_address(hwmgr, addr, limit);
		if (0 != result)
			return result;

		original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);

		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			data = (0x100 * data) + *src++;
			byte_count--;
		}

		data <<= extra_shift;

		data |= (original_data & ~((~0UL) << extra_shift));

		result = ci_set_smc_sram_address(hwmgr, addr, limit);
		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
	}

	return 0;
}

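/*
 * Patch the SMC reset vector (address 0) with a four-byte raw instruction
 * so the firmware entry point is taken once the SMC clock is ungated. The
 * limit of sizeof(data)+1 exists only to satisfy the bounds check in
 * ci_copy_bytes_to_smc().
 */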
static int ci_program_jump_on_start(struct pp_hwmgr *hwmgr)
{
	static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };

	ci_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);

	return 0;
}

bool ci_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
	return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
			CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
	&& (0x20100 <= cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, ixSMC_PC_C)));
}

static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
				uint32_t *value, uint32_t limit)
{
	int result;

	result = ci_set_smc_sram_address(hwmgr, smc_addr, limit);
	if (result)
		return result;

	*value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
	return 0;
}

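/*
 * Post a message to the SMC through SMC_MESSAGE_0 and busy-wait on
 * SMC_RESP_0. A response of 1 means success; any other value is logged,
 * but the error is deliberately not propagated to the caller.
 */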
static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	int ret;

	if (!ci_is_smc_ram_running(hwmgr))
		return -EINVAL;

	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);

	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);

	if (ret != 1)
		pr_info("failed to send message %x, ret is %d\n", msg, ret);

	return 0;
}

static int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
					uint16_t msg, uint32_t parameter)
{
	cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
	return ci_send_msg_to_smc(hwmgr, msg);
}

static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t dev_id;

	dev_id = adev->pdev->device;

	switch (dev_id) {
	case 0x67BA:
	case 0x66B1:
		smu_data->power_tune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67B8:
	case 0x66B0:
		smu_data->power_tune_defaults = &defaults_hawaii_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		smu_data->power_tune_defaults = &defaults_saturn_xt;
		break;
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
	default:
		smu_data->power_tune_defaults = &defaults_bonaire_xt;
		break;
	}
}

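/*
 * Look up the minimum voltage for a given clock in a clock/voltage
 * dependency table. The table is assumed to be sorted by ascending clock:
 * the first entry whose clock is >= the request wins, and a request above
 * the highest entry clamps to the last (highest) voltage.
 */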
static int ci_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
			struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
			uint32_t clock, uint32_t *vol)
{
	uint32_t i = 0;

	if (allowed_clock_voltage_table->count == 0)
		return -EINVAL;

	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
			*vol = allowed_clock_voltage_table->entries[i].v;
			return 0;
		}
	}

	*vol = allowed_clock_voltage_table->entries[i - 1].v;
	return 0;
}

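/*
 * Translate an engine clock target into SPLL register values: reference
 * and post dividers come from the VBIOS, the feedback divider is derived
 * from them, and spread spectrum (CLKS/CLKV) is layered on top when the
 * platform supports it.
 */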
static int ci_calculate_sclk_params(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU7_Discrete_GraphicsLevel *sclk)
{
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct pp_atomctrl_clock_dividers_vi dividers;
	uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t ref_clock;
	uint32_t ref_divider;
	uint32_t fbdiv;
	int result;

	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
			"Error retrieving Engine Clock dividers from VBIOS.",
			return result);

	ref_clock = atomctrl_get_reference_clock(hwmgr);
	ref_divider = 1 + dividers.uc_pll_ref_div;

	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;

	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_REF_DIV, dividers.uc_pll_ref_div);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_PDIV_A, dividers.uc_pll_post_div);

	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
			SPLL_FB_DIV, fbdiv);

	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
			SPLL_DITHEN, 1);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
		struct pp_atomctrl_internal_ss_info ss_info;
		uint32_t vco_freq = clock * dividers.uc_pll_post_div;

		if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
				vco_freq, &ss_info)) {
			uint32_t clk_s = ref_clock * 5 /
					(ref_divider * ss_info.speed_spectrum_rate);
			uint32_t clk_v = 4 * ss_info.speed_spectrum_percentage *
					fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
					CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
					CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
			cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
					CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
		}
	}

	sclk->SclkFrequency = clock;
	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
	sclk->SclkDid = (uint8_t)dividers.pll_post_divider;

	return 0;
}

static void ci_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
				const struct phm_phase_shedding_limits_table *pl,
					uint32_t sclk, uint32_t *p_shed)
{
	unsigned int i;

	*p_shed = 1;

	for (i = 0; i < pl->count; i++) {
		if (sclk < pl->entries[i].Sclk) {
			*p_shed = i;
			break;
		}
	}
}

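/*
 * Pick the largest deep-sleep divider (a power of two, up to 2^5) that
 * still keeps the divided engine clock at or above the stutter minimum.
 * Returns 0, i.e. no deep-sleep divide, if even the undivided clock is
 * too slow.
 */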
static uint8_t ci_get_sleep_divider_id_from_clock(uint32_t clock,
			uint32_t clock_insr)
{
	uint8_t i;
	uint32_t temp;
	uint32_t min = min_t(uint32_t, clock_insr, CISLAND_MINIMUM_ENGINE_CLOCK);

	if (clock < min) {
		pr_info("Engine clock can't satisfy stutter requirement!\n");
		return 0;
	}
	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
		temp = clock >> i;

		if (temp >= min || i == 0)
			break;
	}
	return i;
}

static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU7_Discrete_GraphicsLevel *level)
{
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	result = ci_calculate_sclk_params(hwmgr, clock, level);

	result = ci_get_dependency_volt_by_clk(hwmgr,
			hwmgr->dyn_state.vddc_dependency_on_sclk, clock,
			(uint32_t *)(&level->MinVddc));
	if (result) {
		pr_err("vdd_dep_on_sclk table is NULL\n");
		return result;
	}

	level->SclkFrequency = clock;
	level->MinVddcPhases = 1;

	if (data->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_sclk(hwmgr,
				hwmgr->dyn_state.vddc_phase_shed_limits_table,
				clock,
				&level->MinVddcPhases);

	level->ActivityLevel = data->current_profile_setting.sclk_activity;
	level->CcPwrDynRm = 0;
	level->CcPwrDynRm1 = 0;
	level->EnabledForActivity = 0;
	level->EnabledForThrottle = 1;
	level->UpH = data->current_profile_setting.sclk_up_hyst;
	level->DownH = data->current_profile_setting.sclk_down_hyst;
	level->VoltageDownH = 0;
	level->PowerThrottle = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep))
		level->DeepSleepDivId =
				ci_get_sleep_divider_id_from_clock(clock,
						CISLAND_MINIMUM_ENGINE_CLOCK);

	level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	if (0 == result) {
		level->MinVddc = PP_HOST_TO_SMC_UL(level->MinVddc * VOLTAGE_SCALE);
		CONVERT_FROM_HOST_TO_SMC_UL(level->MinVddcPhases);
		CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
		CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
		CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
	}

	return result;
}

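/*
 * Build the full graphics DPM level array from the SCLK DPM table and
 * upload it into the SMC's copy of SMU7_Discrete_DpmTable. Only level 0
 * is enabled for activity, and the top level carries the high display
 * watermark.
 */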
static int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	int result = 0;
	uint32_t array = smu_data->dpm_table_start +
			offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
	uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
			SMU7_MAX_LEVELS_GRAPHICS;
	struct SMU7_Discrete_GraphicsLevel *levels =
			smu_data->smc_state_table.GraphicsLevel;
	uint32_t i;

	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		result = ci_populate_single_graphic_level(hwmgr,
				dpm_table->sclk_table.dpm_levels[i].value,
				&levels[i]);
		if (result)
			return result;
		if (i > 1)
			smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
		if (i == (dpm_table->sclk_table.count - 1))
			smu_data->smc_state_table.GraphicsLevel[i].DisplayWatermark =
				PPSMC_DISPLAY_WATERMARK_HIGH;
	}

	smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;

	smu_data->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

	result = ci_copy_bytes_to_smc(hwmgr, array,
				(u8 *)levels, array_size,
				SMC_RAM_END);

	return result;
}

static int ci_populate_svi_load_line(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;

	smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
	smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
	smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
	smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

static int ci_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
	uint16_t tdc_limit;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;

	tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
	smu_data->power_tune_table.TDC_VDDC_PkgLimit =
			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
	smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
			defaults->tdc_vddc_throttle_release_limit_perc;
	smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;

	return 0;
}

static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
	uint32_t temp;

	if (ci_read_smc_sram_dword(hwmgr,
			fuse_table_offset +
			offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
			(uint32_t *)&temp, SMC_RAM_END))
		PP_ASSERT_WITH_CODE(false,
				"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
				return -EINVAL);
	else
		smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_populate_fuzzy_fan(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
	uint16_t tmp;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);

	if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
		|| 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
		tmp = hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity;
	else
		tmp = hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;

	smu_data->power_tune_table.FuzzyFan_PwmSetDelta = CONVERT_FROM_HOST_TO_SMC_US(tmp);

	return 0;
}

static int ci_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
{
	int i;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
	uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
	uint8_t *hi2_vid = smu_data->power_tune_table.BapmVddCVidHiSidd2;

	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
			    "The CAC Leakage table does not exist!", return -EINVAL);
	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
			    "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
			    "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);

	for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
			hi2_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc3);
		} else {
			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc);
			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Leakage);
		}
	}

	return 0;
}

static int ci_populate_vddc_vid(struct pp_hwmgr *hwmgr)
{
	int i;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint8_t *vid = smu_data->power_tune_table.VddCVid;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
		"There should never be more than 8 entries for VddcVid!!!",
		return -EINVAL);

	for (i = 0; i < (int)data->vddc_voltage_table.count; i++)
		vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	u8 *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	smu_data->power_tune_table.GnbLPMLMaxVid = (u8)max;
	smu_data->power_tune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

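/*
 * The SMC takes the base leakage as a 256-scaled (effectively 8.8 fixed
 * point) fraction, so the percentage values from the CAC table are scaled
 * by 256/100 before the endian swap. The integer divide by 100 happens
 * first and truncates sub-percent precision, mirroring the original
 * ordering.
 */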
static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
	uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
	struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;

	HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
	LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);

	smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
			CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
	smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
			CONVERT_FROM_HOST_TO_SMC_US(LoSidd);

	return 0;
}

static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint32_t pm_fuse_table_offset;
	int ret = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		if (ci_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU7_Firmware_Header, PmFuseTable),
				&pm_fuse_table_offset, SMC_RAM_END)) {
			pr_err("Attempt to get pm_fuse_table_offset Failed!\n");
			return -EINVAL;
		}

		ret = ci_populate_bapm_vddc_vid_sidd(hwmgr);
		ret |= ci_populate_vddc_vid(hwmgr);
		ret |= ci_populate_svi_load_line(hwmgr);
		ret |= ci_populate_tdc_limit(hwmgr);
		ret |= ci_populate_dw8(hwmgr, pm_fuse_table_offset);
		ret |= ci_populate_fuzzy_fan(hwmgr, pm_fuse_table_offset);
		ret |= ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(hwmgr);
		ret |= ci_populate_bapm_vddc_base_leakage_sidd(hwmgr);
		if (ret)
			return ret;

		ret = ci_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
				(uint8_t *)&smu_data->power_tune_table,
				sizeof(struct SMU7_Discrete_PmFuses), SMC_RAM_END);
	}
	return ret;
}

static int ci_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
	SMU7_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
	struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
	struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
	const uint16_t *def1, *def2;
	int i, j, k;

	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
	dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));

	dpm_table->DTETjOffset = 0;
	dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
		dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
	} else {
		dpm_table->PPM_PkgPwrLimit = 0;
		dpm_table->PPM_TemperatureLimit = 0;
	}

	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);

	dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
	def1 = defaults->bapmti_r;
	def2 = defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

static int ci_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
		pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
		uint16_t *lo)
{
	uint16_t v_index;
	bool vol_found = false;
	*hi = tab->value * VOLTAGE_SCALE;
	*lo = tab->value * VOLTAGE_SCALE;

	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
			"The SCLK/VDDC Dependency Table does not exist.\n",
			return -EINVAL);

	if (NULL == hwmgr->dyn_state.cac_leakage_table) {
		pr_warn("CAC Leakage Table does not exist, using vddc.\n");
		return 0;
	}

	for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
		if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
			vol_found = true;
			if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
				*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
			} else {
				pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
				*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
			}
			break;
		}
	}

	if (!vol_found) {
		for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
			if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
				vol_found = true;
				if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
					*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
				} else {
					pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
					*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
				}
				break;
			}
		}

		if (!vol_found)
			pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
	}

	return 0;
}

static int ci_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
		pp_atomctrl_voltage_table_entry *tab,
		SMU7_Discrete_VoltageLevel *smc_voltage_tab)
{
	int result;

	result = ci_get_std_voltage_value_sidd(hwmgr, tab,
			&smc_voltage_tab->StdVoltageHiSidd,
			&smc_voltage_tab->StdVoltageLoSidd);
	if (result) {
		smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
		smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
	}

	smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);

	return 0;
}

static int ci_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
			SMU7_Discrete_DpmTable *table)
{
	unsigned int count;
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	table->VddcLevelCount = data->vddc_voltage_table.count;
	for (count = 0; count < table->VddcLevelCount; count++) {
		result = ci_populate_smc_voltage_table(hwmgr,
				&(data->vddc_voltage_table.entries[count]),
				&(table->VddcLevel[count]));
		PP_ASSERT_WITH_CODE(0 == result, "failed to populate SMC VDDC voltage table", return -EINVAL);

		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
			table->VddcLevel[count].Smio = (uint8_t) count;
			table->Smio[count] |= data->vddc_voltage_table.entries[count].smio_low;
			table->SmioMaskVddcVid |= data->vddc_voltage_table.entries[count].smio_low;
		} else {
			table->VddcLevel[count].Smio = 0;
		}
	}

	CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);

	return 0;
}

static int ci_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
			SMU7_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t count;
	int result;

	table->VddciLevelCount = data->vddci_voltage_table.count;

	for (count = 0; count < table->VddciLevelCount; count++) {
		result = ci_populate_smc_voltage_table(hwmgr,
				&(data->vddci_voltage_table.entries[count]),
				&(table->VddciLevel[count]));
		PP_ASSERT_WITH_CODE(result == 0, "failed to populate SMC VDDCI voltage table", return -EINVAL);
		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
			table->VddciLevel[count].Smio = (uint8_t) count;
			table->Smio[count] |= data->vddci_voltage_table.entries[count].smio_low;
			table->SmioMaskVddciVid |= data->vddci_voltage_table.entries[count].smio_low;
		} else {
			table->VddciLevel[count].Smio = 0;
		}
	}

	CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);

	return 0;
}

static int ci_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
			SMU7_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t count;
	int result;

	table->MvddLevelCount = data->mvdd_voltage_table.count;

	for (count = 0; count < table->MvddLevelCount; count++) {
		result = ci_populate_smc_voltage_table(hwmgr,
				&(data->mvdd_voltage_table.entries[count]),
				&table->MvddLevel[count]);
		PP_ASSERT_WITH_CODE(result == 0, "failed to populate SMC MVDD voltage table", return -EINVAL);
		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
			table->MvddLevel[count].Smio = (uint8_t) count;
			table->Smio[count] |= data->mvdd_voltage_table.entries[count].smio_low;
			table->SmioMaskMvddVid |= data->mvdd_voltage_table.entries[count].smio_low;
		} else {
			table->MvddLevel[count].Smio = 0;
		}
	}

	CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);

	return 0;
}

static int ci_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
	SMU7_Discrete_DpmTable *table)
{
	int result;

	result = ci_populate_smc_vddc_table(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate VDDC voltage table to SMC", return -EINVAL);

	result = ci_populate_smc_vdd_ci_table(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate VDDCI voltage table to SMC", return -EINVAL);

	result = ci_populate_smc_mvdd_table(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate MVDD voltage table to SMC", return -EINVAL);

	return 0;
}

static int ci_populate_ulv_level(struct pp_hwmgr *hwmgr,
		struct SMU7_Discrete_Ulv *state)
{
	uint32_t voltage_response_time, ulv_voltage;
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	state->CcPwrDynRm = 0;
	state->CcPwrDynRm1 = 0;

	result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
	PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);

	if (ulv_voltage == 0) {
		data->ulv_supported = false;
		return 0;
	}

	if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
			state->VddcOffset = 0;
		else
			state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
	} else {
		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
			state->VddcOffsetVid = 0;
		else
			state->VddcOffsetVid = (uint8_t)(
					(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
					* VOLTAGE_VID_OFFSET_SCALE2
					/ VOLTAGE_VID_OFFSET_SCALE1);
	}
	state->VddcPhase = 1;

	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
	CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);

	return 0;
}

static int ci_populate_ulv_state(struct pp_hwmgr *hwmgr,
		SMU7_Discrete_Ulv *ulv_level)
{
	return ci_populate_ulv_level(hwmgr, ulv_level);
}

static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint32_t i;

	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed =
			(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
		table->LinkLevel[i].PcieLaneCount =
			(uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity = 1;
		table->LinkLevel[i].DownT = PP_HOST_TO_SMC_UL(5);
		table->LinkLevel[i].UpT = PP_HOST_TO_SMC_UL(30);
	}

	smu_data->smc_state_table.LinkLevelCount =
		(uint8_t)dpm_table->pcie_speed_table.count;
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);

	return 0;
}

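/*
 * Translate a memory clock target into MPLL register values. The VBIOS
 * supplies the dividers; GDDR5 parts additionally program the DQ path,
 * and memory spread spectrum is derived from the nominal VCO frequency
 * (x4 in QDR mode, x2 otherwise, times the post divider).
 */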
static int ci_calculate_mclk_params(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU7_Discrete_MemoryLevel *mclk,
		bool strobe_mode,
		bool dllStateOn
		)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
	uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
	uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
	uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
	uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
	uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
	uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
	uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
	uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;

	pp_atomctrl_memory_clock_param mpll_param;
	int result;

	result = atomctrl_get_memory_pll_dividers_si(hwmgr,
				memory_clock, &mpll_param, strobe_mode);
	PP_ASSERT_WITH_CODE(0 == result,
		"Error retrieving Memory Clock Parameters from VBIOS.", return result);

	mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);

	mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
			MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
	mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
			MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
	mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
			MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);

	mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
			MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);

	if (data->is_memory_gddr5) {
		mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
				MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
		mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
				MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
		pp_atomctrl_internal_ss_info ss_info;
		uint32_t freq_nom;
		uint32_t tmp;
		uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);

		if (1 == mpll_param.qdr)
			freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
		else
			freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);

		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;

		if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
			uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
			uint32_t clkv =
				(uint32_t)((((131 * ss_info.speed_spectrum_percentage *
						ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);

			mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
			mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
		}
	}

	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);

	mclk->MclkFrequency = memory_clock;
	mclk->MpllFuncCntl = mpll_func_cntl;
	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
	mclk->DllCntl = dll_cntl;
	mclk->MpllSs1 = mpll_ss1;
	mclk->MpllSs2 = mpll_ss2;

	return 0;
}

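/*
 * Map a memory clock (in 10 kHz units) onto the 4-bit frequency-ratio
 * index used against the MC sequencer fuses: 25 MHz steps from 100 MHz in
 * strobe mode, 50 MHz steps from 600 MHz otherwise, clamped to 0x0..0xf.
 */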
static uint8_t ci_get_mclk_frequency_ratio(uint32_t memory_clock,
		bool strobe_mode)
{
	uint8_t mc_para_index;

	if (strobe_mode) {
		if (memory_clock < 12500)
			mc_para_index = 0x00;
		else if (memory_clock > 47500)
			mc_para_index = 0x0f;
		else
			mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
	} else {
		if (memory_clock < 65000)
			mc_para_index = 0x00;
		else if (memory_clock > 135000)
			mc_para_index = 0x0f;
		else
			mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
	}

	return mc_para_index;
}

static uint8_t ci_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
{
	uint8_t mc_para_index;

	if (memory_clock < 10000)
		mc_para_index = 0;
	else if (memory_clock >= 80000)
		mc_para_index = 0x0f;
	else
		mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);

	return mc_para_index;
}

static int ci_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
					uint32_t memory_clock, uint32_t *p_shed)
{
	unsigned int i;

	*p_shed = 1;

	for (i = 0; i < pl->count; i++) {
		if (memory_clock < pl->entries[i].Mclk) {
			*p_shed = i;
			break;
		}
	}

	return 0;
}

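/*
 * Fill one SMC memory DPM level: look up the minimum VDDC/VDDCI/MVDD for
 * the clock, decide strobe/EDC modes against the fixed 400 MHz thresholds,
 * derive the DLL state from the MC sequencer fuses, and finish with the
 * MPLL register calculation and endian conversion.
 */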
static int ci_populate_single_memory_level(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU7_Discrete_MemoryLevel *memory_level
		)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	int result = 0;
	bool dll_state_on;
	struct cgs_display_info info = {0};
	uint32_t mclk_edc_wr_enable_threshold = 40000;
	uint32_t mclk_edc_enable_threshold = 40000;
	uint32_t mclk_strobe_mode_threshold = 40000;

	if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
		result = ci_get_dependency_volt_by_clk(hwmgr,
				hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
	}

	if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
		result = ci_get_dependency_volt_by_clk(hwmgr,
				hwmgr->dyn_state.vddci_dependency_on_mclk,
				memory_clock,
				&memory_level->MinVddci);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
	}

	if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
		result = ci_get_dependency_volt_by_clk(hwmgr,
				hwmgr->dyn_state.mvdd_dependency_on_mclk,
				memory_clock,
				&memory_level->MinMvdd);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find MinMvdd voltage value from memory MVDD voltage dependency table", return result);
	}

	memory_level->MinVddcPhases = 1;

	if (data->vddc_phase_shed_control) {
		ci_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
				memory_clock, &memory_level->MinVddcPhases);
	}

	memory_level->EnabledForThrottle = 1;
	memory_level->EnabledForActivity = 1;
	memory_level->UpH = data->current_profile_setting.mclk_up_hyst;
	memory_level->DownH = data->current_profile_setting.mclk_down_hyst;
	memory_level->VoltageDownH = 0;

	memory_level->ActivityLevel = data->current_profile_setting.mclk_activity;
	memory_level->StutterEnable = 0;
	memory_level->StrobeEnable = 0;
	memory_level->EdcReadEnable = 0;
	memory_level->EdcWriteEnable = 0;
	memory_level->RttEnable = 0;

	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	cgs_get_active_displays_info(hwmgr->device, &info);
	data->display_timing.num_existing_displays = info.display_count;

	memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
			(memory_clock <= mclk_strobe_mode_threshold);

	if (data->is_memory_gddr5) {
		memory_level->StrobeRatio = ci_get_mclk_frequency_ratio(memory_clock,
					memory_level->StrobeEnable);

		if ((mclk_edc_enable_threshold != 0) &&
				(memory_clock > mclk_edc_enable_threshold)) {
			memory_level->EdcReadEnable = 1;
		}

		if ((mclk_edc_wr_enable_threshold != 0) &&
				(memory_clock > mclk_edc_wr_enable_threshold)) {
			memory_level->EdcWriteEnable = 1;
		}

		if (memory_level->StrobeEnable) {
			if (ci_get_mclk_frequency_ratio(memory_clock, 1) >=
					((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
			else
				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
		} else
			dll_state_on = data->dll_default_on;
	} else {
		memory_level->StrobeRatio =
			ci_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
	}

	result = ci_calculate_mclk_params(hwmgr,
		memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);

	if (0 == result) {
		memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
		memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
		memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);

		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
	}

	return result;
}

static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	int result;
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t dev_id;

	uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
	uint32_t level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * SMU7_MAX_LEVELS_MEMORY;
	SMU7_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
	uint32_t i;

	memset(levels, 0x00, level_array_size);

	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
				"can not populate memory level as memory clock is zero", return -EINVAL);
		result = ci_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
				&(smu_data->smc_state_table.MemoryLevel[i]));
		if (0 != result)
			return result;
	}

	smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;

	dev_id = adev->pdev->device;

	if ((dpm_table->mclk_table.count >= 2)
		&& ((dev_id == 0x67B0) || (dev_id == 0x67B1))) {
		smu_data->smc_state_table.MemoryLevel[1].MinVddci =
				smu_data->smc_state_table.MemoryLevel[0].MinVddci;
		smu_data->smc_state_table.MemoryLevel[1].MinMvdd =
				smu_data->smc_state_table.MemoryLevel[0].MinMvdd;
	}
	smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);

	smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
	data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
	smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;

	result = ci_copy_bytes_to_smc(hwmgr,
			level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
			SMC_RAM_END);

	return result;
}

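/*
 * Find the MVDD voltage for a memory clock via the MVDD/MCLK dependency
 * table. Fails when MVDD is not controlled or the clock exceeds every
 * table entry.
 */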
1353static int ci_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
1354 SMU7_Discrete_VoltageLevel *voltage)
1355{
1356 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1357
1358 uint32_t i = 0;
1359
1360 if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
1361
1362 for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
1363 if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
1364
1365 voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
1366 break;
1367 }
1368 }
1369
1370 PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
1371 "MVDD Voltage is outside the supported range.", return -EINVAL);
1372
1373 } else {
1374 return -EINVAL;
1375 }
1376
1377 return 0;
1378}
1379
1380static int ci_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1381 SMU7_Discrete_DpmTable *table)
1382{
1383 int result = 0;
1384 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1385 struct pp_atomctrl_clock_dividers_vi dividers;
1386
1387 SMU7_Discrete_VoltageLevel voltage_level;
1388 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
1389 uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
1390 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
1391 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
1392
1393
1394
1395 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
1396
1397 if (data->acpi_vddc)
1398 table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
1399 else
1400 table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
1401
1402 table->ACPILevel.MinVddcPhases = data->vddc_phase_shed_control ? 0 : 1;
1403
1404 table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
1405
1406
1407 result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
1408 table->ACPILevel.SclkFrequency, ÷rs);
1409
1410 PP_ASSERT_WITH_CODE(result == 0,
1411 "Error retrieving Engine Clock dividers from VBIOS.", return result);
1412
1413
1414 table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
1415 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1416 table->ACPILevel.DeepSleepDivId = 0;
1417
1418 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
1419 CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
1420 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
1421 CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
1422 spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
1423 CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);
1424
1425 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
1426 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
1427 table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
1428 table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
1429 table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
1430 table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
1431 table->ACPILevel.CcPwrDynRm = 0;
1432 table->ACPILevel.CcPwrDynRm1 = 0;
1433
1434
1435 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
1436
1437 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
1438 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
1439 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
1440 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
1441 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
1442 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
1443 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
1444 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
1445 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
1446
1447
1448
1449 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
1450 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
1451
1452 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
1453 table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
1454 else {
1455 if (data->acpi_vddci != 0)
1456 table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
1457 else
1458 table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
1459 }
1460
1461 if (0 == ci_populate_mvdd_value(hwmgr, 0, &voltage_level))
1462 table->MemoryACPILevel.MinMvdd =
1463 PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
1464 else
1465 table->MemoryACPILevel.MinMvdd = 0;
1466
1467
1468 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1469 MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
1470 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1471 MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
1472
1473
1474 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1475 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
1476 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1477 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
1478
1479
1480 dll_cntl = PHM_SET_FIELD(dll_cntl,
1481 DLL_CNTL, MRDCK0_BYPASS, 0);
1482 dll_cntl = PHM_SET_FIELD(dll_cntl,
1483 DLL_CNTL, MRDCK1_BYPASS, 0);
1484
1485 table->MemoryACPILevel.DllCntl =
1486 PP_HOST_TO_SMC_UL(dll_cntl);
1487 table->MemoryACPILevel.MclkPwrmgtCntl =
1488 PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
1489 table->MemoryACPILevel.MpllAdFuncCntl =
1490 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
1491 table->MemoryACPILevel.MpllDqFuncCntl =
1492 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
1493 table->MemoryACPILevel.MpllFuncCntl =
1494 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
1495 table->MemoryACPILevel.MpllFuncCntl_1 =
1496 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
1497 table->MemoryACPILevel.MpllFuncCntl_2 =
1498 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
1499 table->MemoryACPILevel.MpllSs1 =
1500 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
1501 table->MemoryACPILevel.MpllSs2 =
1502 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
1503
1504 table->MemoryACPILevel.EnabledForThrottle = 0;
1505 table->MemoryACPILevel.EnabledForActivity = 0;
1506 table->MemoryACPILevel.UpH = 0;
1507 table->MemoryACPILevel.DownH = 100;
1508 table->MemoryACPILevel.VoltageDownH = 0;
1509
1510 table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
1511
1512 table->MemoryACPILevel.StutterEnable = 0;
1513 table->MemoryACPILevel.StrobeEnable = 0;
1514 table->MemoryACPILevel.EdcReadEnable = 0;
1515 table->MemoryACPILevel.EdcWriteEnable = 0;
1516 table->MemoryACPILevel.RttEnable = 0;
1517
1518 return result;
1519}
1520
1521static int ci_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1522 SMU7_Discrete_DpmTable *table)
1523{
1524 int result = 0;
1525 uint8_t count;
1526 struct pp_atomctrl_clock_dividers_vi dividers;
1527 struct phm_uvd_clock_voltage_dependency_table *uvd_table =
1528 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1529
1530 table->UvdLevelCount = (uint8_t)(uvd_table->count);
1531
1532 for (count = 0; count < table->UvdLevelCount; count++) {
1533 table->UvdLevel[count].VclkFrequency =
1534 uvd_table->entries[count].vclk;
1535 table->UvdLevel[count].DclkFrequency =
1536 uvd_table->entries[count].dclk;
1537 table->UvdLevel[count].MinVddc =
1538 uvd_table->entries[count].v * VOLTAGE_SCALE;
1539 table->UvdLevel[count].MinVddcPhases = 1;
1540
1541 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1542 table->UvdLevel[count].VclkFrequency, ÷rs);
1543 PP_ASSERT_WITH_CODE((0 == result),
1544 "can not find divide id for Vclk clock", return result);
1545
1546 table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1547
1548 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1549 table->UvdLevel[count].DclkFrequency, ÷rs);
1550 PP_ASSERT_WITH_CODE((0 == result),
1551 "can not find divide id for Dclk clock", return result);
1552
1553 table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
1554 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1555 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1556 CONVERT_FROM_HOST_TO_SMC_US(table->UvdLevel[count].MinVddc);
1557 }
1558
1559 return result;
1560}
1561
1562static int ci_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1563 SMU7_Discrete_DpmTable *table)
1564{
1565 int result = -EINVAL;
1566 uint8_t count;
1567 struct pp_atomctrl_clock_dividers_vi dividers;
1568 struct phm_vce_clock_voltage_dependency_table *vce_table =
1569 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1570
1571 table->VceLevelCount = (uint8_t)(vce_table->count);
1572 table->VceBootLevel = 0;
1573
1574 for (count = 0; count < table->VceLevelCount; count++) {
1575 table->VceLevel[count].Frequency = vce_table->entries[count].evclk;
1576 table->VceLevel[count].MinVoltage =
1577 vce_table->entries[count].v * VOLTAGE_SCALE;
1578 table->VceLevel[count].MinPhases = 1;
1579
1580 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1581 table->VceLevel[count].Frequency, ÷rs);
1582 PP_ASSERT_WITH_CODE((0 == result),
1583 "can not find divide id for VCE engine clock",
1584 return result);
1585
1586 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1587
1588 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1589 CONVERT_FROM_HOST_TO_SMC_US(table->VceLevel[count].MinVoltage);
1590 }
1591 return result;
1592}
1593
1594static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
1595 SMU7_Discrete_DpmTable *table)
1596{
1597 int result = -EINVAL;
1598 uint8_t count;
1599 struct pp_atomctrl_clock_dividers_vi dividers;
1600 struct phm_acp_clock_voltage_dependency_table *acp_table =
1601 hwmgr->dyn_state.acp_clock_voltage_dependency_table;
1602
1603 table->AcpLevelCount = (uint8_t)(acp_table->count);
1604 table->AcpBootLevel = 0;
1605
1606 for (count = 0; count < table->AcpLevelCount; count++) {
1607 table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk;
1608 table->AcpLevel[count].MinVoltage = acp_table->entries[count].v;
1609 table->AcpLevel[count].MinPhases = 1;
1610
1611 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1612 table->AcpLevel[count].Frequency, ÷rs);
1613 PP_ASSERT_WITH_CODE((0 == result),
1614 "can not find divide id for engine clock", return result);
1615
1616 table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1617
1618 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
1619 CONVERT_FROM_HOST_TO_SMC_US(table->AcpLevel[count].MinVoltage);
1620 }
1621 return result;
1622}
1623
static int ci_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
					SMU7_Discrete_DpmTable *table)
{
	int result = -EINVAL;
	uint8_t count;
	struct pp_atomctrl_clock_dividers_vi dividers;
	struct phm_samu_clock_voltage_dependency_table *samu_table =
				hwmgr->dyn_state.samu_clock_voltage_dependency_table;

	table->SamuBootLevel = 0;
	table->SamuLevelCount = (uint8_t)(samu_table->count);

	for (count = 0; count < table->SamuLevelCount; count++) {
		table->SamuLevel[count].Frequency = samu_table->entries[count].samclk;
		table->SamuLevel[count].MinVoltage = samu_table->entries[count].v * VOLTAGE_SCALE;
		table->SamuLevel[count].MinPhases = 1;

		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->SamuLevel[count].Frequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
			"cannot find divider id for SAMU clock", return result);

		table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;

		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
		CONVERT_FROM_HOST_TO_SMC_US(table->SamuLevel[count].MinVoltage);
	}
	return result;
}

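/*
 * Compute the MC arbiter DRAM timing entry for one sclk/mclk pair: the
 * VBIOS call programs MC_ARB_DRAM_TIMING/TIMING2 for the requested
 * clocks, and the resulting register values are read back and stored
 * big-endian in the SMC table entry.
 */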
static int ci_populate_memory_timing_parameters(
		struct pp_hwmgr *hwmgr,
		uint32_t engine_clock,
		uint32_t memory_clock,
		struct SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs
		)
{
	uint32_t dramTiming;
	uint32_t dramTiming2;
	uint32_t burstTime;
	int result;

	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
				engine_clock, memory_clock);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error calling VBIOS to set DRAM_TIMING.", return result);

	dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
	dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
	burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);

	arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
	arb_regs->McArbBurstTime = (uint8_t)burstTime;

	return 0;
}

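/*
 * Build the full arbiter table, one entry per (sclk, mclk) DPM level
 * pair, and upload it to the SMC RAM address found in the firmware
 * header.
 */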
static int ci_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	int result = 0;
	SMU7_Discrete_MCArbDramTimingTable arb_regs;
	uint32_t i, j;

	memset(&arb_regs, 0x00, sizeof(SMU7_Discrete_MCArbDramTimingTable));

	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
		for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
			result = ci_populate_memory_timing_parameters
				(hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
				 data->dpm_table.mclk_table.dpm_levels[j].value,
				 &arb_regs.entries[i][j]);

			if (0 != result)
				break;
		}
	}

	if (0 == result) {
		result = ci_copy_bytes_to_smc(
				hwmgr,
				smu_data->arb_table_start,
				(uint8_t *)&arb_regs,
				sizeof(SMU7_Discrete_MCArbDramTimingTable),
				SMC_RAM_END
				);
	}

	return result;
}

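/*
 * Select the boot DPM levels matching the VBIOS boot-up clocks; if a
 * boot clock is not found in a dependency table, fall back to DPM
 * level 0 instead of failing.
 */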
static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
			SMU7_Discrete_DpmTable *table)
{
	int result = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);

	table->GraphicsBootLevel = 0;
	table->MemoryBootLevel = 0;

	result = phm_find_boot_level(&(data->dpm_table.sclk_table),
			data->vbios_boot_state.sclk_bootup_value,
			(uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));

	if (0 != result) {
		smu_data->smc_state_table.GraphicsBootLevel = 0;
		pr_err("VBIOS did not find boot engine clock value in dependency table. Using Graphics DPM level 0!\n");
		result = 0;
	}

	result = phm_find_boot_level(&(data->dpm_table.mclk_table),
			data->vbios_boot_state.mclk_bootup_value,
			(uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));

	if (0 != result) {
		smu_data->smc_state_table.MemoryBootLevel = 0;
		pr_err("VBIOS did not find boot memory clock value in dependency table. Using Memory DPM level 0!\n");
		result = 0;
	}

	table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
	table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
	table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;

	return result;
}

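/*
 * Copy the s0/s1 address pair of every MC register whose value actually
 * varies across memory clock levels (per validflag) into the SMC MC
 * register table.
 */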
static int ci_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
				 SMU7_Discrete_MCRegisters *mc_reg_table)
{
	const struct ci_smumgr *smu_data = (struct ci_smumgr *)hwmgr->smu_backend;

	uint32_t i, j;

	for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
		if (smu_data->mc_reg_table.validflag & 1 << j) {
			PP_ASSERT_WITH_CODE(i < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE,
				"Index of mc_reg_table->address[] array out of bounds", return -EINVAL);
			mc_reg_table->address[i].s0 =
				PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
			mc_reg_table->address[i].s1 =
				PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
			i++;
		}
	}

	mc_reg_table->last = (uint8_t)i;

	return 0;
}

static void ci_convert_mc_registers(
	const struct ci_mc_reg_entry *entry,
	SMU7_Discrete_MCRegisterSet *data,
	uint32_t num_entries, uint32_t valid_flag)
{
	uint32_t i, j;

	for (i = 0, j = 0; j < num_entries; j++) {
		if (valid_flag & 1 << j) {
			data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
			i++;
		}
	}
}

static int ci_convert_mc_reg_table_entry_to_smc(
		struct pp_hwmgr *hwmgr,
		const uint32_t memory_clock,
		SMU7_Discrete_MCRegisterSet *mc_reg_table_data
		)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint32_t i = 0;

	for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
		if (memory_clock <=
			smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
			break;
		}
	}

	if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
		--i;

	ci_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
				mc_reg_table_data, smu_data->mc_reg_table.last,
				smu_data->mc_reg_table.validflag);

	return 0;
}

static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
		SMU7_Discrete_MCRegisters *mc_regs)
{
	int result = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	int res;
	uint32_t i;

	for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
		res = ci_convert_mc_reg_table_entry_to_smc(
				hwmgr,
				data->dpm_table.mclk_table.dpm_levels[i].value,
				&mc_regs->data[i]
				);

		if (0 != res)
			result = res;
	}

	return result;
}

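/*
 * After an overdrive MCLK change, only the per-level register data is
 * re-uploaded; the address list at the head of the table is unchanged,
 * hence the copy starts at offsetof(SMU7_Discrete_MCRegisters, data[0]).
 */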
static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t address;
	int32_t result;

	if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
		return 0;

	memset(&smu_data->mc_regs, 0, sizeof(SMU7_Discrete_MCRegisters));

	result = ci_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));

	if (result != 0)
		return result;

	address = smu_data->mc_reg_table_start + (uint32_t)offsetof(SMU7_Discrete_MCRegisters, data[0]);

	return ci_copy_bytes_to_smc(hwmgr, address,
			(uint8_t *)&smu_data->mc_regs.data[0],
			sizeof(SMU7_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
			SMC_RAM_END);
}

static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);

	memset(&smu_data->mc_regs, 0x00, sizeof(SMU7_Discrete_MCRegisters));
	result = ci_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize MCRegTable for the MC register addresses!", return result;);

	result = ci_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize MCRegTable for driver state!", return result;);

	return ci_copy_bytes_to_smc(hwmgr, smu_data->mc_reg_table_start,
			(uint8_t *)&smu_data->mc_regs, sizeof(SMU7_Discrete_MCRegisters), SMC_RAM_END);
}

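/*
 * Seed the boot levels from the vddc-vs-clock dependency tables: the
 * first level whose clock is at least the VBIOS boot-up value wins.
 */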
static int ci_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint8_t count, level;

	count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);

	for (level = 0; level < count; level++) {
		if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
			>= data->vbios_boot_state.sclk_bootup_value) {
			smu_data->smc_state_table.GraphicsBootLevel = level;
			break;
		}
	}

	count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);

	for (level = 0; level < count; level++) {
		if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
			>= data->vbios_boot_state.mclk_bootup_value) {
			smu_data->smc_state_table.MemoryBootLevel = level;
			break;
		}
	}

	return 0;
}

static int ci_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
					SMU7_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
		table->SVI2Enable = 1;
	else
		table->SVI2Enable = 0;
	return 0;
}

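/*
 * Bring the SMC out of reset: point its program counter at the loaded
 * firmware, enable its clock, release reset and wait until the firmware
 * reports its interrupts enabled.
 */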
static int ci_start_smc(struct pp_hwmgr *hwmgr)
{
	/* set SMC instruction start point at 0x0 */
	ci_program_jump_on_start(hwmgr);

	/* enable the SMC clock and release reset */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0);

	PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS,
				INTERRUPTS_ENABLED, 1);

	return 0;
}

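/*
 * Encode the voltage regulator topology into VRConfig: VDDGFX and VDDC
 * are expected on SVI2 planes 1 and 2, VDDCI on either SVI2 or an SMIO
 * GPIO pattern, and GPIO-controlled MVDD on the second SMIO pattern.
 */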
static int ci_populate_vr_config(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint16_t config;

	config = VR_SVI2_PLANE_1;
	table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);

	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
		config = VR_SVI2_PLANE_2;
		table->VRConfig |= config;
	} else {
		pr_info("VDDC should be on SVI2 controller!");
	}

	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
		config = VR_SVI2_PLANE_2;
		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
	} else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
		config = VR_SMIO_PATTERN_1;
		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
	}

	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
		config = VR_SMIO_PATTERN_2;
		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
	}

	return 0;
}

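/*
 * Build the whole SMU7_Discrete_DpmTable in host memory (voltage,
 * graphics/memory/link levels, ACPI, VCE/ACP/SAMU/UVD levels, boot
 * state, thermal limits), convert the scalar fields to the SMC's
 * big-endian format, and upload everything except the trailing three
 * PID controller blocks before starting the SMC.
 */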
static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	SMU7_Discrete_DpmTable *table = &(smu_data->smc_state_table);
	struct pp_atomctrl_gpio_pin_assignment gpio_pin;
	u32 i;

	ci_initialize_power_tune_defaults(hwmgr);
	memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));

	if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
		ci_populate_smc_voltage_tables(hwmgr, table);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_AutomaticDCTransition))
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_StepVddc))
		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (data->is_memory_gddr5)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	if (data->ulv_supported) {
		result = ci_populate_ulv_state(hwmgr, &(table->Ulv));
		PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize ULV state!", return result);

		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_ULV_PARAMETER, 0x40035);
	}

	result = ci_populate_all_graphic_levels(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize Graphics Level!", return result);

	result = ci_populate_all_memory_levels(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize Memory Level!", return result);

	result = ci_populate_smc_link_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize Link Level!", return result);

	result = ci_populate_smc_acpi_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize ACPI Level!", return result);

	result = ci_populate_smc_vce_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize VCE Level!", return result);

	result = ci_populate_smc_acp_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize ACP Level!", return result);

	result = ci_populate_smc_samu_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize SAMU Level!", return result);

	result = ci_program_memory_timing_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to write ARB settings for the initial state.", return result);

	result = ci_populate_smc_uvd_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize UVD Level!", return result);

	table->UvdBootLevel = 0;
	table->VceBootLevel = 0;
	table->AcpBootLevel = 0;
	table->SamuBootLevel = 0;

	table->GraphicsBootLevel = 0;
	table->MemoryBootLevel = 0;

	result = ci_populate_smc_boot_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize Boot Level!", return result);

	result = ci_populate_smc_initial_state(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);

	result = ci_populate_bapm_parameters_in_dpm_table(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);

	table->UVDInterval = 1;
	table->VCEInterval = 1;
	table->ACPInterval = 1;
	table->SAMUInterval = 1;
	table->GraphicsVoltageChangeEnable = 1;
	table->GraphicsThermThrottleEnable = 1;
	table->GraphicsInterval = 1;
	table->VoltageInterval = 1;
	table->ThermalInterval = 1;

	table->TemperatureLimitHigh =
		(data->thermal_temp_setting.temperature_high *
		SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	table->TemperatureLimitLow =
		(data->thermal_temp_setting.temperature_low *
		SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	table->MemoryVoltageChangeEnable = 1;
	table->MemoryInterval = 1;
	table->VoltageResponseTime = 0;
	table->VddcVddciDelta = 4000;
	table->PhaseResponseTime = 0;
	table->MemoryThermThrottleEnable = 1;

	PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
			"There must be 1 or more PCIE levels defined in PPTable.",
			return -EINVAL);

	table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count;
	table->PCIeGenInterval = 1;

	result = ci_populate_vr_config(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to populate VRConfig setting!", return result);
	data->vr_config = table->VRConfig;

	ci_populate_smc_svi2_config(hwmgr, table);

	for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++)
		CONVERT_FROM_HOST_TO_SMC_UL(table->Smio[i]);

	table->ThermGpio = 17;
	table->SclkStepSize = 0x4000;
	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
		table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_RegulatorHot);
	} else {
		table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_RegulatorHot);
	}

	table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;

	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
	CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
	table->VddcVddciDelta = PP_HOST_TO_SMC_US(table->VddcVddciDelta);
	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);

	table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
	table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
	table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);

	/* upload the table, minus the three trailing PID controller blocks */
	result = ci_copy_bytes_to_smc(hwmgr, smu_data->dpm_table_start +
					offsetof(SMU7_Discrete_DpmTable, SystemFlags),
					(uint8_t *)&(table->SystemFlags),
					sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
					SMC_RAM_END);

	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to upload dpm data to SMC memory!", return result;);

	result = ci_populate_initial_mc_reg_table(hwmgr);
	PP_ASSERT_WITH_CODE((0 == result),
		"Failed to populate initial MC Reg table!", return result);

	result = ci_populate_pm_fuses(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to populate PM fuses to SMC memory!", return result);

	ci_start_smc(hwmgr);

	return 0;
}

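/*
 * Translate the platform fan parameters into the SMC fan table. The
 * thermal controller temperatures are in units of 0.01 degC, so the
 * (50 + t) / 100 expressions below round to whole degrees; the +50 in
 * the slope computations serves the same rounding purpose.
 */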
static int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	uint32_t duty100;
	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	uint16_t fdo_min, slope1, slope2;
	uint32_t reference_clock;
	int res;
	uint64_t tmp64;

	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
		return 0;

	if (hwmgr->thermal_controller.fanInfo.bNoFan) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	if (0 == ci_data->fan_table_start) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);

	if (0 == duty100) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	/* usPWMMin is in hundredths of a percent of full duty */
	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
	do_div(tmp64, 10000);
	fdo_min = (uint16_t)tmp64;

	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;

	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;

	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
	fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
	fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);
	fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
	fan_table.HystUp = cpu_to_be16(1);
	fan_table.HystSlope = cpu_to_be16(1);
	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);

	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);

	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);

	res = ci_copy_bytes_to_smc(hwmgr, ci_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);

	return res;
}

static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (data->need_update_smu7_dpm_table &
			(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
		return ci_program_memory_timing_parameters(hwmgr);

	return 0;
}

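/*
 * Per-DPM-change housekeeping: push the low-sclk interrupt threshold to
 * the SMC when the throttle-low notification cap is set, then refresh
 * the MC register and arbiter tables as needed.
 */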
static int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);

	int result = 0;
	uint32_t low_sclk_interrupt_threshold = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkThrottleLowNotification)
		&& (data->low_sclk_interrupt_threshold != 0)) {
		low_sclk_interrupt_threshold =
				data->low_sclk_interrupt_threshold;

		CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);

		result = ci_copy_bytes_to_smc(
				hwmgr,
				smu_data->dpm_table_start +
				offsetof(SMU7_Discrete_DpmTable,
					LowSclkInterruptT),
				(uint8_t *)&low_sclk_interrupt_threshold,
				sizeof(uint32_t),
				SMC_RAM_END);
	}

	result = ci_update_and_upload_mc_reg_table(hwmgr);

	PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);

	result = ci_program_mem_timing_parameters(hwmgr);
	PP_ASSERT_WITH_CODE((result == 0),
			"Failed to program memory timing parameters!",
			);

	return result;
}

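/*
 * Map generic SMU member identifiers to offsets inside the CI SMC
 * structures, so the common smu7 code can address SMC RAM without
 * knowing the SMU7 layout.
 */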
static uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
{
	switch (type) {
	case SMU_SoftRegisters:
		switch (member) {
		case HandshakeDisables:
			return offsetof(SMU7_SoftRegisters, HandshakeDisables);
		case VoltageChangeTimeout:
			return offsetof(SMU7_SoftRegisters, VoltageChangeTimeout);
		case AverageGraphicsActivity:
			return offsetof(SMU7_SoftRegisters, AverageGraphicsA);
		case PreVBlankGap:
			return offsetof(SMU7_SoftRegisters, PreVBlankGap);
		case VBlankTimeout:
			return offsetof(SMU7_SoftRegisters, VBlankTimeout);
		case DRAM_LOG_ADDR_H:
			return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_H);
		case DRAM_LOG_ADDR_L:
			return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_L);
		case DRAM_LOG_PHY_ADDR_H:
			return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
		case DRAM_LOG_PHY_ADDR_L:
			return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
		case DRAM_LOG_BUFF_SIZE:
			return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE);
		}
		break;
	case SMU_Discrete_DpmTable:
		switch (member) {
		case LowSclkInterruptThreshold:
			return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT);
		}
		break;
	}
	pr_debug("can't get the offset of type %x member %x\n", type, member);
	return 0;
}

static uint32_t ci_get_mac_definition(uint32_t value)
{
	switch (value) {
	case SMU_MAX_LEVELS_GRAPHICS:
		return SMU7_MAX_LEVELS_GRAPHICS;
	case SMU_MAX_LEVELS_MEMORY:
		return SMU7_MAX_LEVELS_MEMORY;
	case SMU_MAX_LEVELS_LINK:
		return SMU7_MAX_LEVELS_LINK;
	case SMU_MAX_ENTRIES_SMIO:
		return SMU7_MAX_ENTRIES_SMIO;
	case SMU_MAX_LEVELS_VDDC:
		return SMU7_MAX_LEVELS_VDDC;
	case SMU_MAX_LEVELS_VDDCI:
		return SMU7_MAX_LEVELS_VDDCI;
	case SMU_MAX_LEVELS_MVDD:
		return SMU7_MAX_LEVELS_MVDD;
	}

	pr_debug("can't get the MAC definition for %x\n", value);
	return 0;
}

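/*
 * Write the SMU microcode image into SMC RAM one big-endian dword at a
 * time through the auto-incrementing indirect register pair.
 */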
static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr)
{
	uint32_t byte_count, start_addr;
	uint8_t *src;
	uint32_t data;

	struct cgs_firmware_info info = {0};

	cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);

	hwmgr->is_kicker = info.is_kicker;
	hwmgr->smu_version = info.version;
	byte_count = info.image_size;
	src = (uint8_t *)info.kptr;
	start_addr = info.ucode_start_address;

	if (byte_count > SMC_RAM_END) {
		pr_err("SMC firmware image is larger than the SMC RAM area.\n");
		return -EINVAL;
	}

	cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr);
	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);

	for (; byte_count >= 4; byte_count -= 4) {
		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
		src += 4;
	}
	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);

	if (0 != byte_count) {
		pr_err("SMC firmware size must be divisible by 4\n");
		return -EINVAL;
	}

	return 0;
}

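/*
 * Load the SMC firmware unless it is already running: wait for the boot
 * sequence to complete, enable the prefetcher, then gate the SMC clock
 * and hold the SMC in reset while the image is written.
 */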
static int ci_upload_firmware(struct pp_hwmgr *hwmgr)
{
	if (ci_is_smc_ram_running(hwmgr)) {
		pr_info("smc is running, no need to load smc firmware\n");
		return 0;
	}
	PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
			boot_seq_done, 1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_MISC_CNTL,
			pre_fetcher_en, 1);

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
	return ci_load_smc_ucode(hwmgr);
}

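/*
 * Pull the table start offsets (DPM table, soft registers, MC register
 * table, fan table, arbiter table) and the firmware version out of the
 * SMC firmware header so later uploads know where to write.
 */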
static int ci_process_firmware_header(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);

	uint32_t tmp = 0;
	int result;
	bool error = false;

	if (ci_upload_firmware(hwmgr))
		return -EINVAL;

	result = ci_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU7_Firmware_Header, DpmTable),
				&tmp, SMC_RAM_END);

	if (0 == result)
		ci_data->dpm_table_start = tmp;

	error |= (0 != result);

	result = ci_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU7_Firmware_Header, SoftRegisters),
				&tmp, SMC_RAM_END);

	if (0 == result) {
		data->soft_regs_start = tmp;
		ci_data->soft_regs_start = tmp;
	}

	error |= (0 != result);

	result = ci_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU7_Firmware_Header, mcRegisterTable),
				&tmp, SMC_RAM_END);

	/* a missing MC register table offset is not treated as fatal here */
	if (0 == result)
		ci_data->mc_reg_table_start = tmp;

	result = ci_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU7_Firmware_Header, FanTable),
				&tmp, SMC_RAM_END);

	if (0 == result)
		ci_data->fan_table_start = tmp;

	error |= (0 != result);

	result = ci_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
				&tmp, SMC_RAM_END);

	if (0 == result)
		ci_data->arb_table_start = tmp;

	error |= (0 != result);

	result = ci_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU7_Firmware_Header, Version),
				&tmp, SMC_RAM_END);

	if (0 == result)
		hwmgr->microcode_version_info.SMC = tmp;

	error |= (0 != result);

	return error ? 1 : 0;
}

static uint8_t ci_get_memory_module_index(struct pp_hwmgr *hwmgr)
{
	return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
}

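/*
 * Map an MC sequencer register onto its low-power (_LP) shadow
 * counterpart where one exists; registers without a shadow keep their
 * original index.
 */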
static bool ci_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
{
	bool result = true;

	switch (in_reg) {
	case mmMC_SEQ_RAS_TIMING:
		*out_reg = mmMC_SEQ_RAS_TIMING_LP;
		break;
	case mmMC_SEQ_DLL_STBY:
		*out_reg = mmMC_SEQ_DLL_STBY_LP;
		break;
	case mmMC_SEQ_G5PDX_CMD0:
		*out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
		break;
	case mmMC_SEQ_G5PDX_CMD1:
		*out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
		break;
	case mmMC_SEQ_G5PDX_CTRL:
		*out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
		break;
	case mmMC_SEQ_CAS_TIMING:
		*out_reg = mmMC_SEQ_CAS_TIMING_LP;
		break;
	case mmMC_SEQ_MISC_TIMING:
		*out_reg = mmMC_SEQ_MISC_TIMING_LP;
		break;
	case mmMC_SEQ_MISC_TIMING2:
		*out_reg = mmMC_SEQ_MISC_TIMING2_LP;
		break;
	case mmMC_SEQ_PMG_DVS_CMD:
		*out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
		break;
	case mmMC_SEQ_PMG_DVS_CTL:
		*out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
		break;
	case mmMC_SEQ_RD_CTL_D0:
		*out_reg = mmMC_SEQ_RD_CTL_D0_LP;
		break;
	case mmMC_SEQ_RD_CTL_D1:
		*out_reg = mmMC_SEQ_RD_CTL_D1_LP;
		break;
	case mmMC_SEQ_WR_CTL_D0:
		*out_reg = mmMC_SEQ_WR_CTL_D0_LP;
		break;
	case mmMC_SEQ_WR_CTL_D1:
		*out_reg = mmMC_SEQ_WR_CTL_D1_LP;
		break;
	case mmMC_PMG_CMD_EMRS:
		*out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
		break;
	case mmMC_PMG_CMD_MRS:
		*out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
		break;
	case mmMC_PMG_CMD_MRS1:
		*out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
		break;
	case mmMC_SEQ_PMG_TIMING:
		*out_reg = mmMC_SEQ_PMG_TIMING_LP;
		break;
	case mmMC_PMG_CMD_MRS2:
		*out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
		break;
	case mmMC_SEQ_WR_CTL_2:
		*out_reg = mmMC_SEQ_WR_CTL_2_LP;
		break;
	default:
		result = false;
		break;
	}

	return result;
}

static int ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
{
	uint32_t i;
	uint16_t address;

	for (i = 0; i < table->last; i++) {
		table->mc_reg_address[i].s0 =
			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
			? address : table->mc_reg_address[i].s1;
	}
	return 0;
}

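/*
 * Copy the VBIOS-provided MC register table into the driver's fixed-size
 * representation, validating the register and entry counts against the
 * SMC array limits first.
 */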
static int ci_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
					struct ci_mc_reg_table *ni_table)
{
	uint8_t i, j;

	PP_ASSERT_WITH_CODE((table->last <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
		"Invalid VramInfo table.", return -EINVAL);
	PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
		"Invalid VramInfo table.", return -EINVAL);

	for (i = 0; i < table->last; i++)
		ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;

	ni_table->last = table->last;

	for (i = 0; i < table->num_entries; i++) {
		ni_table->mc_reg_table_entry[i].mclk_max =
			table->mc_reg_table_entry[i].mclk_max;
		for (j = 0; j < table->last; j++) {
			ni_table->mc_reg_table_entry[i].mc_data[j] =
				table->mc_reg_table_entry[i].mc_data[j];
		}
	}

	ni_table->num_entries = table->num_entries;

	return 0;
}

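/*
 * Append derived MC registers that the VBIOS table does not carry
 * explicitly: MRS/EMRS command values are synthesized from MC_SEQ_MISC1
 * and MC_SEQ_RESERVE_M, plus an extra MC_PMG_AUTO_CMD entry for
 * non-GDDR5 memory.
 */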
static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr,
					struct ci_mc_reg_table *table)
{
	uint8_t i, j, k;
	uint32_t temp_reg;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	for (i = 0, j = table->last; i < table->last; i++) {
		PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
			"Invalid VramInfo table.", return -EINVAL);

		switch (table->mc_reg_address[i].s1) {

		case mmMC_SEQ_MISC1:
			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) |
					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
			}
			j++;

			PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
				"Invalid VramInfo table.", return -EINVAL);
			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) |
					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);

				if (!data->is_memory_gddr5)
					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
			}
			j++;

			if (!data->is_memory_gddr5) {
				PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
					"Invalid VramInfo table.", return -EINVAL);
				table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
				table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
				for (k = 0; k < table->num_entries; k++) {
					table->mc_reg_table_entry[k].mc_data[j] =
						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
				}
				j++;
			}

			break;

		case mmMC_SEQ_RESERVE_M:
			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) |
					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
			}
			j++;
			break;

		default:
			break;
		}

	}

	table->last = j;

	return 0;
}

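/*
 * Flag the registers whose value differs between at least two memory
 * clock entries; only those need reprogramming on MCLK switches.
 */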
static int ci_set_valid_flag(struct ci_mc_reg_table *table)
{
	uint8_t i, j;

	for (i = 0; i < table->last; i++) {
		for (j = 1; j < table->num_entries; j++) {
			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
				table->mc_reg_table_entry[j].mc_data[i]) {
				table->validflag |= (1 << i);
				break;
			}
		}
	}

	return 0;
}

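/*
 * One-time MC register table setup: mirror the live MC sequencer
 * registers into their _LP shadows, fetch the per-module table from the
 * VBIOS, then post-process it (s0 indices, derived registers, valid
 * flags) for SMC consumption.
 */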
static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	pp_atomctrl_mc_reg_table *table;
	struct ci_mc_reg_table *ni_table = &smu_data->mc_reg_table;
	uint8_t module_index = ci_get_memory_module_index(hwmgr);

	table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);

	if (NULL == table)
		return -ENOMEM;

	/* snapshot the live MC sequencer registers into their _LP shadows */
	cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
	cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
	cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));

	memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));

	result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);

	if (0 == result)
		result = ci_copy_vbios_smc_reg_table(table, ni_table);

	if (0 == result) {
		ci_set_s0_mc_reg_index(ni_table);
		result = ci_set_mc_special_registers(hwmgr, ni_table);
	}

	if (0 == result)
		ci_set_valid_flag(ni_table);

	kfree(table);

	return result;
}

static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
{
	return ci_is_smc_ram_running(hwmgr);
}

static int ci_smu_init(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *ci_priv = NULL;

	ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL);

	if (ci_priv == NULL)
		return -ENOMEM;

	hwmgr->smu_backend = ci_priv;

	return 0;
}

static int ci_smu_fini(struct pp_hwmgr *hwmgr)
{
	kfree(hwmgr->smu_backend);
	hwmgr->smu_backend = NULL;
	cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
	return 0;
}

static int ci_start_smu(struct pp_hwmgr *hwmgr)
{
	/* nothing to do: the firmware is uploaded from ci_process_firmware_header() */
	return 0;
}

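/*
 * Apply a power-profile change to the live DPM tables: freeze the
 * affected DPM levels, patch ActivityLevel and the up/down hysteresis of
 * each graphics and/or memory level directly in SMC RAM via a
 * read-modify-write of the containing dword, then unfreeze.
 */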
static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
				void *profile_setting)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)
			(hwmgr->smu_backend);
	struct profile_mode_setting *setting;
	struct SMU7_Discrete_GraphicsLevel *levels =
			smu_data->smc_state_table.GraphicsLevel;
	uint32_t array = smu_data->dpm_table_start +
			offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
	uint32_t mclk_array = smu_data->dpm_table_start +
			offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
	struct SMU7_Discrete_MemoryLevel *mclk_levels =
			smu_data->smc_state_table.MemoryLevel;
	uint32_t i;
	uint32_t offset, up_hyst_offset, down_hyst_offset, clk_activity_offset, tmp;

	if (profile_setting == NULL)
		return -EINVAL;

	setting = (struct profile_mode_setting *)profile_setting;

	if (setting->bupdate_sclk) {
		if (!data->sclk_dpm_key_disabled)
			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
		for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
			if (levels[i].ActivityLevel !=
				cpu_to_be16(setting->sclk_activity)) {
				levels[i].ActivityLevel = cpu_to_be16(setting->sclk_activity);

				clk_activity_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
						+ offsetof(SMU7_Discrete_GraphicsLevel, ActivityLevel);
				offset = clk_activity_offset & ~0x3;
				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
				tmp = phm_set_field_to_u32(clk_activity_offset, tmp, levels[i].ActivityLevel, sizeof(uint16_t));
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
			}
			if (levels[i].UpH != setting->sclk_up_hyst ||
				levels[i].DownH != setting->sclk_down_hyst) {
				levels[i].UpH = setting->sclk_up_hyst;
				levels[i].DownH = setting->sclk_down_hyst;
				up_hyst_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
						+ offsetof(SMU7_Discrete_GraphicsLevel, UpH);
				down_hyst_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
						+ offsetof(SMU7_Discrete_GraphicsLevel, DownH);
				offset = up_hyst_offset & ~0x3;
				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
				tmp = phm_set_field_to_u32(up_hyst_offset, tmp, levels[i].UpH, sizeof(uint8_t));
				tmp = phm_set_field_to_u32(down_hyst_offset, tmp, levels[i].DownH, sizeof(uint8_t));
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
			}
		}
		if (!data->sclk_dpm_key_disabled)
			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
	}

	if (setting->bupdate_mclk) {
		if (!data->mclk_dpm_key_disabled)
			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
		for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
			if (mclk_levels[i].ActivityLevel !=
				cpu_to_be16(setting->mclk_activity)) {
				mclk_levels[i].ActivityLevel = cpu_to_be16(setting->mclk_activity);

				clk_activity_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
						+ offsetof(SMU7_Discrete_MemoryLevel, ActivityLevel);
				offset = clk_activity_offset & ~0x3;
				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
				tmp = phm_set_field_to_u32(clk_activity_offset, tmp, mclk_levels[i].ActivityLevel, sizeof(uint16_t));
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
			}
			if (mclk_levels[i].UpH != setting->mclk_up_hyst ||
				mclk_levels[i].DownH != setting->mclk_down_hyst) {
				mclk_levels[i].UpH = setting->mclk_up_hyst;
				mclk_levels[i].DownH = setting->mclk_down_hyst;
				up_hyst_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
						+ offsetof(SMU7_Discrete_MemoryLevel, UpH);
				down_hyst_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
						+ offsetof(SMU7_Discrete_MemoryLevel, DownH);
				offset = up_hyst_offset & ~0x3;
				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
				tmp = phm_set_field_to_u32(up_hyst_offset, tmp, mclk_levels[i].UpH, sizeof(uint8_t));
				tmp = phm_set_field_to_u32(down_hyst_offset, tmp, mclk_levels[i].DownH, sizeof(uint8_t));
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
			}
		}
		if (!data->mclk_dpm_key_disabled)
			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
	}
	return 0;
}

const struct pp_smumgr_func ci_smu_funcs = {
	.smu_init = ci_smu_init,
	.smu_fini = ci_smu_fini,
	.start_smu = ci_start_smu,
	.check_fw_load_finish = NULL,
	.request_smu_load_fw = NULL,
	.request_smu_load_specific_fw = NULL,
	.send_msg_to_smc = ci_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter,
	.download_pptable_settings = NULL,
	.upload_pptable_settings = NULL,
	.get_offsetof = ci_get_offsetof,
	.process_firmware_header = ci_process_firmware_header,
	.init_smc_table = ci_init_smc_table,
	.update_sclk_threshold = ci_update_sclk_threshold,
	.thermal_setup_fan_table = ci_thermal_setup_fan_table,
	.populate_all_graphic_levels = ci_populate_all_graphic_levels,
	.populate_all_memory_levels = ci_populate_all_memory_levels,
	.get_mac_definition = ci_get_mac_definition,
	.initialize_mc_reg_table = ci_initialize_mc_reg_table,
	.is_dpm_running = ci_is_dpm_running,
	.update_dpm_settings = ci_update_dpm_settings,
};