/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "pp_debug.h"
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include <drm/amdgpu_drm.h>
#include "ppatomctrl.h"
#include "atombios.h"
#include "pptable_v1_0.h"
#include "pppcielanes.h"
#include "amd_pcie_helpers.h"
#include "hardwaremanager.h"
#include "process_pptables_v1_0.h"
#include "cgs_common.h"

#include "smu7_common.h"

#include "hwmgr.h"
#include "smu7_hwmgr.h"
#include "smu_ucode_xfer_vi.h"
#include "smu7_powertune.h"
#include "smu7_dyn_defaults.h"
#include "smu7_thermal.h"
#include "smu7_clockpowergating.h"
#include "processpptables.h"
#include "pp_thermal.h"

#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
#define MC_CG_ARB_FREQ_F2 0x0c
#define MC_CG_ARB_FREQ_F3 0x0d

#define MC_CG_SEQ_DRAMCONF_S0 0x05
#define MC_CG_SEQ_DRAMCONF_S1 0x06
#define MC_CG_SEQ_YCLK_SUSPEND 0x04
#define MC_CG_SEQ_YCLK_RESUME 0x0a

#define SMC_CG_IND_START 0xc0030000
#define SMC_CG_IND_END 0xc0040000

#define MEM_FREQ_LOW_LATENCY 25000
#define MEM_FREQ_HIGH_LATENCY 80000

#define MEM_LATENCY_HIGH 45
#define MEM_LATENCY_LOW 35
#define MEM_LATENCY_ERR 0xFFFF

#define MC_SEQ_MISC0_GDDR5_SHIFT 28
#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE 5

#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
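
/* Default power-profile table. Field order follows struct
 * profile_mode_setting: {update sclk, sclk up hyst, sclk down hyst,
 * sclk target activity, update mclk, mclk up hyst, mclk down hyst,
 * mclk target activity}.
 */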
static const struct profile_mode_setting smu7_profiling[6] = {
	{1, 0, 100, 30, 1, 0, 100, 10},
	{1, 10, 0, 30, 0, 0, 0, 0},
	{0, 0, 0, 0, 1, 10, 16, 31},
	{1, 0, 11, 50, 1, 0, 100, 10},
	{1, 0, 5, 30, 0, 0, 0, 0},
	{0, 0, 0, 0, 0, 0, 0, 0},
};

#define PPSMC_MSG_SetVBITimeout_VEGAM ((uint16_t) 0x310)

#define ixPWR_SVI2_PLANE1_LOAD 0xC0200280
#define PWR_SVI2_PLANE1_LOAD__PSI1_MASK 0x00000020L
#define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK 0x00000040L
#define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT 0x00000005
#define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT 0x00000006

enum DPM_EVENT_SRC {
	DPM_EVENT_SRC_ANALOG = 0,
	DPM_EVENT_SRC_EXTERNAL = 1,
	DPM_EVENT_SRC_DIGITAL = 2,
	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
};

static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask);
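
/* Cast a generic pp_hw_power_state to the SMU7-specific power state,
 * validating the VI-islands magic field first.
 */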
static struct smu7_power_state *cast_phw_smu7_power_state(
				struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				return NULL);

	return (struct smu7_power_state *)hw_ps;
}

static const struct smu7_power_state *cast_const_phw_smu7_power_state(
				const struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				return NULL);

	return (const struct smu7_power_state *)hw_ps;
}
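
/* Read the memory-controller microcode version through the
 * MC_SEQ_IO_DEBUG index/data register pair and cache it in hwmgr.
 */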
static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
{
	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);

	hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);

	return 0;
}

static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
{
	uint32_t speedCntl = 0;

	speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
			ixPCIE_LC_SPEED_CNTL);
	return ((uint16_t)PHM_GET_FIELD(speedCntl,
			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
}

static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
{
	uint32_t link_width;

	link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
			PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);

	PP_ASSERT_WITH_CODE((7 >= link_width),
			"Invalid PCIe lane width!", return 0);

	return decode_pcie_lane_width(link_width);
}
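
/* Enable the SMC-driven (SVI2) voltage controller. On VEGAM the PSI0/PSI1
 * regulator power-save states are cleared first.
 */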
static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
	if (hwmgr->chip_id == CHIP_VEGAM) {
		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
				CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0);
		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
				CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0);
	}

	if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable);

	return 0;
}
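
/* Return true if any VDDC voltage-control method is configured. */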
static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
{
	const struct smu7_hwmgr *data =
			(const struct smu7_hwmgr *)(hwmgr->backend);

	return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
}
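
/* Enable dynamic voltage power management in the SMC. */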
static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);

	return 0;
}
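
/* Build an SVI2 voltage table from a v0 (legacy) clock-voltage dependency
 * table: one entry per dependency entry, no SMIO masking or phase delay.
 */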
static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
		struct phm_clock_voltage_dependency_table *voltage_dependency_table)
{
	uint32_t i;

	PP_ASSERT_WITH_CODE((NULL != voltage_table),
			"Voltage Dependency Table empty.", return -EINVAL;);

	voltage_table->mask_low = 0;
	voltage_table->phase_delay = 0;
	voltage_table->count = voltage_dependency_table->count;

	for (i = 0; i < voltage_dependency_table->count; i++) {
		voltage_table->entries[i].value =
			voltage_dependency_table->entries[i].v;
		voltage_table->entries[i].smio_low = 0;
	}

	return 0;
}
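
/* Construct the GPIO- or SVI2-based voltage tables (MVDD, VDDCI, VDDGFX,
 * VDDC) according to the configured control method, then trim each table
 * to the number of levels the SMC state table can hold.
 */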
static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	int result = 0;
	uint32_t tmp;

	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
				&(data->mvdd_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve MVDD table.",
				return result);
	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
		if (hwmgr->pp_table_version == PP_TABLE_V1)
			result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
					table_info->vdd_dep_on_mclk);
		else if (hwmgr->pp_table_version == PP_TABLE_V0)
			result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
					hwmgr->dyn_state.mvdd_dependency_on_mclk);

		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 MVDD table from dependency table.",
				return result;);
	}

	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
				&(data->vddci_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve VDDCI table.",
				return result);
	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
		if (hwmgr->pp_table_version == PP_TABLE_V1)
			result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
					table_info->vdd_dep_on_mclk);
		else if (hwmgr->pp_table_version == PP_TABLE_V0)
			result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
					hwmgr->dyn_state.vddci_dependency_on_mclk);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 VDDCI table from dependency table.",
				return result);
	}

	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
		result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
				table_info->vddgfx_lookup_table);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
	}

	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
				&data->vddc_voltage_table);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve VDDC table.", return result;);
	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
		if (hwmgr->pp_table_version == PP_TABLE_V0)
			result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
					hwmgr->dyn_state.vddc_dependency_on_mclk);
		else if (hwmgr->pp_table_version == PP_TABLE_V1)
			result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
					table_info->vddc_lookup_table);

		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
	}

	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
	PP_ASSERT_WITH_CODE(
			(data->vddc_voltage_table.count <= tmp),
			"Too many voltage values for VDDC. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
					&(data->vddc_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
	PP_ASSERT_WITH_CODE(
			(data->vddgfx_voltage_table.count <= tmp),
			"Too many voltage values for VDDGFX. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
					&(data->vddgfx_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
	PP_ASSERT_WITH_CODE(
			(data->vddci_voltage_table.count <= tmp),
			"Too many voltage values for VDDCI. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
					&(data->vddci_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
	PP_ASSERT_WITH_CODE(
			(data->mvdd_voltage_table.count <= tmp),
			"Too many voltage values for MVDD. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
					&(data->mvdd_voltage_table)));

	return 0;
}
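
/* Program the static-screen detection threshold and its unit; the SMC
 * uses these to decide when display content is idle.
 */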
static int smu7_program_static_screen_threshold_parameters(
		struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
			data->static_screen_threshold_unit);

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
			data->static_screen_threshold);

	return 0;
}
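
/* Program the display gap control so clock changes happen during VBLANK
 * while ordinary display gaps are ignored.
 */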
static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
{
	uint32_t display_gap =
			cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixCG_DISPLAY_GAP_CNTL);

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
			DISP_GAP, DISPLAY_GAP_IGNORE);

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
			DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_DISPLAY_GAP_CNTL, display_gap);

	return 0;
}
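
/* Program (or clear) the eight CG_FREQ_TRAN_VOTING registers that decide
 * which clients may vote on frequency transitions.
 */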
static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	int i;

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);

	for (i = 0; i < 8; i++)
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixCG_FREQ_TRAN_VOTING_0 + i * 4,
				data->voting_rights_clients[i]);
	return 0;
}

static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
	int i;

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);

	for (i = 0; i < 8; i++)
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);

	return 0;
}
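
/* Copy one MC arbiter register set (DRAM timing, timing2, burst time)
 * to another and make the destination set active.
 */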
static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
		uint32_t arb_src, uint32_t arb_dest)
{
	uint32_t mc_arb_dram_timing;
	uint32_t mc_arb_dram_timing2;
	uint32_t burst_time;
	uint32_t mc_cg_config;

	switch (arb_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
		break;
	default:
		return -EINVAL;
	}

	switch (arb_dest) {
	case MC_CG_ARB_FREQ_F0:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
		break;
	case MC_CG_ARB_FREQ_F1:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
		break;
	default:
		return -EINVAL;
	}

	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
	mc_cg_config |= 0x0000000F;
	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);

	return 0;
}

static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults);
}
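
/* Initial switch of the MC arbiter from register set F0 to F1, done once
 * while starting DPM.
 */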
static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
{
	return smu7_copy_and_switch_arb_sets(hwmgr,
			MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}

static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
{
	uint32_t tmp;

	/* Read the currently active arbiter set from SMC_SCRATCH9. */
	tmp = (cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
			0x0000ff00) >> 8;

	if (tmp == MC_CG_ARB_FREQ_F0)
		return 0;

	return smu7_copy_and_switch_arb_sets(hwmgr,
			tmp, MC_CG_ARB_FREQ_F0);
}

static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_pcie_table *pcie_table = NULL;

	uint32_t i, max_entry;
	uint32_t tmp;

	PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
			data->use_pcie_power_saving_levels), "No pcie performance levels!",
			return -EINVAL);

	if (table_info != NULL)
		pcie_table = table_info->pcie_table;

	if (data->use_pcie_performance_levels &&
			!data->use_pcie_power_saving_levels) {
		data->pcie_gen_power_saving = data->pcie_gen_performance;
		data->pcie_lane_power_saving = data->pcie_lane_performance;
	} else if (!data->use_pcie_performance_levels &&
			data->use_pcie_power_saving_levels) {
		data->pcie_gen_performance = data->pcie_gen_power_saving;
		data->pcie_lane_performance = data->pcie_lane_power_saving;
	}
	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
	phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
			tmp,
			MAX_REGULAR_DPM_NUMBER);

	if (pcie_table != NULL) {
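		/* Skip the first BIOS table entry and clamp the level count
		 * to the SMC link-level limit, keeping one level free for
		 * the boot entry.
		 */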
		max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
		for (i = 1; i < max_entry; i++) {
			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
					get_pcie_gen_support(data->pcie_gen_cap,
							pcie_table->entries[i].gen_speed),
					get_pcie_lane_support(data->pcie_lane_cap,
							pcie_table->entries[i].lane_width));
		}
		data->dpm_table.pcie_speed_table.count = max_entry - 1;
		smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
	} else {
		/* No PCIe table in the PPTable: hard-code a default one. */
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));

		data->dpm_table.pcie_speed_table.count = 6;
	}

	if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
		for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
					get_pcie_gen_support(data->pcie_gen_cap,
							PP_Max_PCIEGen),
					data->vbios_boot_state.pcie_lane_bootup_value);
	} else {
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
				data->dpm_table.pcie_speed_table.count,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
	}
	return 0;
}

static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));

	phm_reset_single_dpm_table(
			&data->dpm_table.sclk_table,
			smum_get_mac_definition(hwmgr,
					SMU_MAX_LEVELS_GRAPHICS),
			MAX_REGULAR_DPM_NUMBER);
	phm_reset_single_dpm_table(
			&data->dpm_table.mclk_table,
			smum_get_mac_definition(hwmgr,
					SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);

	phm_reset_single_dpm_table(
			&data->dpm_table.vddc_table,
			smum_get_mac_definition(hwmgr,
					SMU_MAX_LEVELS_VDDC),
			MAX_REGULAR_DPM_NUMBER);
	phm_reset_single_dpm_table(
			&data->dpm_table.vddci_table,
			smum_get_mac_definition(hwmgr,
					SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);

	phm_reset_single_dpm_table(
			&data->dpm_table.mvdd_table,
			smum_get_mac_definition(hwmgr,
					SMU_MAX_LEVELS_MVDD),
			MAX_REGULAR_DPM_NUMBER);
	return 0;
}
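
/* Build the v0 (legacy pptable) DPM tables: SCLK and MCLK levels from the
 * clock-voltage dependency tables, plus VDDC, VDDCI and MVDD level tables.
 */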
static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
		hwmgr->dyn_state.vddc_dependency_on_sclk;
	struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
		hwmgr->dyn_state.vddc_dependency_on_mclk;
	struct phm_cac_leakage_table *std_voltage_table =
		hwmgr->dyn_state.cac_leakage_table;
	uint32_t i;

	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
		"SCLK dependency table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
		"SCLK dependency table is empty. This table is mandatory", return -EINVAL);

	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
		"MCLK dependency table is empty. This table is mandatory", return -EINVAL);

	/* Initialize SCLK DPM table based on the allowed SCLK values. */
	data->dpm_table.sclk_table.count = 0;

	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
				allowed_vdd_sclk_table->entries[i].clk) {
			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
				allowed_vdd_sclk_table->entries[i].clk;
			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
			data->dpm_table.sclk_table.count++;
		}
	}

	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
	/* Initialize MCLK DPM table based on the allowed MCLK values. */
	data->dpm_table.mclk_table.count = 0;
	for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
		if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
				allowed_vdd_mclk_table->entries[i].clk) {
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
				allowed_vdd_mclk_table->entries[i].clk;
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
			data->dpm_table.mclk_table.count++;
		}
	}

	/* Initialize the VDDC DPM table; param1 holds the corresponding
	 * standard (leakage) voltage.
	 */
	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
		data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
		data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
		data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
	}

	data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
	allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;

	if (NULL != allowed_vdd_mclk_table) {
		/* Initialize the VDDCI DPM table from the allowed values. */
		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
			data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
			data->dpm_table.vddci_table.dpm_levels[i].enabled = 1;
		}
		data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
	}

	allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;

	if (NULL != allowed_vdd_mclk_table) {
		/* Initialize the MVDD DPM table from the allowed values. */
		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
			data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
			data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
		}
		data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
	}

	return 0;
}
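
/* Build the v1 (pptable_v1_0) SCLK and MCLK DPM tables, de-duplicating
 * consecutive identical clocks, and derive the overdrive clock limits
 * from the highest dependency-table entries when none are set.
 */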
static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;

	if (table_info == NULL)
		return -EINVAL;

	dep_sclk_table = table_info->vdd_dep_on_sclk;
	dep_mclk_table = table_info->vdd_dep_on_mclk;

	PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
			"SCLK dependency table is missing.",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
			"SCLK dependency table count is 0.",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
			"MCLK dependency table is missing.",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
			"MCLK dependency table count is 0.",
			return -EINVAL);

	data->dpm_table.sclk_table.count = 0;
	for (i = 0; i < dep_sclk_table->count; i++) {
		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
				dep_sclk_table->entries[i].clk) {

			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
					dep_sclk_table->entries[i].clk;

			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
					(i == 0) ? true : false;
			data->dpm_table.sclk_table.count++;
		}
	}
	if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
		hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk;

	data->dpm_table.mclk_table.count = 0;
	for (i = 0; i < dep_mclk_table->count; i++) {
		if (i == 0 || data->dpm_table.mclk_table.dpm_levels
				[data->dpm_table.mclk_table.count - 1].value !=
				dep_mclk_table->entries[i].clk) {
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
					dep_mclk_table->entries[i].clk;
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
					(i == 0) ? true : false;
			data->dpm_table.mclk_table.count++;
		}
	}

	if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
		hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk;
	return 0;
}
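
/* Seed the overdrive (ODN) tables from the golden DPM tables so user
 * overclocking starts from the stock clock/voltage levels.
 */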
static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
	struct phm_odn_performance_level *entries;

	if (table_info == NULL)
		return -EINVAL;

	dep_sclk_table = table_info->vdd_dep_on_sclk;
	dep_mclk_table = table_info->vdd_dep_on_mclk;

	odn_table->odn_core_clock_dpm_levels.num_of_pl =
			data->golden_dpm_table.sclk_table.count;
	entries = odn_table->odn_core_clock_dpm_levels.entries;
	for (i = 0; i < data->golden_dpm_table.sclk_table.count; i++) {
		entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
		entries[i].enabled = true;
		entries[i].vddc = dep_sclk_table->entries[i].vddc;
	}

	smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table,
		(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));

	odn_table->odn_memory_clock_dpm_levels.num_of_pl =
			data->golden_dpm_table.mclk_table.count;
	entries = odn_table->odn_memory_clock_dpm_levels.entries;
	for (i = 0; i < data->golden_dpm_table.mclk_table.count; i++) {
		entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
		entries[i].enabled = true;
		entries[i].vddc = dep_mclk_table->entries[i].vddc;
	}

	smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table,
		(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));

	return 0;
}
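
/* Read the VDDC range from the VBIOS and clamp it against the SCLK
 * dependency table; the result bounds overdrive voltage requests.
 */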
static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t min_vddc = 0;
	uint32_t max_vddc = 0;

	if (!table_info)
		return;

	dep_sclk_table = table_info->vdd_dep_on_sclk;

	atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);

	if (min_vddc == 0 || min_vddc > 2000
		|| min_vddc > dep_sclk_table->entries[0].vddc)
		min_vddc = dep_sclk_table->entries[0].vddc;

	if (max_vddc == 0 || max_vddc > 2000
		|| max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc)
		max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc;

	data->odn_dpm_table.min_vddc = min_vddc;
	data->odn_dpm_table.max_vddc = max_vddc;
}

static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	smu7_reset_dpm_tables(hwmgr);

	if (hwmgr->pp_table_version == PP_TABLE_V1)
		smu7_setup_dpm_tables_v1(hwmgr);
	else if (hwmgr->pp_table_version == PP_TABLE_V0)
		smu7_setup_dpm_tables_v0(hwmgr);

	smu7_setup_default_pcie_table(hwmgr);

	/* Save a golden copy of the freshly built DPM tables. */
	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
			sizeof(struct smu7_dpm_table));

	/* Initialize the overdrive tables from the golden copy. */
	if (hwmgr->od_enabled) {
		smu7_setup_voltage_range_from_vbios(hwmgr);
		smu7_odn_initial_default_setting(hwmgr);
	}

	return 0;
}

static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot))
		return smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_EnableVRHotGPIOInterrupt);

	return 0;
}

static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			SCLK_PWRMGT_OFF, 0);
	return 0;
}

static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (data->ulv_supported)
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV);

	return 0;
}

static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (data->ulv_supported)
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV);

	return 0;
}

static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep)) {
		if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to enable Master Deep Sleep switch failed!",
					return -EINVAL);
	} else {
		if (smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
			PP_ASSERT_WITH_CODE(false,
					"Attempt to disable Master Deep Sleep switch failed!",
					return -EINVAL);
		}
	}

	return 0;
}

static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep)) {
		if (smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
			PP_ASSERT_WITH_CODE(false,
					"Attempt to disable Master Deep Sleep switch failed!",
					return -EINVAL);
		}
	}

	return 0;
}

static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t soft_register_value = 0;
	uint32_t handshake_disables_offset = data->soft_regs_start
				+ smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, HandshakeDisables);

	soft_register_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, handshake_disables_offset);
	soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE;
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			handshake_disables_offset, soft_register_value);
	return 0;
}

static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t soft_register_value = 0;
	uint32_t handshake_disables_offset = data->soft_regs_start
				+ smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, HandshakeDisables);

	soft_register_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, handshake_disables_offset);
	soft_register_value |= smum_get_mac_definition(hwmgr,
			SMU_UVD_MCLK_HANDSHAKE_DISABLE);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			handshake_disables_offset, soft_register_value);
	return 0;
}
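
/* Enable SCLK and MCLK DPM in the SMC. For MCLK this also programs the
 * LCAC (local CAC) controllers; CI and VEGAM parts take slightly
 * different register values.
 */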
static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* enable SCLK dpm */
	if (!data->sclk_dpm_key_disabled) {
		if (hwmgr->chip_id == CHIP_VEGAM)
			smu7_disable_sclk_vce_handshake(hwmgr);

		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
				"Failed to enable SCLK DPM during DPM Start Function!",
				return -EINVAL);
	}

	/* enable MCLK dpm */
	if (0 == data->mclk_dpm_key_disabled) {
		if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
			smu7_disable_handshake_uvd(hwmgr);

		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_MCLKDPM_Enable)),
				"Failed to enable MCLK DPM during DPM Start Function!",
				return -EINVAL);

		if (hwmgr->chip_id != CHIP_VEGAM)
			PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);

		if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005);
			udelay(10);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005);
		} else {
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
			udelay(10);
			if (hwmgr->chip_id == CHIP_VEGAM) {
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009);
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009);
			} else {
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
			}
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
		}
	}

	return 0;
}

static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* enable general power management */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 1);

	/* enable sclk dynamic power management */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 1);

	/* prepare for PCIE DPM */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start +
			smum_get_offsetof(hwmgr, SMU_SoftRegisters,
					VoltageChangeTimeout), 0x1000);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
			SWRST_COMMAND_1, RESETLC, 0x0);

	if (hwmgr->chip_family == AMDGPU_FAMILY_CI)
		cgs_write_register(hwmgr->device, 0x1488,
			(cgs_read_register(hwmgr->device, 0x1488) & ~0x1));

	if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
		pr_err("Failed to enable Sclk DPM and Mclk DPM!");
		return -EINVAL;
	}

	/* enable PCIE dpm */
	if (0 == data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_PCIeDPM_Enable)),
				"Failed to enable pcie DPM during DPM Start Function!",
				return -EINVAL);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition)) {
		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_EnableACDCGPIOInterrupt)),
				"Failed to enable AC DC GPIO Interrupt!",
				);
	}

	return 0;
}

static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* disable SCLK dpm */
	if (!data->sclk_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to disable SCLK DPM when DPM is disabled",
				return 0);
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable);
	}

	/* disable MCLK dpm */
	if (!data->mclk_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to disable MCLK DPM when DPM is disabled",
				return 0);
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable);
	}

	return 0;
}

static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* disable general power management */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 0);
	/* disable sclk dynamic power management */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 0);

	/* disable PCIE dpm */
	if (!data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_PCIeDPM_Disable) == 0),
				"Failed to disable pcie DPM during DPM Stop Function!",
				return -EINVAL);
	}

	smu7_disable_sclk_mclk_dpm(hwmgr);

	PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
			"Trying to disable voltage DPM when DPM is disabled",
			return 0);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable);

	return 0;
}
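
/* Map the active auto-throttle sources onto the SMC DPM event source and
 * enable or disable thermal protection accordingly.
 */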
static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
{
	bool protection;
	enum DPM_EVENT_SRC src;

	switch (sources) {
	default:
		pr_err("Unknown throttling event sources.");
		/* fall through */
	case 0:
		protection = false;
		/* src is unused */
		break;
	case (1 << PHM_AutoThrottleSource_Thermal):
		protection = true;
		src = DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << PHM_AutoThrottleSource_External):
		protection = true;
		src = DPM_EVENT_SRC_EXTERNAL;
		break;
	case (1 << PHM_AutoThrottleSource_External) |
			(1 << PHM_AutoThrottleSource_Thermal):
		protection = true;
		src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
		break;
	}

	if (protection) {
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
				DPM_EVENT_SRC, src);
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
				THERMAL_PROTECTION_DIS,
				!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_ThermalController));
	} else
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
				THERMAL_PROTECTION_DIS, 1);
}

static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
		PHM_AutoThrottleSource source)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (!(data->active_auto_throttle_sources & (1 << source))) {
		data->active_auto_throttle_sources |= 1 << source;
		smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
	}
	return 0;
}

static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}

static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
		PHM_AutoThrottleSource source)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (data->active_auto_throttle_sources & (1 << source)) {
		data->active_auto_throttle_sources &= ~(1 << source);
		smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
	}
	return 0;
}

static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}

static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	data->pcie_performance_request = true;

	return 0;
}
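
/* Top-level DPM enable sequence: bring up voltage control, build the
 * default tables, initialize the SMC, then start DPM and the CAC, power
 * containment and throttling features.
 */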
static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result = 0;
	int result = 0;

	if (smu7_voltage_control(hwmgr)) {
		tmp_result = smu7_enable_voltage_control(hwmgr);
		PP_ASSERT_WITH_CODE(tmp_result == 0,
				"Failed to enable voltage control!",
				result = tmp_result);

		tmp_result = smu7_construct_voltage_tables(hwmgr);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to construct voltage tables!",
				result = tmp_result);
	}
	smum_initialize_mc_reg_table(hwmgr);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);

	tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to program static screen threshold parameters!",
			result = tmp_result);

	tmp_result = smu7_enable_display_gap(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable display gap!", result = tmp_result);

	tmp_result = smu7_program_voting_clients(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to program voting clients!", result = tmp_result);

	tmp_result = smum_process_firmware_header(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to process firmware header!", result = tmp_result);

	if (hwmgr->chip_id != CHIP_VEGAM) {
		tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to initialize switch from ArbF0 to F1!",
				result = tmp_result);
	}

	result = smu7_setup_default_dpm_tables(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to setup default DPM tables!", return result);

	tmp_result = smum_init_smc_table(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to initialize SMC table!", result = tmp_result);

	tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable VR hot GPIO interrupt!", result = tmp_result);

	smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay);

	tmp_result = smu7_enable_sclk_control(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable SCLK control!", result = tmp_result);

	tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable voltage control!", result = tmp_result);

	tmp_result = smu7_enable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable ULV!", result = tmp_result);

	tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable deep sleep master switch!", result = tmp_result);

	tmp_result = smu7_enable_didt_config(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to enable DIDT config!", result = tmp_result);

	tmp_result = smu7_start_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to start DPM!", result = tmp_result);

	tmp_result = smu7_enable_smc_cac(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable SMC CAC!", result = tmp_result);

	tmp_result = smu7_enable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable power containment!", result = tmp_result);

	tmp_result = smu7_power_control_set_level(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to power control set level!", result = tmp_result);

	tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable thermal auto throttle!", result = tmp_result);

	tmp_result = smu7_pcie_performance_request(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"pcie performance request failed!", result = tmp_result);

	return 0;
}
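
/* Enable or disable AVFS in the SMC, guarded by the current AVS_ON
 * feature status so the message is only sent on a state change.
 */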
static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
{
	if (!hwmgr->avfs_supported)
		return 0;

	if (enable) {
		if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
				CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
			PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
					hwmgr, PPSMC_MSG_EnableAvfs),
					"Failed to enable AVFS!",
					return -EINVAL);
		}
	} else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
			CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
		PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
				hwmgr, PPSMC_MSG_DisableAvfs),
				"Failed to disable AVFS!",
				return -EINVAL);
	}

	return 0;
}

static int smu7_update_avfs(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (!hwmgr->avfs_supported)
		return 0;

	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
		/* AVFS must stay off while VDDC is overridden. */
		smu7_avfs_control(hwmgr, false);
	} else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
		/* SCLK changed: restart AVFS. */
		smu7_avfs_control(hwmgr, false);
		smu7_avfs_control(hwmgr, true);
	} else {
		smu7_avfs_control(hwmgr, true);
	}

	return 0;
}

int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);

	tmp_result = smu7_disable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable power containment!", result = tmp_result);

	tmp_result = smu7_disable_smc_cac(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable SMC CAC!", result = tmp_result);

	tmp_result = smu7_disable_didt_config(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable DIDT!", result = tmp_result);

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);

	tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable thermal auto throttle!", result = tmp_result);

	tmp_result = smu7_avfs_control(hwmgr, false);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable AVFS!", result = tmp_result);

	tmp_result = smu7_stop_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to stop DPM!", result = tmp_result);

	tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable deep sleep master switch!", result = tmp_result);

	tmp_result = smu7_disable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable ULV!", result = tmp_result);

	tmp_result = smu7_clear_voting_clients(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to clear voting clients!", result = tmp_result);

	tmp_result = smu7_reset_to_default(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to reset to default!", result = tmp_result);

	tmp_result = smu7_force_switch_to_arbf0(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to force to switch arbf0!", result = tmp_result);

	return result;
}

int smu7_reset_asic_tasks(struct pp_hwmgr *hwmgr)
{
	return 0;
}
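
/* Populate the SMU7 hwmgr backend with default DPM settings, detect the
 * available voltage-control methods, and set platform capability flags.
 */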
static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct amdgpu_device *adev = hwmgr->adev;

	data->dll_default_on = false;
	data->mclk_dpm0_activity_target = 0xa;
	data->vddc_vddgfx_delta = 300;
	data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
	data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
	data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
	data->voting_rights_clients[1] = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
	data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
	data->voting_rights_clients[3] = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
	data->voting_rights_clients[4] = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
	data->voting_rights_clients[5] = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
	data->voting_rights_clients[6] = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
	data->voting_rights_clients[7] = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;

	data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
	data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
	data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;

	data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->enable_tdc_limit_feature = true;
	data->enable_pkg_pwr_tracking_feature = true;
	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
	data->current_profile_setting.bupdate_sclk = 1;
	data->current_profile_setting.sclk_up_hyst = 0;
	data->current_profile_setting.sclk_down_hyst = 100;
	data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
	data->current_profile_setting.bupdate_mclk = 1;
	data->current_profile_setting.mclk_up_hyst = 0;
	data->current_profile_setting.mclk_down_hyst = 100;
	data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
	hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
	hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;

	if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) {
		uint8_t tmp1, tmp2;
		uint16_t tmp3 = 0;
		atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2,
				&tmp3);
		tmp3 = (tmp3 >> 5) & 0x3;
		data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3;
	} else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
		data->vddc_phase_shed_control = 1;
	} else {
		data->vddc_phase_shed_control = 0;
	}

	if (hwmgr->chip_id == CHIP_HAWAII) {
		data->thermal_temp_setting.temperature_low = 94500;
		data->thermal_temp_setting.temperature_high = 95000;
		data->thermal_temp_setting.temperature_shutdown = 104000;
	} else {
		data->thermal_temp_setting.temperature_low = 99500;
		data->thermal_temp_setting.temperature_high = 100000;
		data->thermal_temp_setting.temperature_shutdown = 104000;
	}

	data->fast_watermark_threshold = 100;
	if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
	else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
		data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ControlVDDGFX)) {
		if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
			VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
			data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
		}
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableMVDDControl)) {
		if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
		else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
			data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
	}

	if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ControlVDDGFX);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ControlVDDCI)) {
		if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
		else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
	}

	if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_EnableMVDDControl);

	if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK)
		&& (table_info->cac_dtp_table->usClockStretchAmount != 0))
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ClockStretcher);

	data->pcie_gen_performance.max = PP_PCIEGen1;
	data->pcie_gen_performance.min = PP_PCIEGen3;
	data->pcie_gen_power_saving.max = PP_PCIEGen1;
	data->pcie_gen_power_saving.min = PP_PCIEGen3;
	data->pcie_lane_performance.max = 0;
	data->pcie_lane_performance.min = 16;
	data->pcie_lane_power_saving.max = 0;
	data->pcie_lane_power_saving.min = 16;

	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UVDPowerGating);
	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_VCEPowerGating);
}
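
/* Query EVV (leakage) voltages for each virtual voltage ID from the VBIOS
 * and record the actual voltages for later patching of the tables.
 */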
1632static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
1633{
1634 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1635 uint16_t vv_id;
1636 uint16_t vddc = 0;
1637 uint16_t vddgfx = 0;
1638 uint16_t i, j;
1639 uint32_t sclk = 0;
1640 struct phm_ppt_v1_information *table_info =
1641 (struct phm_ppt_v1_information *)hwmgr->pptable;
1642 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
1643
1644
1645 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
1646 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1647
1648 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1649 if ((hwmgr->pp_table_version == PP_TABLE_V1)
1650 && !phm_get_sclk_for_voltage_evv(hwmgr,
1651 table_info->vddgfx_lookup_table, vv_id, &sclk)) {
1652 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1653 PHM_PlatformCaps_ClockStretcher)) {
1654 sclk_table = table_info->vdd_dep_on_sclk;
1655
1656 for (j = 1; j < sclk_table->count; j++) {
1657 if (sclk_table->entries[j].clk == sclk &&
1658 sclk_table->entries[j].cks_enable == 0) {
1659 sclk += 5000;
1660 break;
1661 }
1662 }
1663 }
1664 if (0 == atomctrl_get_voltage_evv_on_sclk
1665 (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
1666 vv_id, &vddgfx)) {
1667
1668 PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);
1669
1670
1671 if (vddgfx != 0 && vddgfx != vv_id) {
1672 data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
1673 data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
1674 data->vddcgfx_leakage.count++;
1675 }
1676 } else {
1677 pr_info("Error retrieving EVV voltage value!\n");
1678 }
1679 }
1680 } else {
1681 if ((hwmgr->pp_table_version == PP_TABLE_V0)
1682 || !phm_get_sclk_for_voltage_evv(hwmgr,
1683 table_info->vddc_lookup_table, vv_id, &sclk)) {
1684 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1685 PHM_PlatformCaps_ClockStretcher)) {
1686 if (table_info == NULL)
1687 return -EINVAL;
1688 sclk_table = table_info->vdd_dep_on_sclk;
1689
1690 for (j = 1; j < sclk_table->count; j++) {
1691 if (sclk_table->entries[j].clk == sclk &&
1692 sclk_table->entries[j].cks_enable == 0) {
1693 sclk += 5000;
1694 break;
1695 }
1696 }
1697 }
1698
1699 if (phm_get_voltage_evv_on_sclk(hwmgr,
1700 VOLTAGE_TYPE_VDDC,
1701 sclk, vv_id, &vddc) == 0) {
1702 if (vddc >= 2000 || vddc == 0)
1703 return -EINVAL;
1704 } else {
1705 pr_debug("failed to retrieving EVV voltage!\n");
1706 continue;
1707 }
1708
1709
1710 if (vddc != 0 && vddc != vv_id) {
1711 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
1712 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
1713 data->vddc_leakage.count++;
1714 }
1715 }
1716 }
1717 }
1718
1719 return 0;
1720}
1721
1722
1723
1724
1725
1726
1727
1728
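/*
 * Replace a voltage that is really a virtual leakage ID with the actual
 * voltage resolved earlier into @leakage_table.
 */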
1729static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
1730 uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
1731{
1732 uint32_t index;
1733
1734
1735 for (index = 0; index < leakage_table->count; index++) {
1736
1737
1738 if (leakage_table->leakage_id[index] == *voltage) {
1739 *voltage = leakage_table->actual_voltage[index];
1740 break;
1741 }
1742 }
1743
1744 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
		pr_err("Voltage value looks like a Leakage ID but it's not patched\n");
1746}
1747
1748
1749
1750
1751
1752
1753
1754
1755
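/*
 * Patch every entry of a voltage lookup table, swapping virtual leakage
 * IDs for real voltages.
 */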
1756static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
1757 phm_ppt_v1_voltage_lookup_table *lookup_table,
1758 struct smu7_leakage_voltage *leakage_table)
1759{
1760 uint32_t i;
1761
1762 for (i = 0; i < lookup_table->count; i++)
1763 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
1764 &lookup_table->entries[i].us_vdd, leakage_table);
1765
1766 return 0;
1767}
1768
1769static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
1770 struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
1771 uint16_t *vddc)
1772{
1773 struct phm_ppt_v1_information *table_info =
1774 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1775 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
1776 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
1777 table_info->max_clock_voltage_on_dc.vddc;
1778 return 0;
1779}
1780
1781static int smu7_patch_voltage_dependency_tables_with_lookup_table(
1782 struct pp_hwmgr *hwmgr)
1783{
1784 uint8_t entry_id;
1785 uint8_t voltage_id;
1786 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1787 struct phm_ppt_v1_information *table_info =
1788 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1789
1790 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1791 table_info->vdd_dep_on_sclk;
1792 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
1793 table_info->vdd_dep_on_mclk;
1794 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1795 table_info->mm_dep_table;
1796
1797 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1798 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1799 voltage_id = sclk_table->entries[entry_id].vddInd;
1800 sclk_table->entries[entry_id].vddgfx =
1801 table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
1802 }
1803 } else {
1804 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1805 voltage_id = sclk_table->entries[entry_id].vddInd;
1806 sclk_table->entries[entry_id].vddc =
1807 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1808 }
1809 }
1810
1811 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
1812 voltage_id = mclk_table->entries[entry_id].vddInd;
1813 mclk_table->entries[entry_id].vddc =
1814 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1815 }
1816
1817 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
1818 voltage_id = mm_table->entries[entry_id].vddcInd;
1819 mm_table->entries[entry_id].vddc =
1820 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1821 }
1822
1823 return 0;
1824
1825}
1826
1827static int phm_add_voltage(struct pp_hwmgr *hwmgr,
1828 phm_ppt_v1_voltage_lookup_table *look_up_table,
1829 phm_ppt_v1_voltage_lookup_record *record)
1830{
1831 uint32_t i;
1832
1833 PP_ASSERT_WITH_CODE((NULL != look_up_table),
1834 "Lookup Table empty.", return -EINVAL);
1835 PP_ASSERT_WITH_CODE((0 != look_up_table->count),
1836 "Lookup Table empty.", return -EINVAL);
1837
	i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
	/* A new record may be appended at entries[count], so require count < max. */
	PP_ASSERT_WITH_CODE((i > look_up_table->count),
		"Lookup Table is full.", return -EINVAL);
1841
1842
1843 for (i = 0; i < look_up_table->count; i++) {
1844 if (look_up_table->entries[i].us_vdd == record->us_vdd) {
1845 if (look_up_table->entries[i].us_calculated == 1)
1846 return 0;
1847 break;
1848 }
1849 }
1850
1851 look_up_table->entries[i].us_calculated = 1;
1852 look_up_table->entries[i].us_vdd = record->us_vdd;
1853 look_up_table->entries[i].us_cac_low = record->us_cac_low;
1854 look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
1855 look_up_table->entries[i].us_cac_high = record->us_cac_high;
1856
1857 if (i == look_up_table->count)
1858 look_up_table->count++;
1859
1860 return 0;
1861}
1862
1863
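/*
 * Derive extra lookup-table records from the vdd_offset fields of the
 * SCLK/MCLK dependency tables; an offset with bit 15 set is treated as
 * negative (wrapped via 0xFFFF).
 */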
1864static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
1865{
1866 uint8_t entry_id;
1867 struct phm_ppt_v1_voltage_lookup_record v_record;
1868 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1869 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1870
1871 phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
1872 phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
1873
1874 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1875 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1876 if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
1877 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
1878 sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
1879 else
1880 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
1881 sclk_table->entries[entry_id].vdd_offset;
1882
1883 sclk_table->entries[entry_id].vddc =
1884 v_record.us_cac_low = v_record.us_cac_mid =
1885 v_record.us_cac_high = v_record.us_vdd;
1886
1887 phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
1888 }
1889
1890 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
1891 if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
1892 v_record.us_vdd = mclk_table->entries[entry_id].vddc +
1893 mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
1894 else
1895 v_record.us_vdd = mclk_table->entries[entry_id].vddc +
1896 mclk_table->entries[entry_id].vdd_offset;
1897
1898 mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
1899 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
1900 phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
1901 }
1902 }
1903 return 0;
1904}
1905
1906static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
1907{
1908 uint8_t entry_id;
1909 struct phm_ppt_v1_voltage_lookup_record v_record;
1910 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1911 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1912 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
1913
1914 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1915 for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
1916 if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
1917 v_record.us_vdd = mm_table->entries[entry_id].vddc +
1918 mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
1919 else
1920 v_record.us_vdd = mm_table->entries[entry_id].vddc +
1921 mm_table->entries[entry_id].vddgfx_offset;
1922
1923
1924 mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
1925 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
1926 phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
1927 }
1928 }
1929 return 0;
1930}
1931
1932static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
1933 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
1934{
1935 uint32_t table_size, i, j;
1936 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
1937 table_size = lookup_table->count;
1938
1939 PP_ASSERT_WITH_CODE(0 != lookup_table->count,
1940 "Lookup table is empty", return -EINVAL);
1941
1942
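	/* Insertion sort: keep the entries ascending by us_vdd. */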
1943 for (i = 0; i < table_size - 1; i++) {
1944 for (j = i + 1; j > 0; j--) {
1945 if (lookup_table->entries[j].us_vdd <
1946 lookup_table->entries[j - 1].us_vdd) {
1947 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
1948 lookup_table->entries[j - 1] = lookup_table->entries[j];
1949 lookup_table->entries[j] = tmp_voltage_lookup_record;
1950 }
1951 }
1952 }
1953
1954 return 0;
1955}
1956
1957static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
1958{
1959 int result = 0;
1960 int tmp_result;
1961 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1962 struct phm_ppt_v1_information *table_info =
1963 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1964
1965 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1966 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
1967 table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
1968 if (tmp_result != 0)
1969 result = tmp_result;
1970
1971 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
1972 &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
1973 } else {
1974
1975 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
1976 table_info->vddc_lookup_table, &(data->vddc_leakage));
1977 if (tmp_result)
1978 result = tmp_result;
1979
1980 tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
1981 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
1982 if (tmp_result)
1983 result = tmp_result;
1984 }
1985
1986 tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
1987 if (tmp_result)
1988 result = tmp_result;
1989
1990 tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
1991 if (tmp_result)
1992 result = tmp_result;
1993
1994 tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
1995 if (tmp_result)
1996 result = tmp_result;
1997
1998 tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
1999 if (tmp_result)
2000 result = tmp_result;
2001
2002 tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
2003 if (tmp_result)
2004 result = tmp_result;
2005
2006 return result;
2007}
2008
2009static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
2010{
2011 struct phm_ppt_v1_information *table_info =
2012 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2013
2014 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
2015 table_info->vdd_dep_on_sclk;
2016 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
2017 table_info->vdd_dep_on_mclk;
2018
	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
		"VDD dependency on SCLK table is missing.",
		return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
		"VDD dependency on SCLK table must have at least one entry.",
		return -EINVAL);

	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
		"VDD dependency on MCLK table is missing.",
		return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
		"VDD dependency on MCLK table must have at least one entry.",
		return -EINVAL);
2032
2033 table_info->max_clock_voltage_on_ac.sclk =
2034 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
2035 table_info->max_clock_voltage_on_ac.mclk =
2036 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
2037 table_info->max_clock_voltage_on_ac.vddc =
2038 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
2039 table_info->max_clock_voltage_on_ac.vddci =
2040 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
2041
2042 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
2043 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
2044 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
2045 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
2046
2047 return 0;
2048}
2049
2050static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
2051{
2052 struct phm_ppt_v1_information *table_info =
2053 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2054 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
2055 struct phm_ppt_v1_voltage_lookup_table *lookup_table;
2056 uint32_t i;
2057 uint32_t hw_revision, sub_vendor_id, sub_sys_id;
2058 struct amdgpu_device *adev = hwmgr->adev;
2059
2060 if (table_info != NULL) {
2061 dep_mclk_table = table_info->vdd_dep_on_mclk;
2062 lookup_table = table_info->vddc_lookup_table;
2063 } else
2064 return 0;
2065
2066 hw_revision = adev->pdev->revision;
2067 sub_sys_id = adev->pdev->subsystem_device;
2068 sub_vendor_id = adev->pdev->subsystem_vendor;
2069
2070 if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
2071 ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
2072 (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
2073 (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
2074 if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
2075 return 0;
2076
2077 for (i = 0; i < lookup_table->count; i++) {
2078 if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
2079 dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
2080 return 0;
2081 }
2082 }
2083 }
2084 return 0;
2085}
2086
2087static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
2088{
2089 struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
2090 uint32_t temp_reg;
2091 struct phm_ppt_v1_information *table_info =
2092 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2093
2094
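	/* If the VBIOS assigns the VDDC PCC GPIO, program the matching
	 * throttle response field in CNB_PWRMGT_CNTL for that pin. */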
2095 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
2096 temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
2097 switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
2098 case 0:
2099 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
2100 break;
2101 case 1:
2102 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
2103 break;
2104 case 2:
2105 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
2106 break;
2107 case 3:
2108 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
2109 break;
2110 case 4:
2111 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
2112 break;
2113 default:
2114 break;
2115 }
2116 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
2117 }
2118
2119 if (table_info == NULL)
2120 return 0;
2121
2122 if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
2123 hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
2124 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
2125 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
2126
2127 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
2128 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
2129
2130 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
2131
2132 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
2133
2134 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
2135 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
2136
2137 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
2138
2139 table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
2140 (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;
2141
2142 table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
2143 table_info->cac_dtp_table->usOperatingTempStep = 1;
2144 table_info->cac_dtp_table->usOperatingTempHyst = 1;
2145
2146 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
2147 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
2148
2149 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
2150 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
2151
2152 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
2153 table_info->cac_dtp_table->usOperatingTempMinLimit;
2154
2155 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
2156 table_info->cac_dtp_table->usOperatingTempMaxLimit;
2157
2158 hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
2159 table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
2160
2161 hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
2162 table_info->cac_dtp_table->usOperatingTempStep;
2163
2164 hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
2165 table_info->cac_dtp_table->usTargetOperatingTemp;
2166 if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK)
2167 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2168 PHM_PlatformCaps_ODFuzzyFanControlSupport);
2169 }
2170
2171 return 0;
2172}
2173
2174
2175
2176
2177
2178
2179
2180
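/*
 * V0-pptable variant of the leakage patch: same lookup, but the legacy
 * dependency tables carry voltages as 32-bit values.
 */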
2181static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2182 uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
2183{
2184 uint32_t index;
2185
2186
2187 for (index = 0; index < leakage_table->count; index++) {
2188
2189
2190 if (leakage_table->leakage_id[index] == *voltage) {
2191 *voltage = leakage_table->actual_voltage[index];
2192 break;
2193 }
2194 }
2195
2196 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
		pr_err("Voltage value looks like a Leakage ID but it's not patched\n");
2198}
2199
2200
2201static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
2202 struct phm_clock_voltage_dependency_table *tab)
2203{
2204 uint16_t i;
2205 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2206
2207 if (tab)
2208 for (i = 0; i < tab->count; i++)
2209 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2210 &data->vddc_leakage);
2211
2212 return 0;
2213}
2214
2215static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
2216 struct phm_clock_voltage_dependency_table *tab)
2217{
2218 uint16_t i;
2219 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2220
2221 if (tab)
2222 for (i = 0; i < tab->count; i++)
2223 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2224 &data->vddci_leakage);
2225
2226 return 0;
2227}
2228
2229static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
2230 struct phm_vce_clock_voltage_dependency_table *tab)
2231{
2232 uint16_t i;
2233 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2234
2235 if (tab)
2236 for (i = 0; i < tab->count; i++)
2237 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2238 &data->vddc_leakage);
2239
2240 return 0;
2241}
2242
2243
2244static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
2245 struct phm_uvd_clock_voltage_dependency_table *tab)
2246{
2247 uint16_t i;
2248 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2249
2250 if (tab)
2251 for (i = 0; i < tab->count; i++)
2252 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2253 &data->vddc_leakage);
2254
2255 return 0;
2256}
2257
2258static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
2259 struct phm_phase_shedding_limits_table *tab)
2260{
2261 uint16_t i;
2262 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2263
2264 if (tab)
2265 for (i = 0; i < tab->count; i++)
2266 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
2267 &data->vddc_leakage);
2268
2269 return 0;
2270}
2271
2272static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
2273 struct phm_samu_clock_voltage_dependency_table *tab)
2274{
2275 uint16_t i;
2276 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2277
2278 if (tab)
2279 for (i = 0; i < tab->count; i++)
2280 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2281 &data->vddc_leakage);
2282
2283 return 0;
2284}
2285
2286static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
2287 struct phm_acp_clock_voltage_dependency_table *tab)
2288{
2289 uint16_t i;
2290 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2291
2292 if (tab)
2293 for (i = 0; i < tab->count; i++)
2294 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2295 &data->vddc_leakage);
2296
2297 return 0;
2298}
2299
2300static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
2301 struct phm_clock_and_voltage_limits *tab)
2302{
2303 uint32_t vddc, vddci;
2304 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2305
2306 if (tab) {
2307 vddc = tab->vddc;
2308 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
2309 &data->vddc_leakage);
2310 tab->vddc = vddc;
2311 vddci = tab->vddci;
2312 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
2313 &data->vddci_leakage);
2314 tab->vddci = vddci;
2315 }
2316
2317 return 0;
2318}
2319
2320static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
2321{
2322 uint32_t i;
2323 uint32_t vddc;
2324 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2325
2326 if (tab) {
2327 for (i = 0; i < tab->count; i++) {
2328 vddc = (uint32_t)(tab->entries[i].Vddc);
2329 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
2330 tab->entries[i].Vddc = (uint16_t)vddc;
2331 }
2332 }
2333
2334 return 0;
2335}
2336
2337static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
2338{
2339 int tmp;
2340
2341 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
2342 if (tmp)
2343 return -EINVAL;
2344
2345 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
2346 if (tmp)
2347 return -EINVAL;
2348
2349 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2350 if (tmp)
2351 return -EINVAL;
2352
2353 tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
2354 if (tmp)
2355 return -EINVAL;
2356
2357 tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
2358 if (tmp)
2359 return -EINVAL;
2360
2361 tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
2362 if (tmp)
2363 return -EINVAL;
2364
2365 tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
2366 if (tmp)
2367 return -EINVAL;
2368
2369 tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
2370 if (tmp)
2371 return -EINVAL;
2372
2373 tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
2374 if (tmp)
2375 return -EINVAL;
2376
2377 tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
2378 if (tmp)
2379 return -EINVAL;
2380
2381 tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
2382 if (tmp)
2383 return -EINVAL;
2384
2385 tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
2386 if (tmp)
2387 return -EINVAL;
2388
2389 return 0;
2390}
2391
2392
2393static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
2394{
2395 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2396
2397 struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
2398 struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
2399 struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
2400
	PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
		"VDDC dependency on SCLK table is missing. This table is mandatory.",
		return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
		"VDDC dependency on SCLK table must have at least one entry. This table is mandatory.",
		return -EINVAL);

	PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
		"VDDC dependency on MCLK table is missing. This table is mandatory.",
		return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
		"VDDC dependency on MCLK table must have at least one entry. This table is mandatory.",
		return -EINVAL);
2414
2415 data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
2416 data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2417
2418 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
2419 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
2420 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
2421 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
2422 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
2423 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2424
2425 if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
2426 data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
2427 data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
2428 }
2429
2430 if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1)
2431 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
2432
2433 return 0;
2434}
2435
2436static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
2437{
2438 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2439 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
2440 kfree(hwmgr->backend);
2441 hwmgr->backend = NULL;
2442
2443 return 0;
2444}
2445
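/*
 * Fallback used when EVV is not supported: derive VDDC/VDDCI leakage
 * pairs from the efuse leakage ID.
 */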
2446static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr)
2447{
2448 uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id;
2449 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2450 int i;
2451
2452 if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) {
2453 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
2454 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
2455 if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci,
2456 virtual_voltage_id,
2457 efuse_voltage_id) == 0) {
2458 if (vddc != 0 && vddc != virtual_voltage_id) {
2459 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
2460 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
2461 data->vddc_leakage.count++;
2462 }
2463 if (vddci != 0 && vddci != virtual_voltage_id) {
2464 data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci;
2465 data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id;
2466 data->vddci_leakage.count++;
2467 }
2468 }
2469 }
2470 }
2471 return 0;
2472}
2473
2474static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2475{
2476 struct smu7_hwmgr *data;
2477 int result = 0;
2478
2479 data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
2480 if (data == NULL)
2481 return -ENOMEM;
2482
2483 hwmgr->backend = data;
2484 smu7_patch_voltage_workaround(hwmgr);
2485 smu7_init_dpm_defaults(hwmgr);
2486
2487
2488 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2489 PHM_PlatformCaps_EVV)) {
2490 result = smu7_get_evv_voltages(hwmgr);
2491 if (result) {
2492 pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
2493 return -EINVAL;
2494 }
2495 } else {
2496 smu7_get_elb_voltages(hwmgr);
2497 }
2498
2499 if (hwmgr->pp_table_version == PP_TABLE_V1) {
2500 smu7_complete_dependency_tables(hwmgr);
2501 smu7_set_private_data_based_on_pptable_v1(hwmgr);
2502 } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
2503 smu7_patch_dependency_tables_with_leakage(hwmgr);
2504 smu7_set_private_data_based_on_pptable_v0(hwmgr);
2505 }
2506
2507
2508 result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
2509
2510 if (0 == result) {
2511 struct amdgpu_device *adev = hwmgr->adev;
2512
2513 data->is_tlu_enabled = false;
2514
2515 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
2516 SMU7_MAX_HARDWARE_POWERLEVELS;
2517 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
2518 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
2519
2520 data->pcie_gen_cap = adev->pm.pcie_gen_mask;
2521 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2522 data->pcie_spc_cap = 20;
2523 data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
2524
2525 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400;
2526
2527 hwmgr->platform_descriptor.clockStep.engineClock = 500;
2528 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
2529 smu7_thermal_parameter_init(hwmgr);
	} else {
		/* Clean up the half-initialized backend; ignore fini's return value. */
		smu7_hwmgr_backend_fini(hwmgr);
		return result;
	}

	return 0;
}
2537
2538static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
2539{
2540 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2541 uint32_t level, tmp;
2542
2543 if (!data->pcie_dpm_key_disabled) {
2544 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2545 level = 0;
2546 tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
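			/* level = index of the highest set bit in the mask */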
2547 while (tmp >>= 1)
2548 level++;
2549
2550 if (level)
2551 smum_send_msg_to_smc_with_parameter(hwmgr,
2552 PPSMC_MSG_PCIeDPM_ForceLevel, level);
2553 }
2554 }
2555
2556 if (!data->sclk_dpm_key_disabled) {
2557 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2558 level = 0;
2559 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
2560 while (tmp >>= 1)
2561 level++;
2562
2563 if (level)
2564 smum_send_msg_to_smc_with_parameter(hwmgr,
2565 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2566 (1 << level));
2567 }
2568 }
2569
2570 if (!data->mclk_dpm_key_disabled) {
2571 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2572 level = 0;
2573 tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
2574 while (tmp >>= 1)
2575 level++;
2576
2577 if (level)
2578 smum_send_msg_to_smc_with_parameter(hwmgr,
2579 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2580 (1 << level));
2581 }
2582 }
2583
2584 return 0;
2585}
2586
2587static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
2588{
2589 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2590
2591 if (hwmgr->pp_table_version == PP_TABLE_V1)
2592 phm_apply_dal_min_voltage_request(hwmgr);
2593
2594
2595 if (!data->sclk_dpm_key_disabled) {
2596 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
2597 smum_send_msg_to_smc_with_parameter(hwmgr,
2598 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2599 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2600 }
2601
2602 if (!data->mclk_dpm_key_disabled) {
2603 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
2604 smum_send_msg_to_smc_with_parameter(hwmgr,
2605 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2606 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2607 }
2608
2609 return 0;
2610}
2611
2612static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2613{
2614 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2615
2616 if (!smum_is_dpm_running(hwmgr))
2617 return -EINVAL;
2618
2619 if (!data->pcie_dpm_key_disabled) {
2620 smum_send_msg_to_smc(hwmgr,
2621 PPSMC_MSG_PCIeDPM_UnForceLevel);
2622 }
2623
2624 return smu7_upload_dpm_level_enable_mask(hwmgr);
2625}
2626
2627static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2628{
2629 struct smu7_hwmgr *data =
2630 (struct smu7_hwmgr *)(hwmgr->backend);
2631 uint32_t level;
2632
2633 if (!data->sclk_dpm_key_disabled)
2634 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2635 level = phm_get_lowest_enabled_level(hwmgr,
2636 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2637 smum_send_msg_to_smc_with_parameter(hwmgr,
2638 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2639 (1 << level));
2640
2641 }
2642
2643 if (!data->mclk_dpm_key_disabled) {
2644 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2645 level = phm_get_lowest_enabled_level(hwmgr,
2646 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2647 smum_send_msg_to_smc_with_parameter(hwmgr,
2648 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2649 (1 << level));
2650 }
2651 }
2652
2653 if (!data->pcie_dpm_key_disabled) {
2654 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2655 level = phm_get_lowest_enabled_level(hwmgr,
2656 data->dpm_level_enable_mask.pcie_dpm_enable_mask);
2657 smum_send_msg_to_smc_with_parameter(hwmgr,
2658 PPSMC_MSG_PCIeDPM_ForceLevel,
2659 (level));
2660 }
2661 }
2662
2663 return 0;
2664}
2665
2666static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
2667 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask)
2668{
2669 uint32_t percentage;
2670 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2671 struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
2672 int32_t tmp_mclk;
2673 int32_t tmp_sclk;
2674 int32_t count;
2675
2676 if (golden_dpm_table->mclk_table.count < 1)
2677 return -EINVAL;
2678
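	/* Pick the profiling SCLK as a percentage of the chosen golden MCLK;
	 * a single-level MCLK table falls back to a fixed 70% ratio. */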
2679 percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
2680 golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
2681
2682 if (golden_dpm_table->mclk_table.count == 1) {
2683 percentage = 70;
2684 tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
2685 *mclk_mask = golden_dpm_table->mclk_table.count - 1;
2686 } else {
2687 tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
2688 *mclk_mask = golden_dpm_table->mclk_table.count - 2;
2689 }
2690
2691 tmp_sclk = tmp_mclk * percentage / 100;
2692
2693 if (hwmgr->pp_table_version == PP_TABLE_V0) {
2694 for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
2695 count >= 0; count--) {
2696 if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
2697 tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
2698 *sclk_mask = count;
2699 break;
2700 }
2701 }
2702 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
2703 *sclk_mask = 0;
2704 tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
2705 }
2706
2707 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2708 *sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
2709 } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
2710 struct phm_ppt_v1_information *table_info =
2711 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2712
2713 for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
2714 if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
2715 tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
2716 *sclk_mask = count;
2717 break;
2718 }
2719 }
2720 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
2721 *sclk_mask = 0;
2722 tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
2723 }
2724
2725 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2726 *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
2727 }
2728
2729 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
2730 *mclk_mask = 0;
2731 else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2732 *mclk_mask = golden_dpm_table->mclk_table.count - 1;
2733
2734 *pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
2735 hwmgr->pstate_sclk = tmp_sclk;
2736 hwmgr->pstate_mclk = tmp_mclk;
2737
2738 return 0;
2739}
2740
2741static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
2742 enum amd_dpm_forced_level level)
2743{
2744 int ret = 0;
2745 uint32_t sclk_mask = 0;
2746 uint32_t mclk_mask = 0;
2747 uint32_t pcie_mask = 0;
2748
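	/* Derive the profiling (pstate) clocks lazily on first use. */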
2749 if (hwmgr->pstate_sclk == 0)
2750 smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
2751
2752 switch (level) {
2753 case AMD_DPM_FORCED_LEVEL_HIGH:
2754 ret = smu7_force_dpm_highest(hwmgr);
2755 break;
2756 case AMD_DPM_FORCED_LEVEL_LOW:
2757 ret = smu7_force_dpm_lowest(hwmgr);
2758 break;
2759 case AMD_DPM_FORCED_LEVEL_AUTO:
2760 ret = smu7_unforce_dpm_levels(hwmgr);
2761 break;
2762 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
2763 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
2764 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
2765 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
2766 ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
2767 if (ret)
2768 return ret;
2769 smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
2770 smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
2771 smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
2772 break;
2773 case AMD_DPM_FORCED_LEVEL_MANUAL:
2774 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
2775 default:
2776 break;
2777 }
2778
2779 if (!ret) {
2780 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2781 smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
2782 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2783 smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
2784 }
2785 return ret;
2786}
2787
2788static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
2789{
2790 return sizeof(struct smu7_power_state);
2791}
2792
2793static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
2794 uint32_t vblank_time_us)
2795{
2796 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2797 uint32_t switch_limit_us;
2798
2799 switch (hwmgr->chip_id) {
2800 case CHIP_POLARIS10:
2801 case CHIP_POLARIS11:
2802 case CHIP_POLARIS12:
2803 switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
2804 break;
2805 case CHIP_VEGAM:
2806 switch_limit_us = 30;
2807 break;
2808 default:
2809 switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
2810 break;
2811 }
2812
	return vblank_time_us < switch_limit_us;
2817}
2818
2819static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2820 struct pp_power_state *request_ps,
2821 const struct pp_power_state *current_ps)
2822{
2823
2824 struct smu7_power_state *smu7_ps =
2825 cast_phw_smu7_power_state(&request_ps->hardware);
2826 uint32_t sclk;
2827 uint32_t mclk;
2828 struct PP_Clocks minimum_clocks = {0};
2829 bool disable_mclk_switching;
2830 bool disable_mclk_switching_for_frame_lock;
2831 const struct phm_clock_and_voltage_limits *max_limits;
2832 uint32_t i;
2833 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2834 struct phm_ppt_v1_information *table_info =
2835 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2836 int32_t count;
2837 int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
2838
2839 data->battery_state = (PP_StateUILabel_Battery ==
2840 request_ps->classification.ui_label);
2841
2842 PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
2843 "VI should always have 2 performance levels",
2844 );
2845
2846 max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
2847 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
2848 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
2849
2850
2851 if (PP_PowerSource_DC == hwmgr->power_source) {
2852 for (i = 0; i < smu7_ps->performance_level_count; i++) {
2853 if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
2854 smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
2855 if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
2856 smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
2857 }
2858 }
2859
2860 minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
2861 minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
2862
2863 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2864 PHM_PlatformCaps_StablePState)) {
2865 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
2866 stable_pstate_sclk = (max_limits->sclk * 75) / 100;
2867
2868 for (count = table_info->vdd_dep_on_sclk->count - 1;
2869 count >= 0; count--) {
2870 if (stable_pstate_sclk >=
2871 table_info->vdd_dep_on_sclk->entries[count].clk) {
2872 stable_pstate_sclk =
2873 table_info->vdd_dep_on_sclk->entries[count].clk;
2874 break;
2875 }
2876 }
2877
2878 if (count < 0)
2879 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
2880
2881 stable_pstate_mclk = max_limits->mclk;
2882
2883 minimum_clocks.engineClock = stable_pstate_sclk;
2884 minimum_clocks.memoryClock = stable_pstate_mclk;
2885 }
2886
2887 disable_mclk_switching_for_frame_lock = phm_cap_enabled(
2888 hwmgr->platform_descriptor.platformCaps,
2889 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
2890
2891
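	/* MCLK switching is only safe with at most one display, no frame
	 * lock, and a vblank long enough to hide the switch. */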
2892 if (hwmgr->display_config->num_display == 0)
2893 disable_mclk_switching = false;
2894 else
2895 disable_mclk_switching = ((1 < hwmgr->display_config->num_display) ||
2896 disable_mclk_switching_for_frame_lock ||
2897 smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
2898
2899 sclk = smu7_ps->performance_levels[0].engine_clock;
2900 mclk = smu7_ps->performance_levels[0].memory_clock;
2901
2902 if (disable_mclk_switching)
2903 mclk = smu7_ps->performance_levels
2904 [smu7_ps->performance_level_count - 1].memory_clock;
2905
2906 if (sclk < minimum_clocks.engineClock)
2907 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
2908 max_limits->sclk : minimum_clocks.engineClock;
2909
2910 if (mclk < minimum_clocks.memoryClock)
2911 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
2912 max_limits->mclk : minimum_clocks.memoryClock;
2913
2914 smu7_ps->performance_levels[0].engine_clock = sclk;
2915 smu7_ps->performance_levels[0].memory_clock = mclk;
2916
2917 smu7_ps->performance_levels[1].engine_clock =
2918 (smu7_ps->performance_levels[1].engine_clock >=
2919 smu7_ps->performance_levels[0].engine_clock) ?
2920 smu7_ps->performance_levels[1].engine_clock :
2921 smu7_ps->performance_levels[0].engine_clock;
2922
2923 if (disable_mclk_switching) {
2924 if (mclk < smu7_ps->performance_levels[1].memory_clock)
2925 mclk = smu7_ps->performance_levels[1].memory_clock;
2926
2927 smu7_ps->performance_levels[0].memory_clock = mclk;
2928 smu7_ps->performance_levels[1].memory_clock = mclk;
2929 } else {
2930 if (smu7_ps->performance_levels[1].memory_clock <
2931 smu7_ps->performance_levels[0].memory_clock)
2932 smu7_ps->performance_levels[1].memory_clock =
2933 smu7_ps->performance_levels[0].memory_clock;
2934 }
2935
2936 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2937 PHM_PlatformCaps_StablePState)) {
2938 for (i = 0; i < smu7_ps->performance_level_count; i++) {
2939 smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
2940 smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
2941 smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
			smu7_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
2943 }
2944 }
2945 return 0;
2946}
2947
2948
2949static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
2950{
2951 struct pp_power_state *ps;
2952 struct smu7_power_state *smu7_ps;
2953
2954 if (hwmgr == NULL)
2955 return -EINVAL;
2956
2957 ps = hwmgr->request_ps;
2958
2959 if (ps == NULL)
2960 return -EINVAL;
2961
2962 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
2963
2964 if (low)
2965 return smu7_ps->performance_levels[0].memory_clock;
2966 else
2967 return smu7_ps->performance_levels
2968 [smu7_ps->performance_level_count-1].memory_clock;
2969}
2970
2971static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
2972{
2973 struct pp_power_state *ps;
2974 struct smu7_power_state *smu7_ps;
2975
2976 if (hwmgr == NULL)
2977 return -EINVAL;
2978
2979 ps = hwmgr->request_ps;
2980
2981 if (ps == NULL)
2982 return -EINVAL;
2983
2984 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
2985
2986 if (low)
2987 return smu7_ps->performance_levels[0].engine_clock;
2988 else
2989 return smu7_ps->performance_levels
2990 [smu7_ps->performance_level_count-1].engine_clock;
2991}
2992
2993static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
2994 struct pp_hw_power_state *hw_ps)
2995{
2996 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2997 struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
2998 ATOM_FIRMWARE_INFO_V2_2 *fw_info;
2999 uint16_t size;
3000 uint8_t frev, crev;
3001 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
3002
3003
3004
3005
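	/* Read the VBIOS boot-up clocks and voltages from FirmwareInfo. */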
3006 fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index,
3007 &size, &frev, &crev);
3008 if (!fw_info)
3009
3010 return 0;
3011
3012
3013 data->vbios_boot_state.sclk_bootup_value =
3014 le32_to_cpu(fw_info->ulDefaultEngineClock);
3015 data->vbios_boot_state.mclk_bootup_value =
3016 le32_to_cpu(fw_info->ulDefaultMemoryClock);
3017 data->vbios_boot_state.mvdd_bootup_value =
3018 le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
3019 data->vbios_boot_state.vddc_bootup_value =
3020 le16_to_cpu(fw_info->usBootUpVDDCVoltage);
3021 data->vbios_boot_state.vddci_bootup_value =
3022 le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
3023 data->vbios_boot_state.pcie_gen_bootup_value =
3024 smu7_get_current_pcie_speed(hwmgr);
3025
3026 data->vbios_boot_state.pcie_lane_bootup_value =
3027 (uint16_t)smu7_get_current_pcie_lane_number(hwmgr);
3028
3029
3030 ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
3031 ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
3032 ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
3033 ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
3034
3035 return 0;
3036}
3037
3038static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
3039{
3040 int result;
3041 unsigned long ret = 0;
3042
3043 if (hwmgr->pp_table_version == PP_TABLE_V0) {
3044 result = pp_tables_get_num_of_entries(hwmgr, &ret);
3045 return result ? 0 : ret;
3046 } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
3047 result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
3048 return result;
3049 }
3050 return 0;
3051}
3052
3053static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
3054 void *state, struct pp_power_state *power_state,
3055 void *pp_table, uint32_t classification_flag)
3056{
3057 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3058 struct smu7_power_state *smu7_power_state =
3059 (struct smu7_power_state *)(&(power_state->hardware));
3060 struct smu7_performance_level *performance_level;
3061 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
3062 ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
3063 (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
3064 PPTable_Generic_SubTable_Header *sclk_dep_table =
3065 (PPTable_Generic_SubTable_Header *)
3066 (((unsigned long)powerplay_table) +
3067 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
3068
3069 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
3070 (ATOM_Tonga_MCLK_Dependency_Table *)
3071 (((unsigned long)powerplay_table) +
3072 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
3073
3074
3075 power_state->classification.ui_label =
3076 (le16_to_cpu(state_entry->usClassification) &
3077 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
3078 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
3079 power_state->classification.flags = classification_flag;
3080
3081
3082 power_state->classification.temporary_state = false;
3083 power_state->classification.to_be_deleted = false;
3084
3085 power_state->validation.disallowOnDC =
3086 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3087 ATOM_Tonga_DISALLOW_ON_DC));
3088
3089 power_state->pcie.lanes = 0;
3090
3091 power_state->display.disableFrameModulation = false;
3092 power_state->display.limitRefreshrate = false;
3093 power_state->display.enableVariBright =
3094 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3095 ATOM_Tonga_ENABLE_VARIBRIGHT));
3096
3097 power_state->validation.supportedPowerLevels = 0;
3098 power_state->uvd_clocks.VCLK = 0;
3099 power_state->uvd_clocks.DCLK = 0;
3100 power_state->temperatures.min = 0;
3101 power_state->temperatures.max = 0;
3102
3103 performance_level = &(smu7_power_state->performance_levels
3104 [smu7_power_state->performance_level_count++]);
3105
3106 PP_ASSERT_WITH_CODE(
3107 (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
		"Performance levels exceed SMC limit!",
3109 return -EINVAL);
3110
3111 PP_ASSERT_WITH_CODE(
3112 (smu7_power_state->performance_level_count <=
3113 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
		"Performance levels exceed Driver limit!",
3115 return -EINVAL);
3116
3117
3118 performance_level->memory_clock = mclk_dep_table->entries
3119 [state_entry->ucMemoryClockIndexLow].ulMclk;
3120 if (sclk_dep_table->ucRevId == 0)
3121 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3122 [state_entry->ucEngineClockIndexLow].ulSclk;
3123 else if (sclk_dep_table->ucRevId == 1)
3124 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3125 [state_entry->ucEngineClockIndexLow].ulSclk;
3126 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3127 state_entry->ucPCIEGenLow);
	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
			state_entry->ucPCIELaneLow);
3130
3131 performance_level = &(smu7_power_state->performance_levels
3132 [smu7_power_state->performance_level_count++]);
3133 performance_level->memory_clock = mclk_dep_table->entries
3134 [state_entry->ucMemoryClockIndexHigh].ulMclk;
3135
3136 if (sclk_dep_table->ucRevId == 0)
3137 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3138 [state_entry->ucEngineClockIndexHigh].ulSclk;
3139 else if (sclk_dep_table->ucRevId == 1)
3140 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3141 [state_entry->ucEngineClockIndexHigh].ulSclk;
3142
3143 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3144 state_entry->ucPCIEGenHigh);
3145 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3146 state_entry->ucPCIELaneHigh);
3147
3148 return 0;
3149}
3150
3151static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
3152 unsigned long entry_index, struct pp_power_state *state)
3153{
3154 int result;
3155 struct smu7_power_state *ps;
3156 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3157 struct phm_ppt_v1_information *table_info =
3158 (struct phm_ppt_v1_information *)(hwmgr->pptable);
3159 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
3160 table_info->vdd_dep_on_mclk;
3161
3162 state->hardware.magic = PHM_VIslands_Magic;
3163
3164 ps = (struct smu7_power_state *)(&state->hardware);
3165
3166 result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
3167 smu7_get_pp_table_entry_callback_func_v1);
3168
3169
3170
3171
3172
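	/* A single-entry VDDCI/MCLK dependency table should match the VBIOS
	 * boot values; a mismatch is logged but tolerated. */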
3173 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3174 if (dep_mclk_table->entries[0].clk !=
3175 data->vbios_boot_state.mclk_bootup_value)
3176 pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
3177 "does not match VBIOS boot MCLK level");
3178 if (dep_mclk_table->entries[0].vddci !=
3179 data->vbios_boot_state.vddci_bootup_value)
3180 pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
3181 "does not match VBIOS boot VDDCI level");
3182 }
3183
3184
3185 if (!state->validation.disallowOnDC)
3186 ps->dc_compatible = true;
3187
3188 if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3189 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3190
3191 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3192 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3193
3194 if (!result) {
3195 uint32_t i;
3196
3197 switch (state->classification.ui_label) {
3198 case PP_StateUILabel_Performance:
3199 data->use_pcie_performance_levels = true;
3200 for (i = 0; i < ps->performance_level_count; i++) {
3201 if (data->pcie_gen_performance.max <
3202 ps->performance_levels[i].pcie_gen)
3203 data->pcie_gen_performance.max =
3204 ps->performance_levels[i].pcie_gen;
3205
3206 if (data->pcie_gen_performance.min >
3207 ps->performance_levels[i].pcie_gen)
3208 data->pcie_gen_performance.min =
3209 ps->performance_levels[i].pcie_gen;
3210
3211 if (data->pcie_lane_performance.max <
3212 ps->performance_levels[i].pcie_lane)
3213 data->pcie_lane_performance.max =
3214 ps->performance_levels[i].pcie_lane;
3215 if (data->pcie_lane_performance.min >
3216 ps->performance_levels[i].pcie_lane)
3217 data->pcie_lane_performance.min =
3218 ps->performance_levels[i].pcie_lane;
3219 }
3220 break;
3221 case PP_StateUILabel_Battery:
3222 data->use_pcie_power_saving_levels = true;
3223
3224 for (i = 0; i < ps->performance_level_count; i++) {
3225 if (data->pcie_gen_power_saving.max <
3226 ps->performance_levels[i].pcie_gen)
3227 data->pcie_gen_power_saving.max =
3228 ps->performance_levels[i].pcie_gen;
3229
3230 if (data->pcie_gen_power_saving.min >
3231 ps->performance_levels[i].pcie_gen)
3232 data->pcie_gen_power_saving.min =
3233 ps->performance_levels[i].pcie_gen;
3234
3235 if (data->pcie_lane_power_saving.max <
3236 ps->performance_levels[i].pcie_lane)
3237 data->pcie_lane_power_saving.max =
3238 ps->performance_levels[i].pcie_lane;
3239
3240 if (data->pcie_lane_power_saving.min >
3241 ps->performance_levels[i].pcie_lane)
3242 data->pcie_lane_power_saving.min =
3243 ps->performance_levels[i].pcie_lane;
3244 }
3245 break;
3246 default:
3247 break;
3248 }
3249 }
3250 return 0;
3251}
3252
3253static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
3254 struct pp_hw_power_state *power_state,
3255 unsigned int index, const void *clock_info)
3256{
3257 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3258 struct smu7_power_state *ps = cast_phw_smu7_power_state(power_state);
3259 const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
3260 struct smu7_performance_level *performance_level;
3261 uint32_t engine_clock, memory_clock;
3262 uint16_t pcie_gen_from_bios;
3263
3264 engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
3265 memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;
3266
3267 if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
3268 data->highest_mclk = memory_clock;
3269
3270 PP_ASSERT_WITH_CODE(
3271 (ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
		"Performance levels exceed SMC limit!",
3273 return -EINVAL);
3274
3275 PP_ASSERT_WITH_CODE(
3276 (ps->performance_level_count <
3277 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
		"Performance levels exceed Driver limit, skip!",
3279 return 0);
3280
3281 performance_level = &(ps->performance_levels
3282 [ps->performance_level_count++]);
3283
3284
3285 performance_level->memory_clock = memory_clock;
3286 performance_level->engine_clock = engine_clock;
3287
3288 pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
3289
3290 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
3291 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);
3292
3293 return 0;
3294}
3295
3296static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
3297 unsigned long entry_index, struct pp_power_state *state)
3298{
3299 int result;
3300 struct smu7_power_state *ps;
3301 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3302 struct phm_clock_voltage_dependency_table *dep_mclk_table =
3303 hwmgr->dyn_state.vddci_dependency_on_mclk;
3304
3305 memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));
3306
3307 state->hardware.magic = PHM_VIslands_Magic;
3308
3309 ps = (struct smu7_power_state *)(&state->hardware);
3310
3311 result = pp_tables_get_entry(hwmgr, entry_index, state,
3312 smu7_get_pp_table_entry_callback_func_v0);
3313
3314
3315
3316
3317
3318
3319
3320
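	/* Same sanity check as the v1 path: a single-entry VDDCI/MCLK table
	 * should match the VBIOS boot MCLK/VDDCI values. */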
3321 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3322 if (dep_mclk_table->entries[0].clk !=
3323 data->vbios_boot_state.mclk_bootup_value)
3324 pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
3325 "does not match VBIOS boot MCLK level");
3326 if (dep_mclk_table->entries[0].v !=
3327 data->vbios_boot_state.vddci_bootup_value)
3328 pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
3329 "does not match VBIOS boot VDDCI level");
3330 }
3331
3332
3333 if (!state->validation.disallowOnDC)
3334 ps->dc_compatible = true;
3335
3336 if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3337 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3338
3339 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3340 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3341
3342 if (!result) {
3343 uint32_t i;
3344
3345 switch (state->classification.ui_label) {
3346 case PP_StateUILabel_Performance:
3347 data->use_pcie_performance_levels = true;
3348
3349 for (i = 0; i < ps->performance_level_count; i++) {
3350 if (data->pcie_gen_performance.max <
3351 ps->performance_levels[i].pcie_gen)
3352 data->pcie_gen_performance.max =
3353 ps->performance_levels[i].pcie_gen;
3354
3355 if (data->pcie_gen_performance.min >
3356 ps->performance_levels[i].pcie_gen)
3357 data->pcie_gen_performance.min =
3358 ps->performance_levels[i].pcie_gen;
3359
3360 if (data->pcie_lane_performance.max <
3361 ps->performance_levels[i].pcie_lane)
3362 data->pcie_lane_performance.max =
3363 ps->performance_levels[i].pcie_lane;
3364
3365 if (data->pcie_lane_performance.min >
3366 ps->performance_levels[i].pcie_lane)
3367 data->pcie_lane_performance.min =
3368 ps->performance_levels[i].pcie_lane;
3369 }
3370 break;
3371 case PP_StateUILabel_Battery:
3372 data->use_pcie_power_saving_levels = true;
3373
3374 for (i = 0; i < ps->performance_level_count; i++) {
3375 if (data->pcie_gen_power_saving.max <
3376 ps->performance_levels[i].pcie_gen)
3377 data->pcie_gen_power_saving.max =
3378 ps->performance_levels[i].pcie_gen;
3379
3380 if (data->pcie_gen_power_saving.min >
3381 ps->performance_levels[i].pcie_gen)
3382 data->pcie_gen_power_saving.min =
3383 ps->performance_levels[i].pcie_gen;
3384
3385 if (data->pcie_lane_power_saving.max <
3386 ps->performance_levels[i].pcie_lane)
3387 data->pcie_lane_power_saving.max =
3388 ps->performance_levels[i].pcie_lane;
3389
3390 if (data->pcie_lane_power_saving.min >
3391 ps->performance_levels[i].pcie_lane)
3392 data->pcie_lane_power_saving.min =
3393 ps->performance_levels[i].pcie_lane;
3394 }
3395 break;
3396 default:
3397 break;
3398 }
3399 }
3400 return 0;
3401}
3402
3403static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3404 unsigned long entry_index, struct pp_power_state *state)
3405{
3406 if (hwmgr->pp_table_version == PP_TABLE_V0)
3407 return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
3408 else if (hwmgr->pp_table_version == PP_TABLE_V1)
3409 return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);
3410
3411 return 0;
3412}
3413
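/* Read the current package power from the SMU.  If the immediate
 * PPSMC_MSG_GetCurrPkgPwr query returns 0, fall back to starting the
 * PM status log and sampling it for up to 10 ms.
 */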
static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
{
	int i;
	u32 tmp = 0;

	if (!query)
		return -EINVAL;

	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
	tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
	*query = tmp;

	if (tmp != 0)
		return 0;

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixSMU_PM_STATUS_94, 0);

	for (i = 0; i < 10; i++) {
		mdelay(1);
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
		tmp = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC,
				ixSMU_PM_STATUS_94);
		if (tmp != 0)
			break;
	}
	*query = tmp;

	return 0;
}

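/* Generic sensor read used by the amdgpu hwmon/debugfs interfaces;
 * *size is in bytes and must be at least 4 for every sensor below.
 */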
static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
			    void *value, int *size)
{
	uint32_t sclk, mclk, activity_percent;
	uint32_t offset, val_vid;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* size must be at least 4 bytes for all sensors */
	if (*size < 4)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
		sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
		*((uint32_t *)value) = sclk;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
		mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
		*((uint32_t *)value) = mclk;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
				SMU_SoftRegisters,
				AverageGraphicsActivity);

		activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
		/* round the 8.8 fixed-point activity value to the nearest percent */
		activity_percent += 0x80;
		activity_percent >>= 8;
		*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr);
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
	case AMDGPU_PP_SENSOR_VDDGFX:
		if ((data->vr_config & 0xff) == 0x2)
			val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
					CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID);
		else
			val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
					CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID);

		*((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid);
		return 0;
	default:
		return -EINVAL;
	}
}

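/* Compare the clocks of the requested state against the current DPM
 * tables and flag which tables (SCLK/MCLK) need to be rebuilt or
 * re-uploaded to the SMC.
 */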
static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct smu7_power_state *smu7_ps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	uint32_t sclk = smu7_ps->performance_levels
			[smu7_ps->performance_level_count - 1].engine_clock;
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	uint32_t mclk = smu7_ps->performance_levels
			[smu7_ps->performance_level_count - 1].memory_clock;
	struct PP_Clocks min_clocks = {0};
	uint32_t i;

	for (i = 0; i < sclk_table->count; i++) {
		if (sclk == sclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= sclk_table->count)
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
	else {
	/* TODO: Check SCLK in DAL's minimum clocks
	 * in case DeepSleep divider update is required.
	 */
		if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
			(min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
				data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
			data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
	}

	for (i = 0; i < mclk_table->count; i++) {
		if (mclk == mclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= mclk_table->count)
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

	/* if the display configuration changed, the MCLK levels must be re-uploaded */
	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;

	return 0;
}

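/* Return the PCIe speed (gen) matching the highest engine clock in the
 * given power state, clamped to the last entry of the PCIe speed table.
 */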
static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
		const struct smu7_power_state *smu7_ps)
{
	uint32_t i;
	uint32_t sclk, max_sclk = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;

	for (i = 0; i < smu7_ps->performance_level_count; i++) {
		sclk = smu7_ps->performance_levels[i].engine_clock;
		if (max_sclk < sclk)
			max_sclk = sclk;
	}

	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
			return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
					dpm_table->pcie_speed_table.dpm_levels
					[dpm_table->pcie_speed_table.count - 1].value :
					dpm_table->pcie_speed_table.dpm_levels[i].value);
	}

	return 0;
}

static int smu7_request_link_speed_change_before_state_change(
		struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct smu7_power_state *smu7_nps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	const struct smu7_power_state *polaris10_cps =
			cast_const_phw_smu7_power_state(states->pcurrent_state);

	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
	uint16_t current_link_speed;

	if (data->force_pcie_gen == PP_PCIEGenInvalid)
		current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps);
	else
		current_link_speed = data->force_pcie_gen;

	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->pspp_notify_required = false;

	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
#ifdef CONFIG_ACPI
		case PP_PCIEGen3:
			if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN3, false))
				break;
			data->force_pcie_gen = PP_PCIEGen2;
			if (current_link_speed == PP_PCIEGen2)
				break;
			/* fall through: the Gen3 request failed, try Gen2 */
		case PP_PCIEGen2:
			if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
				break;
#endif
			/* fall through */
		default:
			data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			data->pspp_notify_required = true;
	}

	return 0;
}

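/* Freeze SCLK/MCLK DPM so the levels can be updated without the SMC
 * switching between them mid-update.
 */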
static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if ((0 == data->sclk_dpm_key_disabled) &&
		(data->need_update_smu7_dpm_table &
		(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to freeze SCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_SCLKDPM_FreezeLevel),
				"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
				return -EINVAL);
	}

	if ((0 == data->mclk_dpm_key_disabled) &&
		(data->need_update_smu7_dpm_table &
		 DPMTABLE_OD_UPDATE_MCLK)) {
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to freeze MCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_MCLKDPM_FreezeLevel),
				"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
				return -EINVAL);
	}

	return 0;
}

static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
		struct pp_hwmgr *hwmgr, const void *input)
{
	int result = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	uint32_t count;
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
	struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);

	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
		for (count = 0; count < dpm_table->sclk_table.count; count++) {
			dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled;
			dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock;
		}
	}

	if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
		for (count = 0; count < dpm_table->mclk_table.count; count++) {
			dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled;
			dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock;
		}
	}

	if (data->need_update_smu7_dpm_table &
			(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
		result = smum_populate_all_graphic_levels(hwmgr);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
				return result);
	}

	if (data->need_update_smu7_dpm_table &
			(DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
		result = smum_populate_all_memory_levels(hwmgr);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
				return result);
	}

	return result;
}

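/* Enable only the DPM levels that fall inside [low_limit, high_limit]. */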
static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
		struct smu7_single_dpm_table *dpm_table,
		uint32_t low_limit, uint32_t high_limit)
{
	uint32_t i;

	for (i = 0; i < dpm_table->count; i++) {
		if ((dpm_table->dpm_levels[i].value < low_limit)
			|| (dpm_table->dpm_levels[i].value > high_limit))
			dpm_table->dpm_levels[i].enabled = false;
		else
			dpm_table->dpm_levels[i].enabled = true;
	}

	return 0;
}

static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
		const struct smu7_power_state *smu7_ps)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t high_limit_count;

	PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
			"power state did not have any performance level",
			return -EINVAL);

	high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;

	smu7_trim_single_dpm_states(hwmgr,
			&(data->dpm_table.sclk_table),
			smu7_ps->performance_levels[0].engine_clock,
			smu7_ps->performance_levels[high_limit_count].engine_clock);

	smu7_trim_single_dpm_states(hwmgr,
			&(data->dpm_table.mclk_table),
			smu7_ps->performance_levels[0].memory_clock,
			smu7_ps->performance_levels[high_limit_count].memory_clock);

	return 0;
}

static int smu7_generate_dpm_level_enable_mask(
		struct pp_hwmgr *hwmgr, const void *input)
{
	int result = 0;
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct smu7_power_state *smu7_ps =
			cast_const_phw_smu7_power_state(states->pnew_state);

	/* skip the trim if OD is enabled */
	if (!hwmgr->od_enabled)
		result = smu7_trim_dpm_states(hwmgr, smu7_ps);

	if (result)
		return result;

	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
	data->dpm_level_enable_mask.mclk_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);

	return 0;
}

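/* Counterpart to smu7_freeze_sclk_mclk_dpm(): let the SMC resume
 * switching between SCLK/MCLK levels, then clear all update flags
 * except the pending VDDC one.
 */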
static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if ((0 == data->sclk_dpm_key_disabled) &&
		(data->need_update_smu7_dpm_table &
		(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {

		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to Unfreeze SCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_SCLKDPM_UnfreezeLevel),
				"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
				return -EINVAL);
	}

	if ((0 == data->mclk_dpm_key_disabled) &&
		(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {

		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to Unfreeze MCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_MCLKDPM_UnfreezeLevel),
				"Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
				return -EINVAL);
	}

	/* keep only the VDDC flag; the SCLK/MCLK updates are now committed */
	data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;

	return 0;
}

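/* After the new state is committed, tell the platform (via the ACPI
 * PSPP interface) if a lower PCIe link speed is now sufficient.
 */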
static int smu7_notify_link_speed_change_after_state_change(
		struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct smu7_power_state *smu7_ps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
	uint8_t request;

	if (data->pspp_notify_required) {
		if (target_link_speed == PP_PCIEGen3)
			request = PCIE_PERF_REQ_GEN3;
		else if (target_link_speed == PP_PCIEGen2)
			request = PCIE_PERF_REQ_GEN2;
		else
			request = PCIE_PERF_REQ_GEN1;

		if (request == PCIE_PERF_REQ_GEN1 &&
				smu7_get_current_pcie_speed(hwmgr) > 0)
			return 0;

#ifdef CONFIG_ACPI
		if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) {
			if (PP_PCIEGen2 == target_link_speed)
				pr_info("PSPP request to switch to Gen2 from Gen3 Failed!\n");
			else
				pr_info("PSPP request to switch to Gen1 from Gen2 Failed!\n");
		}
#endif
	}

	return 0;
}

static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
		if (hwmgr->chip_id == CHIP_VEGAM)
			smum_send_msg_to_smc_with_parameter(hwmgr,
					(PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2);
		else
			smum_send_msg_to_smc_with_parameter(hwmgr,
					(PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
	}
	return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
}

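/* Top-level state-change sequence: find what changed, freeze DPM,
 * upload the new levels, then unfreeze and notify the SMC/platform.
 */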
static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	int tmp_result, result = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to find DPM states clocks in DPM table!",
			result = tmp_result);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			smu7_request_link_speed_change_before_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to request link speed change before state change!",
				result = tmp_result);
	}

	tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to freeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to populate and upload SCLK MCLK DPM levels!",
			result = tmp_result);

	tmp_result = smu7_update_avfs(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update avfs voltages!",
			result = tmp_result);

	tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to generate DPM level enabled mask!",
			result = tmp_result);

	tmp_result = smum_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update SCLK threshold!",
			result = tmp_result);

	tmp_result = smu7_notify_smc_display(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to notify smc display settings!",
			result = tmp_result);

	tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to unfreeze SCLK MCLK DPM!",
			result = tmp_result);

	tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to upload DPM level enabled mask!",
			result = tmp_result);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			smu7_notify_link_speed_change_after_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to notify link speed change after state change!",
				result = tmp_result);
	}
	data->apply_optimized_settings = false;
	return result;
}

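/**
 * smu7_set_max_fan_pwm_output - set the maximum operating fan PWM
 *
 * @hwmgr:          the address of the powerplay hardware manager
 * @us_max_fan_pwm: max operating fan PWM value
 *
 * Return: the response from the SMC.
 */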
static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
{
	hwmgr->thermal_controller.
	advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;

	return smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
}

static int
smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
{
	PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;

	return (smum_send_msg_to_smc(hwmgr, msg) == 0) ? 0 : -1;
}

static int
smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
{
	if (hwmgr->display_config->num_display > 1 &&
			!hwmgr->display_config->multi_monitor_in_sync)
		smu7_notify_smc_display_change(hwmgr, false);

	return 0;
}

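/**
 * smu7_program_display_gap - program the display gap registers
 *
 * Derives the frame time and the pre-VBlank time from the current
 * refresh rate and minimum VBlank time, then writes them to the SMC
 * soft registers.  E.g. at 60 Hz, frame_time_in_us = 16666 and
 * frame_time_x2 = 333 (twice the frame time, in units of 100 us).
 */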
static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
	uint32_t display_gap2;
	uint32_t pre_vbi_time_in_us;
	uint32_t frame_time_in_us;
	uint32_t ref_clock, refresh_rate;

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);

	ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
	refresh_rate = hwmgr->display_config->vrefresh;

	if (0 == refresh_rate)
		refresh_rate = 60;

	frame_time_in_us = 1000000 / refresh_rate;

	pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time;

	data->frame_time_x2 = frame_time_in_us * 2 / 100;

	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start + smum_get_offsetof(hwmgr,
							SMU_SoftRegisters,
							PreVBlankGap), 0x64);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start + smum_get_offsetof(hwmgr,
							SMU_SoftRegisters,
							VBlankTimeout),
					(frame_time_in_us - pre_vbi_time_in_us));

	return 0;
}

static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	return smu7_program_display_gap(hwmgr);
}

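/**
 * smu7_set_max_fan_rpm_output - set the maximum operating fan RPM
 *
 * @hwmgr:          the address of the powerplay hardware manager
 * @us_max_fan_rpm: max operating fan RPM value
 *
 * Return: the response from the SMC.
 */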
static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
{
	hwmgr->thermal_controller.
	advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;

	return smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
}

static const struct amdgpu_irq_src_funcs smu7_irq_funcs = {
	.process = phm_irq_process,
};

static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);

	if (!source)
		return -ENOMEM;

	source->funcs = &smu7_irq_funcs;

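	/* IV source IDs 230/231 are the CG thermal low-to-high and
	 * high-to-low interrupts on VI parts.
	 */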
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			AMDGPU_IH_CLIENTID_LEGACY,
			230,
			source);
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			AMDGPU_IH_CLIENTID_LEGACY,
			231,
			source);

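	/* Register the CTF (GPIO_19) interrupt */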
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			AMDGPU_IH_CLIENTID_LEGACY,
			83,
			source);

	return 0;
}

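/* Decide whether the SMC needs a display-configuration update: either
 * the number of displays changed, or the deep-sleep minimum engine
 * clock requirement changed.
 */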
static bool
smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	bool is_update_required = false;

	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
		is_update_required = true;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
		if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr &&
			(data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
			hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
			is_update_required = true;
	}
	return is_update_required;
}

static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
					const struct smu7_performance_level *pl2)
{
	return ((pl1->memory_clock == pl2->memory_clock) &&
		(pl1->engine_clock == pl2->engine_clock) &&
		(pl1->pcie_gen == pl2->pcie_gen) &&
		(pl1->pcie_lane == pl2->pcie_lane));
}

static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
		const struct pp_hw_power_state *pstate1,
		const struct pp_hw_power_state *pstate2, bool *equal)
{
	const struct smu7_power_state *psa;
	const struct smu7_power_state *psb;
	int i;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
		return -EINVAL;

	psa = cast_const_phw_smu7_power_state(pstate1);
	psb = cast_const_phw_smu7_power_state(pstate2);

	if (psa->performance_level_count != psb->performance_level_count) {
		*equal = false;
		return 0;
	}

	for (i = 0; i < psa->performance_level_count; i++) {
		if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
			*equal = false;
			return 0;
		}
	}

	/* If all performance levels are the same, try to use the UVD clocks to break the tie. */
	*equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
	*equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
	*equal &= (psa->sclk_threshold == psb->sclk_threshold);

	*equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK |
							DPMTABLE_OD_UPDATE_MCLK |
							DPMTABLE_OD_UPDATE_VDDC));

	return 0;
}

static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	uint32_t vbios_version;
	uint32_t tmp;

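	/* Read MC indirect register offset 0x9F bits [3:0] to see
	 * if VBIOS has already loaded a full version of MC ucode
	 * or not.
	 */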
	smu7_get_mc_microcode_version(hwmgr);
	vbios_version = hwmgr->microcode_version_info.MC & 0xf;

	data->need_long_memory_training = false;

	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
			ixMC_IO_DEBUG_UP_13);
	tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);

	if (tmp & (1 << 23)) {
		data->mem_latency_high = MEM_LATENCY_HIGH;
		data->mem_latency_low = MEM_LATENCY_LOW;
	} else {
		data->mem_latency_high = 330;
		data->mem_latency_low = 330;
	}

	return 0;
}

static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	data->clock_registers.vCG_SPLL_FUNC_CNTL =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
	data->clock_registers.vDLL_CNTL =
		cgs_read_register(hwmgr->device, mmDLL_CNTL);
	data->clock_registers.vMCLK_PWRMGT_CNTL =
		cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
	data->clock_registers.vMPLL_AD_FUNC_CNTL =
		cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
	data->clock_registers.vMPLL_DQ_FUNC_CNTL =
		cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
	data->clock_registers.vMPLL_FUNC_CNTL =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
	data->clock_registers.vMPLL_FUNC_CNTL_1 =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
	data->clock_registers.vMPLL_FUNC_CNTL_2 =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
	data->clock_registers.vMPLL_SS1 =
		cgs_read_register(hwmgr->device, mmMPLL_SS1);
	data->clock_registers.vMPLL_SS2 =
		cgs_read_register(hwmgr->device, mmMPLL_SS2);

	return 0;
}

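/* Detect whether the board is populated with GDDR5 memory and cache
 * the result in the hwmgr backend.
 */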
static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;

	data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5);

	return 0;
}

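/* Enable static power management by setting GENERAL_PWRMGT.STATIC_PM_EN. */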
static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, STATIC_PM_EN, 1);

	return 0;
}

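/* Initialize the driver's view of the UVD/VCE/SAMU power-gate state:
 * all blocks start un-gated.
 */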
static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	data->uvd_power_gated = false;
	data->vce_power_gated = false;
	data->samu_power_gated = false;

	return 0;
}

static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	data->low_sclk_interrupt_threshold = 0;
	return 0;
}

static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	smu7_check_mc_firmware(hwmgr);

	tmp_result = smu7_read_clock_registers(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to read clock registers!", result = tmp_result);

	tmp_result = smu7_get_memory_type(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to get memory type!", result = tmp_result);

	tmp_result = smu7_enable_acpi_power_management(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable ACPI power management!", result = tmp_result);

	tmp_result = smu7_init_power_gate_state(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to init power gate state!", result = tmp_result);

	tmp_result = smu7_get_mc_microcode_version(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to get MC microcode version!", result = tmp_result);

	tmp_result = smu7_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to init sclk threshold!", result = tmp_result);

	return result;
}

static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (mask == 0)
		return -EINVAL;

	switch (type) {
	case PP_SCLK:
		if (!data->sclk_dpm_key_disabled)
			smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SCLKDPM_SetEnabledMask,
					data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
		break;
	case PP_MCLK:
		if (!data->mclk_dpm_key_disabled)
			smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_MCLKDPM_SetEnabledMask,
					data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
		break;
	case PP_PCIE:
	{
		uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;

		if (!data->pcie_dpm_key_disabled) {
			/* force a single PCIe level only when exactly one bit is set */
			if (fls(tmp) != ffs(tmp))
				smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel);
			else
				smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_PCIeDPM_ForceLevel,
						fls(tmp) - 1);
		}
		break;
	}
	default:
		break;
	}

	return 0;
}

static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
	struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
	int i, now, size = 0;
	uint32_t clock, pcie_speed;

	switch (type) {
	case PP_SCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		for (i = 0; i < sclk_table->count; i++) {
			if (clock > sclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMHz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		for (i = 0; i < mclk_table->count; i++) {
			if (clock > mclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMHz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_PCIE:
		pcie_speed = smu7_get_current_pcie_speed(hwmgr);
		for (i = 0; i < pcie_table->count; i++) {
			if (pcie_speed != pcie_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
					(pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
					(pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
					(i == now) ? "*" : "");
		break;
	case OD_SCLK:
		if (hwmgr->od_enabled) {
			size = sprintf(buf, "%s:\n", "OD_SCLK");
			for (i = 0; i < odn_sclk_table->num_of_pl; i++)
				size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
					i, odn_sclk_table->entries[i].clock/100,
					odn_sclk_table->entries[i].vddc);
		}
		break;
	case OD_MCLK:
		if (hwmgr->od_enabled) {
			size = sprintf(buf, "%s:\n", "OD_MCLK");
			for (i = 0; i < odn_mclk_table->num_of_pl; i++)
				size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
					i, odn_mclk_table->entries[i].clock/100,
					odn_mclk_table->entries[i].vddc);
		}
		break;
	case OD_RANGE:
		if (hwmgr->od_enabled) {
			size = sprintf(buf, "%s:\n", "OD_RANGE");
			size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
				data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
			size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
				data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
			size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
				data->odn_dpm_table.min_vddc,
				data->odn_dpm_table.max_vddc);
		}
		break;
	default:
		break;
	}
	return size;
}

static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
		break;
	case AMD_FAN_CTRL_MANUAL:
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MicrocodeFanControl))
			smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
		break;
	case AMD_FAN_CTRL_AUTO:
		if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode))
			smu7_fan_ctrl_start_smc_fan_control(hwmgr);
		break;
	default:
		break;
	}
}

static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
	return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL;
}

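/* Report the current SCLK overdrive as a percentage above the golden
 * (default) top DPM level.
 */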
static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct smu7_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.sclk_table);
	int value;

	value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
			100 /
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return value;
}

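/* Scale the top SCLK level to golden * (100 + value) / 100; the OD
 * percentage accepted here is clamped to 0-20.
 */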
static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.sclk_table);
	struct pp_power_state *ps;
	struct smu7_power_state *smu7_ps;

	if (value > 20)
		value = 20;

	ps = hwmgr->request_ps;

	if (ps == NULL)
		return -EINVAL;

	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);

	smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
			value / 100 +
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return 0;
}

static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	struct smu7_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mclk_table);
	int value;

	value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
			100 /
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	return value;
}

static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mclk_table);
	struct pp_power_state *ps;
	struct smu7_power_state *smu7_ps;

	if (value > 20)
		value = 20;

	ps = hwmgr->request_ps;

	if (ps == NULL)
		return -EINVAL;

	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);

	smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
			value / 100 +
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	return 0;
}

static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
	struct phm_clock_voltage_dependency_table *sclk_table;
	int i;

	if (hwmgr->pp_table_version == PP_TABLE_V1) {
		if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
			return -EINVAL;
		dep_sclk_table = table_info->vdd_dep_on_sclk;
		for (i = 0; i < dep_sclk_table->count; i++)
			clocks->clock[i] = dep_sclk_table->entries[i].clk;
		clocks->count = dep_sclk_table->count;
	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
		sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
		for (i = 0; i < sclk_table->count; i++)
			clocks->clock[i] = sclk_table->entries[i].clk;
		clocks->count = sclk_table->count;
	}

	return 0;
}

static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
		return data->mem_latency_high;
	else if (clk >= MEM_FREQ_HIGH_LATENCY)
		return data->mem_latency_low;
	else
		return MEM_LATENCY_ERR;
}

static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
	int i;
	struct phm_clock_voltage_dependency_table *mclk_table;

	if (hwmgr->pp_table_version == PP_TABLE_V1) {
		if (table_info == NULL || table_info->vdd_dep_on_mclk == NULL)
			return -EINVAL;
		dep_mclk_table = table_info->vdd_dep_on_mclk;
		for (i = 0; i < dep_mclk_table->count; i++) {
			clocks->clock[i] = dep_mclk_table->entries[i].clk;
			clocks->latency[i] = smu7_get_mem_latency(hwmgr,
						dep_mclk_table->entries[i].clk);
		}
		clocks->count = dep_mclk_table->count;
	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
		mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
		for (i = 0; i < mclk_table->count; i++)
			clocks->clock[i] = mclk_table->entries[i].clk;
		clocks->count = mclk_table->count;
	}
	return 0;
}

static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
		struct amd_pp_clocks *clocks)
{
	switch (type) {
	case amd_pp_sys_clock:
		smu7_get_sclks(hwmgr, clocks);
		break;
	case amd_pp_mem_clock:
		smu7_get_mclks(hwmgr, clocks);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
					uint32_t virtual_addr_low,
					uint32_t virtual_addr_hi,
					uint32_t mc_addr_low,
					uint32_t mc_addr_hi,
					uint32_t size)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_ADDR_H),
					mc_addr_hi);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_ADDR_L),
					mc_addr_low);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
					virtual_addr_hi);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
					virtual_addr_low);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
					size);
	return 0;
}

static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
		struct amd_pp_simple_clock_info *clocks)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);

	if (clocks == NULL)
		return -EINVAL;

	clocks->memory_max_clock = mclk_table->count > 1 ?
				mclk_table->dpm_levels[mclk_table->count-1].value :
				mclk_table->dpm_levels[0].value;
	clocks->engine_max_clock = sclk_table->count > 1 ?
				sclk_table->dpm_levels[sclk_table->count-1].value :
				sclk_table->dpm_levels[0].value;
	return 0;
}

static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
		struct PP_TemperatureRange *thermal_data)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;

	memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));

	if (hwmgr->pp_table_version == PP_TABLE_V1)
		thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	else if (hwmgr->pp_table_version == PP_TABLE_V0)
		thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return 0;
}

static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
					enum PP_OD_DPM_TABLE_COMMAND type,
					uint32_t clk,
					uint32_t voltage)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (voltage < data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) {
		pr_info("OD voltage is out of range [%d - %d] mV\n",
			data->odn_dpm_table.min_vddc,
			data->odn_dpm_table.max_vddc);
		return false;
	}

	if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
		if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk ||
			hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
			pr_info("OD engine clock is out of range [%d - %d] MHz\n",
				data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
			return false;
		}
	} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
		if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk ||
			hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
			pr_info("OD memory clock is out of range [%d - %d] MHz\n",
				data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
			return false;
		}
	} else {
		return false;
	}

	return true;
}

static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;

	if (table_info == NULL)
		return;

	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
		if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
					data->dpm_table.sclk_table.dpm_levels[i].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
			break;
		}
	}

	for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
		if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
					data->dpm_table.mclk_table.dpm_levels[i].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
			break;
		}
	}

	dep_table = table_info->vdd_dep_on_mclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);

	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
			return;
		}
	}

	dep_table = table_info->vdd_dep_on_sclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
			return;
		}
	}

	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
	}
}

static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
					enum PP_OD_DPM_TABLE_COMMAND type,
					long *input, uint32_t size)
{
	uint32_t i;
	struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL;
	struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	uint32_t input_clk;
	uint32_t input_vol;
	uint32_t input_level;

	PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
				return -EINVAL);

	if (!hwmgr->od_enabled) {
		pr_info("OverDrive feature not enabled\n");
		return -EINVAL;
	}

	if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
		podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels;
		podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_sclk;
		PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
				"Failed to get ODN SCLK and Voltage tables",
				return -EINVAL);
	} else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
		podn_dpm_table_in_backend = &data->odn_dpm_table.odn_memory_clock_dpm_levels;
		podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_mclk;

		PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
				"Failed to get ODN MCLK and Voltage tables",
				return -EINVAL);
	} else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
		smu7_odn_initial_default_setting(hwmgr);
		return 0;
	} else if (PP_OD_COMMIT_DPM_TABLE == type) {
		smu7_check_dpm_table_updated(hwmgr);
		return 0;
	} else {
		return -EINVAL;
	}

	for (i = 0; i < size; i += 3) {
		if (i + 3 > size || input[i] >= podn_dpm_table_in_backend->num_of_pl) {
			pr_info("invalid clock voltage input\n");
			return 0;
		}
		input_level = input[i];
		input_clk = input[i+1] * 100;
		input_vol = input[i+2];

		if (smu7_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
			podn_dpm_table_in_backend->entries[input_level].clock = input_clk;
			podn_vdd_dep_in_backend->entries[input_level].clk = input_clk;
			podn_dpm_table_in_backend->entries[input_level].vddc = input_vol;
			podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}

static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t i, size = 0;
	uint32_t len;

	static const char *profile_name[6] = {"3D_FULL_SCREEN",
					"POWER_SAVING",
					"VIDEO",
					"VR",
					"COMPUTE",
					"CUSTOM"};

	static const char *title[8] = {"NUM",
			"MODE_NAME",
			"SCLK_UP_HYST",
			"SCLK_DOWN_HYST",
			"SCLK_ACTIVE_LEVEL",
			"MCLK_UP_HYST",
			"MCLK_DOWN_HYST",
			"MCLK_ACTIVE_LEVEL"};

	if (!buf)
		return -EINVAL;

	size += sprintf(buf + size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
			title[0], title[1], title[2], title[3],
			title[4], title[5], title[6], title[7]);

	len = sizeof(smu7_profiling) / sizeof(struct profile_mode_setting);

	for (i = 0; i < len; i++) {
		if (i == hwmgr->power_profile_mode) {
			size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
			i, profile_name[i], "*",
			data->current_profile_setting.sclk_up_hyst,
			data->current_profile_setting.sclk_down_hyst,
			data->current_profile_setting.sclk_activity,
			data->current_profile_setting.mclk_up_hyst,
			data->current_profile_setting.mclk_down_hyst,
			data->current_profile_setting.mclk_activity);
			continue;
		}
		if (smu7_profiling[i].bupdate_sclk)
			size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
			i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
			smu7_profiling[i].sclk_down_hyst,
			smu7_profiling[i].sclk_activity);
		else
			size += sprintf(buf + size, "%3d %16s: %8s %16s %16s ",
			i, profile_name[i], "-", "-", "-");

		if (smu7_profiling[i].bupdate_mclk)
			size += sprintf(buf + size, "%16d %16d %16d\n",
			smu7_profiling[i].mclk_up_hyst,
			smu7_profiling[i].mclk_down_hyst,
			smu7_profiling[i].mclk_activity);
		else
			size += sprintf(buf + size, "%16s %16s %16s\n",
			"-", "-", "-");
	}

	return size;
}

static void smu7_patch_compute_profile_mode(struct pp_hwmgr *hwmgr,
					enum PP_SMC_POWER_PROFILE request)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t tmp, level;

	if (request == PP_SMC_POWER_PROFILE_COMPUTE) {
		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			level = 0;
			tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
			while (tmp >>= 1)
				level++;
			if (level > 0)
				smu7_force_clock_level(hwmgr, PP_SCLK, 3 << (level-1));
		}
	} else if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
		smu7_force_clock_level(hwmgr, PP_SCLK, data->dpm_level_enable_mask.sclk_dpm_enable_mask);
	}
}

static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct profile_mode_setting tmp;
	enum PP_SMC_POWER_PROFILE mode;

	if (input == NULL)
		return -EINVAL;

	mode = input[size];
	switch (mode) {
	case PP_SMC_POWER_PROFILE_CUSTOM:
		if (size < 8)
			return -EINVAL;

		tmp.bupdate_sclk = input[0];
		tmp.sclk_up_hyst = input[1];
		tmp.sclk_down_hyst = input[2];
		tmp.sclk_activity = input[3];
		tmp.bupdate_mclk = input[4];
		tmp.mclk_up_hyst = input[5];
		tmp.mclk_down_hyst = input[6];
		tmp.mclk_activity = input[7];
		if (!smum_update_dpm_settings(hwmgr, &tmp)) {
			memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
			hwmgr->power_profile_mode = mode;
		}
		break;
	case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
	case PP_SMC_POWER_PROFILE_POWERSAVING:
	case PP_SMC_POWER_PROFILE_VIDEO:
	case PP_SMC_POWER_PROFILE_VR:
	case PP_SMC_POWER_PROFILE_COMPUTE:
		if (mode == hwmgr->power_profile_mode)
			return 0;

		memcpy(&tmp, &smu7_profiling[mode], sizeof(struct profile_mode_setting));
		if (!smum_update_dpm_settings(hwmgr, &tmp)) {
			if (tmp.bupdate_sclk) {
				data->current_profile_setting.bupdate_sclk = tmp.bupdate_sclk;
				data->current_profile_setting.sclk_up_hyst = tmp.sclk_up_hyst;
				data->current_profile_setting.sclk_down_hyst = tmp.sclk_down_hyst;
				data->current_profile_setting.sclk_activity = tmp.sclk_activity;
			}
			if (tmp.bupdate_mclk) {
				data->current_profile_setting.bupdate_mclk = tmp.bupdate_mclk;
				data->current_profile_setting.mclk_up_hyst = tmp.mclk_up_hyst;
				data->current_profile_setting.mclk_down_hyst = tmp.mclk_down_hyst;
				data->current_profile_setting.mclk_activity = tmp.mclk_activity;
			}
			smu7_patch_compute_profile_mode(hwmgr, mode);
			hwmgr->power_profile_mode = mode;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
	.backend_init = &smu7_hwmgr_backend_init,
	.backend_fini = &smu7_hwmgr_backend_fini,
	.asic_setup = &smu7_setup_asic_task,
	.dynamic_state_management_enable = &smu7_enable_dpm_tasks,
	.apply_state_adjust_rules = smu7_apply_state_adjust_rules,
	.force_dpm_level = &smu7_force_dpm_level,
	.power_state_set = smu7_set_power_state_tasks,
	.get_power_state_size = smu7_get_power_state_size,
	.get_mclk = smu7_dpm_get_mclk,
	.get_sclk = smu7_dpm_get_sclk,
	.patch_boot_state = smu7_dpm_patch_boot_state,
	.get_pp_table_entry = smu7_get_pp_table_entry,
	.get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
	.powerdown_uvd = smu7_powerdown_uvd,
	.powergate_uvd = smu7_powergate_uvd,
	.powergate_vce = smu7_powergate_vce,
	.disable_clock_power_gating = smu7_disable_clock_power_gating,
	.update_clock_gatings = smu7_update_clock_gatings,
	.notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
	.display_config_changed = smu7_display_configuration_changed_task,
	.set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
	.set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
	.stop_thermal_controller = smu7_thermal_stop_thermal_controller,
	.get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
	.set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
	.reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
	.uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
	.register_irq_handlers = smu7_register_irq_handlers,
	.check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
	.check_states_equal = smu7_check_states_equal,
	.set_fan_control_mode = smu7_set_fan_control_mode,
	.get_fan_control_mode = smu7_get_fan_control_mode,
	.force_clock_level = smu7_force_clock_level,
	.print_clock_levels = smu7_print_clock_levels,
	.enable_per_cu_power_gating = smu7_enable_per_cu_power_gating,
	.get_sclk_od = smu7_get_sclk_od,
	.set_sclk_od = smu7_set_sclk_od,
	.get_mclk_od = smu7_get_mclk_od,
	.set_mclk_od = smu7_set_mclk_od,
	.get_clock_by_type = smu7_get_clock_by_type,
	.read_sensor = smu7_read_sensor,
	.dynamic_state_management_disable = smu7_disable_dpm_tasks,
	.avfs_control = smu7_avfs_control,
	.disable_smc_firmware_ctf = smu7_thermal_disable_alert,
	.start_thermal_controller = smu7_start_thermal_controller,
	.notify_cac_buffer_info = smu7_notify_cac_buffer_info,
	.get_max_high_clocks = smu7_get_max_high_clocks,
	.get_thermal_temperature_range = smu7_get_thermal_temperature_range,
	.odn_edit_dpm_table = smu7_odn_edit_dpm_table,
	.set_power_limit = smu7_set_power_limit,
	.get_power_profile_mode = smu7_get_power_profile_mode,
	.set_power_profile_mode = smu7_set_power_profile_mode,
};

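/* Return the largest deep-sleep divider ID i such that (clock >> i)
 * still satisfies the stutter-mode minimum engine clock.
 */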
uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
		uint32_t clock_insr)
{
	uint8_t i;
	uint32_t temp;
	uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);

	PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
	for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
		temp = clock >> i;

		if (temp >= min || i == 0)
			break;
	}
	return i;
}

int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
	if (hwmgr->pp_table_version == PP_TABLE_V0)
		hwmgr->pptable_func = &pptable_funcs;
	else if (hwmgr->pp_table_version == PP_TABLE_V1)
		hwmgr->pptable_func = &pptable_v1_0_funcs;

	return ret;
}