1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include "pp_debug.h"
24#include <linux/delay.h>
25#include <linux/fb.h>
26#include <linux/module.h>
27#include <linux/slab.h>
28#include <asm/div64.h>
29#include <drm/amdgpu_drm.h>
30#include "ppatomctrl.h"
31#include "atombios.h"
32#include "pptable_v1_0.h"
33#include "pppcielanes.h"
34#include "amd_pcie_helpers.h"
35#include "hardwaremanager.h"
36#include "process_pptables_v1_0.h"
37#include "cgs_common.h"
38
39#include "smu7_common.h"
40
41#include "hwmgr.h"
42#include "smu7_hwmgr.h"
43#include "smu_ucode_xfer_vi.h"
44#include "smu7_powertune.h"
45#include "smu7_dyn_defaults.h"
46#include "smu7_thermal.h"
47#include "smu7_clockpowergating.h"
48#include "processpptables.h"
49#include "pp_thermal.h"
50
51#define MC_CG_ARB_FREQ_F0 0x0a
52#define MC_CG_ARB_FREQ_F1 0x0b
53#define MC_CG_ARB_FREQ_F2 0x0c
54#define MC_CG_ARB_FREQ_F3 0x0d
55
56#define MC_CG_SEQ_DRAMCONF_S0 0x05
57#define MC_CG_SEQ_DRAMCONF_S1 0x06
58#define MC_CG_SEQ_YCLK_SUSPEND 0x04
59#define MC_CG_SEQ_YCLK_RESUME 0x0a
60
61#define SMC_CG_IND_START 0xc0030000
62#define SMC_CG_IND_END 0xc0040000
63
64#define VOLTAGE_SCALE 4
65#define VOLTAGE_VID_OFFSET_SCALE1 625
66#define VOLTAGE_VID_OFFSET_SCALE2 100
67
68#define MEM_FREQ_LOW_LATENCY 25000
69#define MEM_FREQ_HIGH_LATENCY 80000
70
71#define MEM_LATENCY_HIGH 45
72#define MEM_LATENCY_LOW 35
73#define MEM_LATENCY_ERR 0xFFFF
74
75#define MC_SEQ_MISC0_GDDR5_SHIFT 28
76#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
77#define MC_SEQ_MISC0_GDDR5_VALUE 5
78
79#define PCIE_BUS_CLK 10000
80#define TCLK (PCIE_BUS_CLK / 10)
81
/* Default per-profile-mode DPM tuning parameters, indexed by profile mode.
 * Each row presumably holds SCLK then MCLK tuning fields (enable flag,
 * up/down hysteresis, activity threshold) per struct profile_mode_setting
 * -- TODO confirm field order against the struct definition. */
static const struct profile_mode_setting smu7_profiling[6] =
					{{1, 0, 100, 30, 1, 0, 100, 10},
					 {1, 10, 0, 30, 0, 0, 0, 0},
					 {0, 0, 0, 0, 1, 10, 16, 31},
					 {1, 0, 11, 50, 1, 0, 100, 10},
					 {1, 0, 5, 30, 0, 0, 0, 0},
					 {0, 0, 0, 0, 0, 0, 0, 0},
					};
90
91
/* Source selector for DPM thermal/power event signalling.
 * NOTE(review): not referenced in this part of the file; values are
 * presumably written into a thermal-control register field -- confirm. */
enum DPM_EVENT_SRC {
	DPM_EVENT_SRC_ANALOG = 0,		/* analog thermal sensor */
	DPM_EVENT_SRC_EXTERNAL = 1,		/* external (GPIO) input */
	DPM_EVENT_SRC_DIGITAL = 2,		/* digital thermal sensor */
	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,	/* analog OR external */
	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4	/* digital OR external */
};
99
/* Magic value stamped into pp_hw_power_state::magic to identify power
 * states owned by this hwmgr (checked by the cast helpers below). */
static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
/* Forward declaration; the definition appears later in this file. */
static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask);
103
/*
 * Downcast a generic pp_hw_power_state to the smu7-specific state.
 * Returns NULL (with an assert message) if the magic does not match,
 * i.e. the state was not created by this hwmgr.
 */
static struct smu7_power_state *cast_phw_smu7_power_state(
				  struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				 return NULL);

	return (struct smu7_power_state *)hw_ps;
}
113
/*
 * Const variant of cast_phw_smu7_power_state(): downcast a read-only
 * generic power state, returning NULL on a magic-number mismatch.
 */
static const struct smu7_power_state *cast_const_phw_smu7_power_state(
				const struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				 return NULL);

	return (const struct smu7_power_state *)hw_ps;
}
123
124
125
126
127
128
129
/*
 * Read the memory-controller microcode version through the
 * MC_SEQ_IO_DEBUG index/data register pair (index 0x9F) and cache it
 * in hwmgr->microcode_version_info.MC.  Always returns 0.
 */
static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
{
	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);

	hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);

	return 0;
}
138
139static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
140{
141 uint32_t speedCntl = 0;
142
143
144 speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
145 ixPCIE_LC_SPEED_CNTL);
146 return((uint16_t)PHM_GET_FIELD(speedCntl,
147 PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
148}
149
150static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
151{
152 uint32_t link_width;
153
154
155 link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
156 PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
157
158 PP_ASSERT_WITH_CODE((7 >= link_width),
159 "Invalid PCIe lane width!", return 0);
160
161 return decode_pcie_lane_width(link_width);
162}
163
164
165
166
167
168
169
170static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
171{
172 if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
173 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable);
174
175 return 0;
176}
177
178
179
180
181
182
183static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
184{
185 const struct smu7_hwmgr *data =
186 (const struct smu7_hwmgr *)(hwmgr->backend);
187
188 return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
189}
190
191
192
193
194
195
196
/*
 * Enable hardware voltage power management by setting
 * GENERAL_PWRMGT.VOLT_PWRMGT_EN.  Always returns 0.
 */
static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
{
	/* enable voltage control */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);

	return 0;
}
205
206static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
207 struct phm_clock_voltage_dependency_table *voltage_dependency_table
208 )
209{
210 uint32_t i;
211
212 PP_ASSERT_WITH_CODE((NULL != voltage_table),
213 "Voltage Dependency Table empty.", return -EINVAL;);
214
215 voltage_table->mask_low = 0;
216 voltage_table->phase_delay = 0;
217 voltage_table->count = voltage_dependency_table->count;
218
219 for (i = 0; i < voltage_dependency_table->count; i++) {
220 voltage_table->entries[i].value =
221 voltage_dependency_table->entries[i].v;
222 voltage_table->entries[i].smio_low = 0;
223 }
224
225 return 0;
226}
227
228
229
230
231
232
233
234
235static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
236{
237 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
238 struct phm_ppt_v1_information *table_info =
239 (struct phm_ppt_v1_information *)hwmgr->pptable;
240 int result = 0;
241 uint32_t tmp;
242
243 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
244 result = atomctrl_get_voltage_table_v3(hwmgr,
245 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
246 &(data->mvdd_voltage_table));
247 PP_ASSERT_WITH_CODE((0 == result),
248 "Failed to retrieve MVDD table.",
249 return result);
250 } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
251 if (hwmgr->pp_table_version == PP_TABLE_V1)
252 result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
253 table_info->vdd_dep_on_mclk);
254 else if (hwmgr->pp_table_version == PP_TABLE_V0)
255 result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
256 hwmgr->dyn_state.mvdd_dependency_on_mclk);
257
258 PP_ASSERT_WITH_CODE((0 == result),
259 "Failed to retrieve SVI2 MVDD table from dependancy table.",
260 return result;);
261 }
262
263 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
264 result = atomctrl_get_voltage_table_v3(hwmgr,
265 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
266 &(data->vddci_voltage_table));
267 PP_ASSERT_WITH_CODE((0 == result),
268 "Failed to retrieve VDDCI table.",
269 return result);
270 } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
271 if (hwmgr->pp_table_version == PP_TABLE_V1)
272 result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
273 table_info->vdd_dep_on_mclk);
274 else if (hwmgr->pp_table_version == PP_TABLE_V0)
275 result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
276 hwmgr->dyn_state.vddci_dependency_on_mclk);
277 PP_ASSERT_WITH_CODE((0 == result),
278 "Failed to retrieve SVI2 VDDCI table from dependancy table.",
279 return result);
280 }
281
282 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
283
284 result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
285 table_info->vddgfx_lookup_table);
286 PP_ASSERT_WITH_CODE((0 == result),
287 "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
288 }
289
290
291 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
292 result = atomctrl_get_voltage_table_v3(hwmgr,
293 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
294 &data->vddc_voltage_table);
295 PP_ASSERT_WITH_CODE((0 == result),
296 "Failed to retrieve VDDC table.", return result;);
297 } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
298
299 if (hwmgr->pp_table_version == PP_TABLE_V0)
300 result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
301 hwmgr->dyn_state.vddc_dependency_on_mclk);
302 else if (hwmgr->pp_table_version == PP_TABLE_V1)
303 result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
304 table_info->vddc_lookup_table);
305
306 PP_ASSERT_WITH_CODE((0 == result),
307 "Failed to retrieve SVI2 VDDC table from dependancy table.", return result;);
308 }
309
310 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
311 PP_ASSERT_WITH_CODE(
312 (data->vddc_voltage_table.count <= tmp),
313 "Too many voltage values for VDDC. Trimming to fit state table.",
314 phm_trim_voltage_table_to_fit_state_table(tmp,
315 &(data->vddc_voltage_table)));
316
317 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
318 PP_ASSERT_WITH_CODE(
319 (data->vddgfx_voltage_table.count <= tmp),
320 "Too many voltage values for VDDC. Trimming to fit state table.",
321 phm_trim_voltage_table_to_fit_state_table(tmp,
322 &(data->vddgfx_voltage_table)));
323
324 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
325 PP_ASSERT_WITH_CODE(
326 (data->vddci_voltage_table.count <= tmp),
327 "Too many voltage values for VDDCI. Trimming to fit state table.",
328 phm_trim_voltage_table_to_fit_state_table(tmp,
329 &(data->vddci_voltage_table)));
330
331 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
332 PP_ASSERT_WITH_CODE(
333 (data->mvdd_voltage_table.count <= tmp),
334 "Too many voltage values for MVDD. Trimming to fit state table.",
335 phm_trim_voltage_table_to_fit_state_table(tmp,
336 &(data->mvdd_voltage_table)));
337
338 return 0;
339}
340
341
342
343
344
345
346
/*
 * Program the static-screen detection threshold (unit and count) from
 * the values cached in the backend into CG_STATIC_SCREEN_PARAMETER.
 * Always returns 0.
 */
static int smu7_program_static_screen_threshold_parameters(
							struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* Set static screen threshold unit */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
			data->static_screen_threshold_unit);
	/* Set static screen threshold */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
			data->static_screen_threshold);

	return 0;
}
363
364
365
366
367
368
369
370static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
371{
372 uint32_t display_gap =
373 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
374 ixCG_DISPLAY_GAP_CNTL);
375
376 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
377 DISP_GAP, DISPLAY_GAP_IGNORE);
378
379 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
380 DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
381
382 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
383 ixCG_DISPLAY_GAP_CNTL, display_gap);
384
385 return 0;
386}
387
388
389
390
391
392
393
/*
 * Program the clock-frequency-transition voting-client masks: take the
 * SCLK/busy counters out of reset, then write the eight cached client
 * masks into CG_FREQ_TRAN_VOTING_0..7 (registers are 4 bytes apart).
 * Always returns 0.
 */
static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	int i;

	/* Clear reset for voting clients before enabling DPM */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);

	for (i = 0; i < 8; i++)
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixCG_FREQ_TRAN_VOTING_0 + i * 4,
					data->voting_rights_clients[i]);
	return 0;
}
411
412static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
413{
414 int i;
415
416
417 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
418 SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
419 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
420 SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
421
422 for (i = 0; i < 8; i++)
423 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
424 ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);
425
426 return 0;
427}
428
429
430
431
/*
 * Copy the DRAM timing and burst-time parameters from one memory-
 * controller arbiter register set to another, then request that the
 * destination set become active.
 *
 * @arb_src/@arb_dest: MC_CG_ARB_FREQ_F0 or MC_CG_ARB_FREQ_F1; only
 * these two sets are supported here.
 *
 * Returns 0 on success, -EINVAL for an unsupported source/destination.
 */
static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
		uint32_t arb_src, uint32_t arb_dest)
{
	uint32_t mc_arb_dram_timing;
	uint32_t mc_arb_dram_timing2;
	uint32_t burst_time;
	uint32_t mc_cg_config;

	/* Capture the source set's timings and burst time. */
	switch (arb_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
		break;
	default:
		return -EINVAL;
	}

	/* Replay them into the destination set. */
	switch (arb_dest) {
	case MC_CG_ARB_FREQ_F0:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
		break;
	case MC_CG_ARB_FREQ_F1:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
		break;
	default:
		return -EINVAL;
	}

	/* Set the low nibble of MC_CG_CONFIG before requesting the switch.
	 * NOTE(review): presumably enables CG on all MC channels -- confirm
	 * against the register spec. */
	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
	mc_cg_config |= 0x0000000F;
	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
	/* Request activation of the destination arbiter set. */
	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);

	return 0;
}
477
/* Ask the SMC firmware to restore its power-management defaults. */
static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults);
}
482
483
484
485
486
487
488
489
/*
 * Copy the ARB F0 register set into F1 and make F1 the active set.
 * Thin wrapper around smu7_copy_and_switch_arb_sets().
 */
static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
{
	return smu7_copy_and_switch_arb_sets(hwmgr,
			MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
495
496static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
497{
498 uint32_t tmp;
499
500 tmp = (cgs_read_ind_register(hwmgr->device,
501 CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
502 0x0000ff00) >> 8;
503
504 if (tmp == MC_CG_ARB_FREQ_F0)
505 return 0;
506
507 return smu7_copy_and_switch_arb_sets(hwmgr,
508 tmp, MC_CG_ARB_FREQ_F0);
509}
510
/*
 * Build the default PCIe speed/lane DPM table, either from the v1
 * powerplay pcie_table (when present) or from hard-coded min/max
 * gen+lane defaults, always capping gen and lane values at the
 * platform capabilities (pcie_gen_cap / pcie_lane_cap).
 *
 * Returns 0 on success, -EINVAL when neither performance nor
 * power-saving PCIe level sets are available.
 */
static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_pcie_table *pcie_table = NULL;

	uint32_t i, max_entry;
	uint32_t tmp;

	PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
			data->use_pcie_power_saving_levels), "No pcie performance levels!",
			return -EINVAL);

	if (table_info != NULL)
		pcie_table = table_info->pcie_table;

	/* If only one of the two level sets is present, mirror it into the
	 * other so both performance and power-saving settings are valid. */
	if (data->use_pcie_performance_levels &&
			!data->use_pcie_power_saving_levels) {
		data->pcie_gen_power_saving = data->pcie_gen_performance;
		data->pcie_lane_power_saving = data->pcie_lane_performance;
	} else if (!data->use_pcie_performance_levels &&
			data->use_pcie_power_saving_levels) {
		data->pcie_gen_performance = data->pcie_gen_power_saving;
		data->pcie_lane_performance = data->pcie_lane_power_saving;
	}
	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
	phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
					tmp,
					MAX_REGULAR_DPM_NUMBER);

	if (pcie_table != NULL) {
		/* NOTE(review): entry 0 of the BIOS pcie_table is skipped
		 * (loop starts at 1) and count becomes max_entry - 1 --
		 * presumably the first entry is reserved for the boot level;
		 * confirm against the pptable format. */
		max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
		for (i = 1; i < max_entry; i++) {
			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
					get_pcie_gen_support(data->pcie_gen_cap,
							pcie_table->entries[i].gen_speed),
					get_pcie_lane_support(data->pcie_lane_cap,
							pcie_table->entries[i].lane_width));
		}
		data->dpm_table.pcie_speed_table.count = max_entry - 1;
		smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
	} else {
		/* Hardcoded fallback: 6 levels, min gen for the two lowest
		 * and max gen for the rest, all at the widest supported lane. */
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));

		data->dpm_table.pcie_speed_table.count = 6;
	}
	/* Default to PCIe boot state on CI chips; otherwise append one extra
	 * entry past count.  NOTE(review): the CI loop bound is inclusive
	 * (<= count), so it also rewrites entry [count] -- verify intent. */
	if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
		for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
					get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
					data->vbios_boot_state.pcie_lane_bootup_value);
	} else {
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
					data->dpm_table.pcie_speed_table.count,
					get_pcie_gen_support(data->pcie_gen_cap,
							PP_Min_PCIEGen),
					get_pcie_lane_support(data->pcie_lane_cap,
							PP_Max_PCIELane));
	}
	return 0;
}
610
611static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
612{
613 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
614
615 memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
616
617 phm_reset_single_dpm_table(
618 &data->dpm_table.sclk_table,
619 smum_get_mac_definition(hwmgr,
620 SMU_MAX_LEVELS_GRAPHICS),
621 MAX_REGULAR_DPM_NUMBER);
622 phm_reset_single_dpm_table(
623 &data->dpm_table.mclk_table,
624 smum_get_mac_definition(hwmgr,
625 SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);
626
627 phm_reset_single_dpm_table(
628 &data->dpm_table.vddc_table,
629 smum_get_mac_definition(hwmgr,
630 SMU_MAX_LEVELS_VDDC),
631 MAX_REGULAR_DPM_NUMBER);
632 phm_reset_single_dpm_table(
633 &data->dpm_table.vddci_table,
634 smum_get_mac_definition(hwmgr,
635 SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);
636
637 phm_reset_single_dpm_table(
638 &data->dpm_table.mvdd_table,
639 smum_get_mac_definition(hwmgr,
640 SMU_MAX_LEVELS_MVDD),
641 MAX_REGULAR_DPM_NUMBER);
642 return 0;
643}
644
645
646
647
648
649
650
651
652
/*
 * Build the SCLK/MCLK/VDDC/VDDCI/MVDD DPM tables from the legacy (v0)
 * dyn_state dependency tables.  Duplicate consecutive clock values are
 * collapsed; only the first level of each clock table is enabled.
 *
 * Returns 0 on success, -EINVAL when a mandatory dependency table is
 * missing or empty.
 */
static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
		hwmgr->dyn_state.vddc_dependency_on_sclk;
	struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
		hwmgr->dyn_state.vddc_dependency_on_mclk;
	struct phm_cac_leakage_table *std_voltage_table =
		hwmgr->dyn_state.cac_leakage_table;
	uint32_t i;

	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
		"SCLK dependency table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
		"SCLK dependency table has to have is missing. This table is mandatory", return -EINVAL);

	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
		"VMCLK dependency table has to have is missing. This table is mandatory", return -EINVAL);

	/* Initialize Sclk DPM table based on allowed Sclk values,
	 * skipping entries whose clock equals the previous one. */
	data->dpm_table.sclk_table.count = 0;

	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
				allowed_vdd_sclk_table->entries[i].clk) {
			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
				allowed_vdd_sclk_table->entries[i].clk;
			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
			data->dpm_table.sclk_table.count++;
		}
	}

	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
	/* Initialize Mclk DPM table based on allowed Mclk values */
	data->dpm_table.mclk_table.count = 0;
	for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
		if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
			allowed_vdd_mclk_table->entries[i].clk) {
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
				allowed_vdd_mclk_table->entries[i].clk;
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
			data->dpm_table.mclk_table.count++;
		}
	}

	/* Initialize Vddc DPM table with corresponding leakage values.
	 * NOTE(review): the voltage is read from the MCLK dependency table
	 * while iterating the SCLK table's count -- looks like a possible
	 * copy/paste issue, but may be intentional; confirm before changing. */
	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
		data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
		data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
		/* param1 is for corresponding std voltage */
		data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
	}

	data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
	allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;

	if (NULL != allowed_vdd_mclk_table) {
		/* Initialize Vddci DPM table based on allowed Mclk values */
		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
			data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
			data->dpm_table.vddci_table.dpm_levels[i].enabled = 1;
		}
		data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
	}

	allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;

	if (NULL != allowed_vdd_mclk_table) {
		/* Initialize MVDD DPM table based on allowed Mclk values */
		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
			data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
			data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
		}
		data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
	}

	return 0;
}
738
739static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
740{
741 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
742 struct phm_ppt_v1_information *table_info =
743 (struct phm_ppt_v1_information *)(hwmgr->pptable);
744 uint32_t i;
745
746 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
747 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
748
749 if (table_info == NULL)
750 return -EINVAL;
751
752 dep_sclk_table = table_info->vdd_dep_on_sclk;
753 dep_mclk_table = table_info->vdd_dep_on_mclk;
754
755 PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
756 "SCLK dependency table is missing.",
757 return -EINVAL);
758 PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
759 "SCLK dependency table count is 0.",
760 return -EINVAL);
761
762 PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
763 "MCLK dependency table is missing.",
764 return -EINVAL);
765 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
766 "MCLK dependency table count is 0",
767 return -EINVAL);
768
769
770 data->dpm_table.sclk_table.count = 0;
771 for (i = 0; i < dep_sclk_table->count; i++) {
772 if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
773 dep_sclk_table->entries[i].clk) {
774
775 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
776 dep_sclk_table->entries[i].clk;
777
778 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
779 (i == 0) ? true : false;
780 data->dpm_table.sclk_table.count++;
781 }
782 }
783
784
785 data->dpm_table.mclk_table.count = 0;
786 for (i = 0; i < dep_mclk_table->count; i++) {
787 if (i == 0 || data->dpm_table.mclk_table.dpm_levels
788 [data->dpm_table.mclk_table.count - 1].value !=
789 dep_mclk_table->entries[i].clk) {
790 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
791 dep_mclk_table->entries[i].clk;
792 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
793 (i == 0) ? true : false;
794 data->dpm_table.mclk_table.count++;
795 }
796 }
797
798 return 0;
799}
800
801static int smu7_get_voltage_dependency_table(
802 const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table,
803 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
804{
805 uint8_t i = 0;
806 PP_ASSERT_WITH_CODE((0 != allowed_dep_table->count),
807 "Voltage Lookup Table empty",
808 return -EINVAL);
809
810 dep_table->count = allowed_dep_table->count;
811 for (i=0; i<dep_table->count; i++) {
812 dep_table->entries[i].clk = allowed_dep_table->entries[i].clk;
813 dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd;
814 dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset;
815 dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc;
816 dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx;
817 dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci;
818 dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd;
819 dep_table->entries[i].phases = allowed_dep_table->entries[i].phases;
820 dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable;
821 dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset;
822 }
823
824 return 0;
825}
826
/*
 * Initialise the OverDriveN (OD) default tables from the golden DPM
 * tables: per-level clock/voltage entries for SCLK and MCLK, plus
 * copies of the corresponding clock/voltage dependency tables.
 *
 * Returns 0 on success, -EINVAL when no v1 pptable is present.
 */
static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
	struct phm_odn_performance_level *entries;

	if (table_info == NULL)
		return -EINVAL;

	dep_sclk_table = table_info->vdd_dep_on_sclk;
	dep_mclk_table = table_info->vdd_dep_on_mclk;

	/* SCLK OD levels mirror the golden sclk DPM table.
	 * NOTE(review): dep_sclk_table is indexed with the golden table's
	 * count -- assumes both have the same number of entries; confirm. */
	odn_table->odn_core_clock_dpm_levels.num_of_pl =
						data->golden_dpm_table.sclk_table.count;
	entries = odn_table->odn_core_clock_dpm_levels.entries;
	for (i=0; i<data->golden_dpm_table.sclk_table.count; i++) {
		entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
		entries[i].enabled = true;
		entries[i].vddc = dep_sclk_table->entries[i].vddc;
	}

	smu7_get_voltage_dependency_table(dep_sclk_table,
		(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));

	/* MCLK OD levels mirror the golden mclk DPM table (same count
	 * assumption as above). */
	odn_table->odn_memory_clock_dpm_levels.num_of_pl =
						data->golden_dpm_table.mclk_table.count;
	entries = odn_table->odn_memory_clock_dpm_levels.entries;
	for (i=0; i<data->golden_dpm_table.mclk_table.count; i++) {
		entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
		entries[i].enabled = true;
		entries[i].vddc = dep_mclk_table->entries[i].vddc;
	}

	smu7_get_voltage_dependency_table(dep_mclk_table,
		(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));

	return 0;
}
871
/*
 * Top-level DPM table setup: reset all tables, rebuild them from the
 * active pptable revision (v0 or v1), build the PCIe table, snapshot
 * the result into the golden table, and (if overdrive is enabled)
 * seed the OD defaults.  Sub-step return codes are intentionally not
 * propagated; always returns 0.
 */
static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	smu7_reset_dpm_tables(hwmgr);

	if (hwmgr->pp_table_version == PP_TABLE_V1)
		smu7_setup_dpm_tables_v1(hwmgr);
	else if (hwmgr->pp_table_version == PP_TABLE_V0)
		smu7_setup_dpm_tables_v0(hwmgr);

	smu7_setup_default_pcie_table(hwmgr);

	/* save a copy of the default DPM table */
	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
			sizeof(struct smu7_dpm_table));

	/* initialize ODN table */
	if (hwmgr->od_enabled)
		smu7_odn_initial_default_setting(hwmgr);

	return 0;
}
895
896static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
897{
898
899 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
900 PHM_PlatformCaps_RegulatorHot))
901 return smum_send_msg_to_smc(hwmgr,
902 PPSMC_MSG_EnableVRHotGPIOInterrupt);
903
904 return 0;
905}
906
/*
 * Clear SCLK_PWRMGT_CNTL.SCLK_PWRMGT_OFF so SCLK power management is
 * no longer forced off.  Always returns 0.
 */
static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			SCLK_PWRMGT_OFF, 0);
	return 0;
}
913
914static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
915{
916 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
917
918 if (data->ulv_supported)
919 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV);
920
921 return 0;
922}
923
924static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
925{
926 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
927
928 if (data->ulv_supported)
929 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV);
930
931 return 0;
932}
933
/*
 * Set the SMC master deep-sleep switch according to the SclkDeepSleep
 * platform cap: ON when the cap is set, OFF otherwise.
 *
 * Returns 0 on success, -EINVAL when the SMC rejects the message.
 */
static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep)) {
		if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to enable Master Deep Sleep switch failed!",
					return -EINVAL);
	} else {
		if (smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
			PP_ASSERT_WITH_CODE(false,
					"Attempt to disable Master Deep Sleep switch failed!",
					return -EINVAL);
		}
	}

	return 0;
}
953
954static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
955{
956 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
957 PHM_PlatformCaps_SclkDeepSleep)) {
958 if (smum_send_msg_to_smc(hwmgr,
959 PPSMC_MSG_MASTER_DeepSleep_OFF)) {
960 PP_ASSERT_WITH_CODE(false,
961 "Attempt to disable Master Deep Sleep switch failed!",
962 return -EINVAL);
963 }
964 }
965
966 return 0;
967}
968
/*
 * Set the UVD MCLK handshake-disable bit in the SMC's HandshakeDisables
 * soft register (read-modify-write).  The SMC-side effect is
 * firmware-defined; presumably it stops MCLK DPM from handshaking with
 * UVD activity -- confirm against SMU firmware docs.  Always returns 0.
 */
static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t soft_register_value = 0;
	/* Byte offset of HandshakeDisables within the SMC soft-register area. */
	uint32_t handshake_disables_offset = data->soft_regs_start
				+ smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, HandshakeDisables);

	soft_register_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, handshake_disables_offset);
	soft_register_value |= smum_get_mac_definition(hwmgr,
					SMU_UVD_MCLK_HANDSHAKE_DISABLE);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			handshake_disables_offset, soft_register_value);
	return 0;
}
985
/*
 * Enable SCLK DPM and (unless disabled) MCLK DPM in the SMC as part of
 * DPM start-up, then program the memory CAC (LCAC) controllers.
 *
 * Returns 0 on success, -EINVAL when the SMC rejects an enable message.
 */
static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* enable SCLK dpm */
	if (!data->sclk_dpm_key_disabled)
		PP_ASSERT_WITH_CODE(
		(0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
		"Failed to enable SCLK DPM during DPM Start Function!",
		return -EINVAL);

	/* enable MCLK dpm */
	if (0 == data->mclk_dpm_key_disabled) {
		/* UVD<->MCLK handshaking stays on only when explicitly
		 * requested via PP_UVD_HANDSHAKE_MASK. */
		if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
			smu7_disable_handshake_uvd(hwmgr);
		PP_ASSERT_WITH_CODE(
		(0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_MCLKDPM_Enable)),
		"Failed to enable MCLK DPM during DPM Start Function!",
		return -EINVAL);

		PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);

		/* Program the LCAC (memory CAC) controllers.  CI parts use raw
		 * 0xc0400dxx offsets since the ixLCAC_* defines target the VI
		 * layout.  NOTE(review): values 0x5/0x100005/0x400005/0x500005
		 * are presumably enable/sample-period encodings -- confirm
		 * against the register spec. */
		if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005);
			udelay(10);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005);
		} else {
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
			udelay(10);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
		}
	}

	return 0;
}
1031
/* Start dynamic power management: enable global power management and
 * dynamic SCLK PM, then SCLK/MCLK DPM, PCIe DPM, and optionally the
 * AC/DC GPIO interrupt. Returns 0 on success or -EINVAL on failure.
 */
static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* Enable global power management. */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 1);

	/* Enable dynamic SCLK power management. */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 1);

	/* Program the voltage-change timeout soft register, then clear the
	 * PCIe link-controller reset field.
	 */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start +
			smum_get_offsetof(hwmgr, SMU_SoftRegisters,
					VoltageChangeTimeout), 0x1000);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
			SWRST_COMMAND_1, RESETLC, 0x0);

	/* CI-family parts: clear bit 0 of register 0x1488 (no symbolic
	 * define available here — TODO confirm register name).
	 */
	if (hwmgr->chip_family == AMDGPU_FAMILY_CI)
		cgs_write_register(hwmgr->device, 0x1488,
			(cgs_read_register(hwmgr->device, 0x1488) & ~0x1));

	if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
		pr_err("Failed to enable Sclk DPM and Mclk DPM!");
		return -EINVAL;
	}

	/* Enable PCIe DPM unless the feature was masked off. */
	if (0 == data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_PCIeDPM_Enable)),
				"Failed to enable pcie DPM during DPM Start Function!",
				return -EINVAL);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition)) {
		/* Best effort: a failure here only logs; DPM start continues. */
		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_EnableACDCGPIOInterrupt)),
				"Failed to enable AC DC GPIO Interrupt!",
				);
	}

	return 0;
}
1083
1084static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1085{
1086 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1087
1088
1089 if (!data->sclk_dpm_key_disabled) {
1090 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1091 "Trying to disable SCLK DPM when DPM is disabled",
1092 return 0);
1093 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable);
1094 }
1095
1096
1097 if (!data->mclk_dpm_key_disabled) {
1098 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1099 "Trying to disable MCLK DPM when DPM is disabled",
1100 return 0);
1101 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable);
1102 }
1103
1104 return 0;
1105}
1106
/* Stop dynamic power management: turn off global/dynamic PM, disable
 * PCIe DPM, then SCLK/MCLK DPM, and finally voltage control.
 */
static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* Disable global power management. */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 0);

	/* Disable dynamic SCLK power management. */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 0);

	/* Disable PCIe DPM unless the feature was masked off. */
	if (!data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_PCIeDPM_Disable) == 0),
				"Failed to disable pcie DPM during DPM Stop Function!",
				return -EINVAL);
	}

	smu7_disable_sclk_mclk_dpm(hwmgr);

	/* Voltage control can only be disabled while DPM is still running. */
	PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
			"Trying to disable voltage DPM when DPM is disabled",
			return 0);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable);

	return 0;
}
1137
1138static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
1139{
1140 bool protection;
1141 enum DPM_EVENT_SRC src;
1142
1143 switch (sources) {
1144 default:
1145 pr_err("Unknown throttling event sources.");
1146
1147 case 0:
1148 protection = false;
1149
1150 break;
1151 case (1 << PHM_AutoThrottleSource_Thermal):
1152 protection = true;
1153 src = DPM_EVENT_SRC_DIGITAL;
1154 break;
1155 case (1 << PHM_AutoThrottleSource_External):
1156 protection = true;
1157 src = DPM_EVENT_SRC_EXTERNAL;
1158 break;
1159 case (1 << PHM_AutoThrottleSource_External) |
1160 (1 << PHM_AutoThrottleSource_Thermal):
1161 protection = true;
1162 src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
1163 break;
1164 }
1165
1166 if (protection) {
1167 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
1168 DPM_EVENT_SRC, src);
1169 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1170 THERMAL_PROTECTION_DIS,
1171 !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1172 PHM_PlatformCaps_ThermalController));
1173 } else
1174 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1175 THERMAL_PROTECTION_DIS, 1);
1176}
1177
1178static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1179 PHM_AutoThrottleSource source)
1180{
1181 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1182
1183 if (!(data->active_auto_throttle_sources & (1 << source))) {
1184 data->active_auto_throttle_sources |= 1 << source;
1185 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1186 }
1187 return 0;
1188}
1189
/* Convenience wrapper: enable the thermal auto-throttle source. */
static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
1194
1195static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1196 PHM_AutoThrottleSource source)
1197{
1198 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1199
1200 if (data->active_auto_throttle_sources & (1 << source)) {
1201 data->active_auto_throttle_sources &= ~(1 << source);
1202 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1203 }
1204 return 0;
1205}
1206
/* Convenience wrapper: disable the thermal auto-throttle source. */
static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
1211
1212static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
1213{
1214 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1215 data->pcie_performance_request = true;
1216
1217 return 0;
1218}
1219
1220static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1221{
1222 int tmp_result = 0;
1223 int result = 0;
1224
1225 if (smu7_voltage_control(hwmgr)) {
1226 tmp_result = smu7_enable_voltage_control(hwmgr);
1227 PP_ASSERT_WITH_CODE(tmp_result == 0,
1228 "Failed to enable voltage control!",
1229 result = tmp_result);
1230
1231 tmp_result = smu7_construct_voltage_tables(hwmgr);
1232 PP_ASSERT_WITH_CODE((0 == tmp_result),
1233 "Failed to contruct voltage tables!",
1234 result = tmp_result);
1235 }
1236 smum_initialize_mc_reg_table(hwmgr);
1237
1238 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1239 PHM_PlatformCaps_EngineSpreadSpectrumSupport))
1240 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1241 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
1242
1243 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1244 PHM_PlatformCaps_ThermalController))
1245 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1246 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
1247
1248 tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
1249 PP_ASSERT_WITH_CODE((0 == tmp_result),
1250 "Failed to program static screen threshold parameters!",
1251 result = tmp_result);
1252
1253 tmp_result = smu7_enable_display_gap(hwmgr);
1254 PP_ASSERT_WITH_CODE((0 == tmp_result),
1255 "Failed to enable display gap!", result = tmp_result);
1256
1257 tmp_result = smu7_program_voting_clients(hwmgr);
1258 PP_ASSERT_WITH_CODE((0 == tmp_result),
1259 "Failed to program voting clients!", result = tmp_result);
1260
1261 tmp_result = smum_process_firmware_header(hwmgr);
1262 PP_ASSERT_WITH_CODE((0 == tmp_result),
1263 "Failed to process firmware header!", result = tmp_result);
1264
1265 tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
1266 PP_ASSERT_WITH_CODE((0 == tmp_result),
1267 "Failed to initialize switch from ArbF0 to F1!",
1268 result = tmp_result);
1269
1270 result = smu7_setup_default_dpm_tables(hwmgr);
1271 PP_ASSERT_WITH_CODE(0 == result,
1272 "Failed to setup default DPM tables!", return result);
1273
1274 tmp_result = smum_init_smc_table(hwmgr);
1275 PP_ASSERT_WITH_CODE((0 == tmp_result),
1276 "Failed to initialize SMC table!", result = tmp_result);
1277
1278 tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
1279 PP_ASSERT_WITH_CODE((0 == tmp_result),
1280 "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
1281
1282 smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay);
1283
1284 tmp_result = smu7_enable_sclk_control(hwmgr);
1285 PP_ASSERT_WITH_CODE((0 == tmp_result),
1286 "Failed to enable SCLK control!", result = tmp_result);
1287
1288 tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
1289 PP_ASSERT_WITH_CODE((0 == tmp_result),
1290 "Failed to enable voltage control!", result = tmp_result);
1291
1292 tmp_result = smu7_enable_ulv(hwmgr);
1293 PP_ASSERT_WITH_CODE((0 == tmp_result),
1294 "Failed to enable ULV!", result = tmp_result);
1295
1296 tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
1297 PP_ASSERT_WITH_CODE((0 == tmp_result),
1298 "Failed to enable deep sleep master switch!", result = tmp_result);
1299
1300 tmp_result = smu7_enable_didt_config(hwmgr);
1301 PP_ASSERT_WITH_CODE((tmp_result == 0),
1302 "Failed to enable deep sleep master switch!", result = tmp_result);
1303
1304 tmp_result = smu7_start_dpm(hwmgr);
1305 PP_ASSERT_WITH_CODE((0 == tmp_result),
1306 "Failed to start DPM!", result = tmp_result);
1307
1308 tmp_result = smu7_enable_smc_cac(hwmgr);
1309 PP_ASSERT_WITH_CODE((0 == tmp_result),
1310 "Failed to enable SMC CAC!", result = tmp_result);
1311
1312 tmp_result = smu7_enable_power_containment(hwmgr);
1313 PP_ASSERT_WITH_CODE((0 == tmp_result),
1314 "Failed to enable power containment!", result = tmp_result);
1315
1316 tmp_result = smu7_power_control_set_level(hwmgr);
1317 PP_ASSERT_WITH_CODE((0 == tmp_result),
1318 "Failed to power control set level!", result = tmp_result);
1319
1320 tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
1321 PP_ASSERT_WITH_CODE((0 == tmp_result),
1322 "Failed to enable thermal auto throttle!", result = tmp_result);
1323
1324 tmp_result = smu7_pcie_performance_request(hwmgr);
1325 PP_ASSERT_WITH_CODE((0 == tmp_result),
1326 "pcie performance request failed!", result = tmp_result);
1327
1328 return 0;
1329}
1330
1331static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
1332{
1333 if (!hwmgr->avfs_supported)
1334 return 0;
1335
1336 if (enable) {
1337 if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
1338 CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1339 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
1340 hwmgr, PPSMC_MSG_EnableAvfs),
1341 "Failed to enable AVFS!",
1342 return -EINVAL);
1343 }
1344 } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
1345 CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1346 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
1347 hwmgr, PPSMC_MSG_DisableAvfs),
1348 "Failed to disable AVFS!",
1349 return -EINVAL);
1350 }
1351
1352 return 0;
1353}
1354
1355static int smu7_update_avfs(struct pp_hwmgr *hwmgr)
1356{
1357 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1358
1359 if (!hwmgr->avfs_supported)
1360 return 0;
1361
1362 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
1363 smu7_avfs_control(hwmgr, false);
1364 } else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
1365 smu7_avfs_control(hwmgr, false);
1366 smu7_avfs_control(hwmgr, true);
1367 } else {
1368 smu7_avfs_control(hwmgr, true);
1369 }
1370
1371 return 0;
1372}
1373
/* Tear down the DPM stack in roughly the reverse order of
 * smu7_enable_dpm_tasks(). Individual failures are logged and the last
 * error is accumulated in 'result', which is returned.
 */
int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	/* Disable thermal protection if a thermal controller exists. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);

	tmp_result = smu7_disable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable power containment!", result = tmp_result);

	tmp_result = smu7_disable_smc_cac(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable SMC CAC!", result = tmp_result);

	tmp_result = smu7_disable_didt_config(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable DIDT!", result = tmp_result);

	/* Turn off engine spread spectrum. */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);

	tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable thermal auto throttle!", result = tmp_result);

	tmp_result = smu7_avfs_control(hwmgr, false);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable AVFS!", result = tmp_result);

	tmp_result = smu7_stop_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to stop DPM!", result = tmp_result);

	tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable deep sleep master switch!", result = tmp_result);

	tmp_result = smu7_disable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable ULV!", result = tmp_result);

	tmp_result = smu7_clear_voting_clients(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to clear voting clients!", result = tmp_result);

	tmp_result = smu7_reset_to_default(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to reset to default!", result = tmp_result);

	/* Restore the default memory arbiter state. */
	tmp_result = smu7_force_switch_to_arbf0(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to force to switch arbf0!", result = tmp_result);

	return result;
}
1434
/* ASIC reset hook — intentionally a no-op for SMU7 parts. */
int smu7_reset_asic_tasks(struct pp_hwmgr *hwmgr)
{

	return 0;
}
1440
/* Populate the smu7 backend with default DPM settings: feature enables
 * from the feature mask, voltage-control modes probed from the VBIOS,
 * thermal trip points, PCIe preferences and power-profile defaults.
 */
static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct amdgpu_device *adev = hwmgr->adev;

	data->dll_default_on = false;
	data->mclk_dpm0_activity_target = 0xa;
	data->vddc_vddgfx_delta = 300;
	data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
	data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
	data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
	data->voting_rights_clients[1]= SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
	data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
	data->voting_rights_clients[3]= SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
	data->voting_rights_clients[4]= SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
	data->voting_rights_clients[5]= SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
	data->voting_rights_clients[6]= SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
	data->voting_rights_clients[7]= SMU7_VOTINGRIGHTSCLIENTS_DFLT7;

	/* DPM features are enabled unless masked out via feature_mask. */
	data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
	data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
	data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;

	/* Voltage-control modes start as NONE; refined by VBIOS probes below. */
	data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->enable_tdc_limit_feature = true;
	data->enable_pkg_pwr_tracking_feature = true;
	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
	/* Default power-profile hysteresis/activity settings. */
	data->current_profile_setting.bupdate_sclk = 1;
	data->current_profile_setting.sclk_up_hyst = 0;
	data->current_profile_setting.sclk_down_hyst = 100;
	data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
	data->current_profile_setting.bupdate_sclk = 1;
	data->current_profile_setting.mclk_up_hyst = 0;
	data->current_profile_setting.mclk_down_hyst = 100;
	data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
	hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
	hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;

	/* Phase-shed control: Polaris12/kicker parts derive it from SVI2
	 * info bits 5-6 (with a bit swap); CI uses 1, everything else 0.
	 */
	if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) {
		uint8_t tmp1, tmp2;
		uint16_t tmp3 = 0;
		atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2,
						&tmp3);
		tmp3 = (tmp3 >> 5) & 0x3;
		data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3;
	} else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
		data->vddc_phase_shed_control = 1;
	} else {
		data->vddc_phase_shed_control = 0;
	}

	/* Thermal trip points (millidegrees C); Hawaii runs slightly cooler. */
	if (hwmgr->chip_id == CHIP_HAWAII) {
		data->thermal_temp_setting.temperature_low = 94500;
		data->thermal_temp_setting.temperature_high = 95000;
		data->thermal_temp_setting.temperature_shutdown = 104000;
	} else {
		data->thermal_temp_setting.temperature_low = 99500;
		data->thermal_temp_setting.temperature_high = 100000;
		data->thermal_temp_setting.temperature_shutdown = 104000;
	}

	data->fast_watermark_threshold = 100;
	/* Probe the VBIOS for the VDDC control method: SVID2 preferred,
	 * GPIO LUT as fallback.
	 */
	if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
	else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
		data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ControlVDDGFX)) {
		if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
			VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
			data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
		}
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableMVDDControl)) {
		if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
		else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
			data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
	}

	/* Drop platform caps whose control method turned out unavailable. */
	if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ControlVDDGFX);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ControlVDDCI)) {
		if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
		else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
	}

	if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_EnableMVDDControl);

	if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	/* Clock stretcher needs a v1 pptable with a nonzero stretch amount. */
	if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK)
		&& (table_info->cac_dtp_table->usClockStretchAmount != 0))
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_ClockStretcher);

	data->pcie_gen_performance.max = PP_PCIEGen1;
	data->pcie_gen_performance.min = PP_PCIEGen3;
	data->pcie_gen_power_saving.max = PP_PCIEGen1;
	data->pcie_gen_power_saving.min = PP_PCIEGen3;
	data->pcie_lane_performance.max = 0;
	data->pcie_lane_performance.min = 16;
	data->pcie_lane_power_saving.max = 0;
	data->pcie_lane_power_saving.min = 16;

	/* Mirror UVD/VCE power-gating support from the device pg_flags. */
	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			      PHM_PlatformCaps_UVDPowerGating);
	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			      PHM_PlatformCaps_VCEPowerGating);
}
1578
1579
1580
1581
1582
1583
1584
/* Query the EVV (leakage) voltages for each virtual voltage ID and record
 * the (leakage_id, actual_voltage) pairs in the backend's leakage tables.
 * VDDGFX is used when it is SVID2-controlled, otherwise VDDC.
 */
static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint16_t vv_id;
	uint16_t vddc = 0;
	uint16_t vddgfx = 0;
	uint16_t i, j;
	uint32_t sclk = 0;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;

	/* NOTE(review): the VDDGFX branch dereferences table_info without the
	 * NULL check that guards the VDDC branch — confirm table_info is
	 * always non-NULL when vdd_gfx_control is SVID2.
	 */
	for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;

		if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
			if ((hwmgr->pp_table_version == PP_TABLE_V1)
			    && !phm_get_sclk_for_voltage_evv(hwmgr,
						table_info->vddgfx_lookup_table, vv_id, &sclk)) {
				/* Clock stretcher: bump sclk past entries with
				 * stretching disabled at this clock.
				 */
				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
							PHM_PlatformCaps_ClockStretcher)) {
					sclk_table = table_info->vdd_dep_on_sclk;

					for (j = 1; j < sclk_table->count; j++) {
						if (sclk_table->entries[j].clk == sclk &&
								sclk_table->entries[j].cks_enable == 0) {
							sclk += 5000;
							break;
						}
					}
				}
				if (0 == atomctrl_get_voltage_evv_on_sclk
				    (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
				     vv_id, &vddgfx)) {
					/* Sanity: EVV voltages must be < 2V and nonzero. */
					PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);

					/* Record only real voltages, not echoed leakage IDs. */
					if (vddgfx != 0 && vddgfx != vv_id) {
						data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
						data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
						data->vddcgfx_leakage.count++;
					}
				} else {
					pr_info("Error retrieving EVV voltage value!\n");
				}
			}
		} else {
			if ((hwmgr->pp_table_version == PP_TABLE_V0)
				|| !phm_get_sclk_for_voltage_evv(hwmgr,
					table_info->vddc_lookup_table, vv_id, &sclk)) {
				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_ClockStretcher)) {
					if (table_info == NULL)
						return -EINVAL;
					sclk_table = table_info->vdd_dep_on_sclk;

					for (j = 1; j < sclk_table->count; j++) {
						if (sclk_table->entries[j].clk == sclk &&
								sclk_table->entries[j].cks_enable == 0) {
							sclk += 5000;
							break;
						}
					}
				}

				if (phm_get_voltage_evv_on_sclk(hwmgr,
							VOLTAGE_TYPE_VDDC,
							sclk, vv_id, &vddc) == 0) {
					/* Sanity: EVV voltages must be < 2V and nonzero. */
					if (vddc >= 2000 || vddc == 0)
						return -EINVAL;
				} else {
					pr_debug("failed to retrieving EVV voltage!\n");
					continue;
				}

				/* Record only real voltages, not echoed leakage IDs. */
				if (vddc != 0 && vddc != vv_id) {
					data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
					data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
					data->vddc_leakage.count++;
				}
			}
		}
	}

	return 0;
}
1674
1675
1676
1677
1678
1679
1680
1681
1682static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
1683 uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
1684{
1685 uint32_t index;
1686
1687
1688 for (index = 0; index < leakage_table->count; index++) {
1689
1690
1691 if (leakage_table->leakage_id[index] == *voltage) {
1692 *voltage = leakage_table->actual_voltage[index];
1693 break;
1694 }
1695 }
1696
1697 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
1698 pr_err("Voltage value looks like a Leakage ID but it's not patched \n");
1699}
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
1710 phm_ppt_v1_voltage_lookup_table *lookup_table,
1711 struct smu7_leakage_voltage *leakage_table)
1712{
1713 uint32_t i;
1714
1715 for (i = 0; i < lookup_table->count; i++)
1716 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
1717 &lookup_table->entries[i].us_vdd, leakage_table);
1718
1719 return 0;
1720}
1721
1722static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
1723 struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
1724 uint16_t *vddc)
1725{
1726 struct phm_ppt_v1_information *table_info =
1727 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1728 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
1729 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
1730 table_info->max_clock_voltage_on_dc.vddc;
1731 return 0;
1732}
1733
/* Resolve the voltage-index references in the SCLK, MCLK and MM clock
 * dependency tables into actual voltages from the lookup tables. When
 * VDDGFX is SVID2-controlled the SCLK table gets vddgfx values from the
 * vddgfx lookup table, otherwise vddc values from the vddc lookup table.
 */
static int smu7_patch_voltage_dependency_tables_with_lookup_table(
		struct pp_hwmgr *hwmgr)
{
	uint8_t entry_id;
	uint8_t voltage_id;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
			table_info->vdd_dep_on_sclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
			table_info->vdd_dep_on_mclk;
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
			table_info->mm_dep_table;

	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
			voltage_id = sclk_table->entries[entry_id].vddInd;
			sclk_table->entries[entry_id].vddgfx =
				table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
		}
	} else {
		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
			voltage_id = sclk_table->entries[entry_id].vddInd;
			sclk_table->entries[entry_id].vddc =
				table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
		}
	}

	/* MCLK and MM tables always resolve against the vddc lookup table. */
	for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
		voltage_id = mclk_table->entries[entry_id].vddInd;
		mclk_table->entries[entry_id].vddc =
			table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
	}

	for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
		voltage_id = mm_table->entries[entry_id].vddcInd;
		mm_table->entries[entry_id].vddc =
			table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
	}

	return 0;

}
1779
/* Insert @record into @look_up_table as a calculated entry. If an entry
 * with the same us_vdd exists it is overwritten (unless already marked
 * calculated, which is a no-op); otherwise the record is appended.
 * Returns -EINVAL for a NULL/empty/full table.
 */
static int phm_add_voltage(struct pp_hwmgr *hwmgr,
		phm_ppt_v1_voltage_lookup_table *look_up_table,
		phm_ppt_v1_voltage_lookup_record *record)
{
	uint32_t i;

	PP_ASSERT_WITH_CODE((NULL != look_up_table),
		"Lookup Table empty.", return -EINVAL);
	PP_ASSERT_WITH_CODE((0 != look_up_table->count),
		"Lookup Table empty.", return -EINVAL);

	/* Capacity check: max VDDGFX levels must allow for a new entry. */
	i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
	PP_ASSERT_WITH_CODE((i >= look_up_table->count),
		"Lookup Table is full.", return -EINVAL);

	/* Find an existing entry with the same voltage; if none, the loop
	 * leaves i == count so the record is appended below.
	 */
	for (i = 0; i < look_up_table->count; i++) {
		if (look_up_table->entries[i].us_vdd == record->us_vdd) {
			if (look_up_table->entries[i].us_calculated == 1)
				return 0;
			break;
		}
	}

	look_up_table->entries[i].us_calculated = 1;
	look_up_table->entries[i].us_vdd = record->us_vdd;
	look_up_table->entries[i].us_cac_low = record->us_cac_low;
	look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
	look_up_table->entries[i].us_cac_high = record->us_cac_high;

	/* Appended rather than overwritten: grow the table. */
	if (i == look_up_table->count)
		look_up_table->count++;

	return 0;
}
1815
1816
/* For split-rail (SVID2 VDDGFX) parts, derive the "other" rail's voltage
 * for each SCLK/MCLK dependency entry from vddgfx/vddc plus the signed
 * 16-bit vdd_offset, and register the result in the matching lookup table.
 */
static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
{
	uint8_t entry_id;
	struct phm_ppt_v1_voltage_lookup_record v_record;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);

	phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
	phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;

	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
			/* vdd_offset is a signed 16-bit value: if bit 15 is
			 * set, subtract 0xFFFF to sign-extend the sum.
			 */
			if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
				v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
					sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
			else
				v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
					sclk_table->entries[entry_id].vdd_offset;

			sclk_table->entries[entry_id].vddc =
				v_record.us_cac_low = v_record.us_cac_mid =
				v_record.us_cac_high = v_record.us_vdd;

			phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
		}

		for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
			/* Same sign-extension trick as above, vddc -> vddgfx. */
			if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
				v_record.us_vdd = mclk_table->entries[entry_id].vddc +
					mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
			else
				v_record.us_vdd = mclk_table->entries[entry_id].vddc +
					mclk_table->entries[entry_id].vdd_offset;

			mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
			phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
		}
	}
	return 0;
}
1858
/* Same derivation as smu7_calc_voltage_dependency_tables(), applied to
 * the multimedia (MM) clock dependency table: compute vddgfx from vddc
 * plus the signed 16-bit vddgfx_offset and register it in the vddgfx
 * lookup table. Only applies on split-rail (SVID2 VDDGFX) parts.
 */
static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
{
	uint8_t entry_id;
	struct phm_ppt_v1_voltage_lookup_record v_record;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;

	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
			/* Sign-extend the 16-bit offset via bit 15 / -0xFFFF. */
			if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
				v_record.us_vdd = mm_table->entries[entry_id].vddc +
					mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
			else
				v_record.us_vdd = mm_table->entries[entry_id].vddc +
					mm_table->entries[entry_id].vddgfx_offset;

			/* vddgfx of the MM table and the CAC fields all take
			 * the computed voltage.
			 */
			mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
			phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
		}
	}
	return 0;
}
1884
1885static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
1886 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
1887{
1888 uint32_t table_size, i, j;
1889 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
1890 table_size = lookup_table->count;
1891
1892 PP_ASSERT_WITH_CODE(0 != lookup_table->count,
1893 "Lookup table is empty", return -EINVAL);
1894
1895
1896 for (i = 0; i < table_size - 1; i++) {
1897 for (j = i + 1; j > 0; j--) {
1898 if (lookup_table->entries[j].us_vdd <
1899 lookup_table->entries[j - 1].us_vdd) {
1900 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
1901 lookup_table->entries[j - 1] = lookup_table->entries[j];
1902 lookup_table->entries[j] = tmp_voltage_lookup_record;
1903 }
1904 }
1905 }
1906
1907 return 0;
1908}
1909
1910static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
1911{
1912 int result = 0;
1913 int tmp_result;
1914 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1915 struct phm_ppt_v1_information *table_info =
1916 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1917
1918 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1919 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
1920 table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
1921 if (tmp_result != 0)
1922 result = tmp_result;
1923
1924 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
1925 &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
1926 } else {
1927
1928 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
1929 table_info->vddc_lookup_table, &(data->vddc_leakage));
1930 if (tmp_result)
1931 result = tmp_result;
1932
1933 tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
1934 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
1935 if (tmp_result)
1936 result = tmp_result;
1937 }
1938
1939 tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
1940 if (tmp_result)
1941 result = tmp_result;
1942
1943 tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
1944 if (tmp_result)
1945 result = tmp_result;
1946
1947 tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
1948 if (tmp_result)
1949 result = tmp_result;
1950
1951 tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
1952 if (tmp_result)
1953 result = tmp_result;
1954
1955 tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
1956 if (tmp_result)
1957 result = tmp_result;
1958
1959 return result;
1960}
1961
1962static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
1963{
1964 struct phm_ppt_v1_information *table_info =
1965 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1966
1967 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
1968 table_info->vdd_dep_on_sclk;
1969 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
1970 table_info->vdd_dep_on_mclk;
1971
1972 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
1973 "VDD dependency on SCLK table is missing.",
1974 return -EINVAL);
1975 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
1976 "VDD dependency on SCLK table has to have is missing.",
1977 return -EINVAL);
1978
1979 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
1980 "VDD dependency on MCLK table is missing",
1981 return -EINVAL);
1982 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
1983 "VDD dependency on MCLK table has to have is missing.",
1984 return -EINVAL);
1985
1986 table_info->max_clock_voltage_on_ac.sclk =
1987 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
1988 table_info->max_clock_voltage_on_ac.mclk =
1989 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
1990 table_info->max_clock_voltage_on_ac.vddc =
1991 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
1992 table_info->max_clock_voltage_on_ac.vddci =
1993 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
1994
1995 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
1996 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
1997 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
1998 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
1999
2000 return 0;
2001}
2002
/*
 * smu7_patch_voltage_workaround - board-specific MCLK voltage fixup.
 *
 * On specific Polaris10 boards (PCI revision 0xC7 plus one of three known
 * subsystem vendor/device pairs) the highest MCLK DPM entry can reference a
 * lookup-table voltage below 1000.  If so, retarget that entry to the first
 * lookup entry whose voltage is >= 1000 but still a real voltage (below
 * 0xff01, presumably the start of the virtual/leakage ID range — TODO
 * confirm).  Always returns 0; this is a best-effort quirk.
 */
static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
	struct phm_ppt_v1_voltage_lookup_table *lookup_table;
	uint32_t i;
	uint32_t hw_revision, sub_vendor_id, sub_sys_id;
	struct amdgpu_device *adev = hwmgr->adev;

	/* no v1 pptable present: nothing to patch */
	if (table_info != NULL) {
		dep_mclk_table = table_info->vdd_dep_on_mclk;
		lookup_table = table_info->vddc_lookup_table;
	} else
		return 0;

	hw_revision = adev->pdev->revision;
	sub_sys_id = adev->pdev->subsystem_device;
	sub_vendor_id = adev->pdev->subsystem_vendor;

	if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
			((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
		    (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
		    (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
		/* top MCLK entry already points at a voltage >= 1000: OK */
		if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
			return 0;

		/* otherwise pick the first acceptable lookup entry */
		for (i = 0; i < lookup_table->count; i++) {
			if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
				dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
				return 0;
			}
		}
	}
	return 0;
}
2039
/*
 * smu7_thermal_parameter_init - program the VDDC PCC (power-compare) GPIO
 * routing and derive fan-control / operating-temperature limits.
 *
 * First, if a VDDC_PCC GPIO pin is assigned, the corresponding field of
 * CNB_PWRMGT_CNTL is set according to the pin's bit shift.  Then, when a
 * v1 pptable with a non-zero default target temperature and an enabled fan
 * control mode is present, the advanced fan-control parameters and the
 * cac_dtp operating-temperature bounds are populated from it.
 *
 * Always returns 0.
 */
static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
{
	struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
	uint32_t temp_reg;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);


	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
		/* read-modify-write CNB_PWRMGT_CNTL; which field is set
		 * depends on which GPIO bit carries the PCC signal */
		temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
		switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
		case 0:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
			break;
		case 1:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
			break;
		case 2:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
			break;
		case 3:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
			break;
		case 4:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
			break;
		default:
			break;
		}
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
	}

	/* the rest requires a v1 pptable */
	if (table_info == NULL)
		return 0;

	if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
		hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;

		/* back the target temperature off by 50, clamped at 0 */
		table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
								(table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;

		table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
		table_info->cac_dtp_table->usOperatingTempStep = 1;
		table_info->cac_dtp_table->usOperatingTempHyst = 1;

		hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
			       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;

		hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
			       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;

		/* mirror the pptable limits into dyn_state for legacy readers */
		hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
			       table_info->cac_dtp_table->usOperatingTempMinLimit;

		hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
			       table_info->cac_dtp_table->usOperatingTempMaxLimit;

		hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
			       table_info->cac_dtp_table->usDefaultTargetOperatingTemp;

		hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
			       table_info->cac_dtp_table->usOperatingTempStep;

		hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
			       table_info->cac_dtp_table->usTargetOperatingTemp;
		if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_ODFuzzyFanControlSupport);
	}

	return 0;
}
2126
2127
2128
2129
2130
2131
2132
2133
2134static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2135 uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
2136{
2137 uint32_t index;
2138
2139
2140 for (index = 0; index < leakage_table->count; index++) {
2141
2142
2143 if (leakage_table->leakage_id[index] == *voltage) {
2144 *voltage = leakage_table->actual_voltage[index];
2145 break;
2146 }
2147 }
2148
2149 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2150 pr_err("Voltage value looks like a Leakage ID but it's not patched \n");
2151}
2152
2153
2154static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
2155 struct phm_clock_voltage_dependency_table *tab)
2156{
2157 uint16_t i;
2158 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2159
2160 if (tab)
2161 for (i = 0; i < tab->count; i++)
2162 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2163 &data->vddc_leakage);
2164
2165 return 0;
2166}
2167
2168static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
2169 struct phm_clock_voltage_dependency_table *tab)
2170{
2171 uint16_t i;
2172 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2173
2174 if (tab)
2175 for (i = 0; i < tab->count; i++)
2176 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2177 &data->vddci_leakage);
2178
2179 return 0;
2180}
2181
2182static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
2183 struct phm_vce_clock_voltage_dependency_table *tab)
2184{
2185 uint16_t i;
2186 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2187
2188 if (tab)
2189 for (i = 0; i < tab->count; i++)
2190 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2191 &data->vddc_leakage);
2192
2193 return 0;
2194}
2195
2196
2197static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
2198 struct phm_uvd_clock_voltage_dependency_table *tab)
2199{
2200 uint16_t i;
2201 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2202
2203 if (tab)
2204 for (i = 0; i < tab->count; i++)
2205 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2206 &data->vddc_leakage);
2207
2208 return 0;
2209}
2210
2211static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
2212 struct phm_phase_shedding_limits_table *tab)
2213{
2214 uint16_t i;
2215 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2216
2217 if (tab)
2218 for (i = 0; i < tab->count; i++)
2219 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
2220 &data->vddc_leakage);
2221
2222 return 0;
2223}
2224
2225static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
2226 struct phm_samu_clock_voltage_dependency_table *tab)
2227{
2228 uint16_t i;
2229 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2230
2231 if (tab)
2232 for (i = 0; i < tab->count; i++)
2233 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2234 &data->vddc_leakage);
2235
2236 return 0;
2237}
2238
2239static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
2240 struct phm_acp_clock_voltage_dependency_table *tab)
2241{
2242 uint16_t i;
2243 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2244
2245 if (tab)
2246 for (i = 0; i < tab->count; i++)
2247 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2248 &data->vddc_leakage);
2249
2250 return 0;
2251}
2252
2253static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
2254 struct phm_clock_and_voltage_limits *tab)
2255{
2256 uint32_t vddc, vddci;
2257 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2258
2259 if (tab) {
2260 vddc = tab->vddc;
2261 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
2262 &data->vddc_leakage);
2263 tab->vddc = vddc;
2264 vddci = tab->vddci;
2265 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
2266 &data->vddci_leakage);
2267 tab->vddci = vddci;
2268 }
2269
2270 return 0;
2271}
2272
2273static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
2274{
2275 uint32_t i;
2276 uint32_t vddc;
2277 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2278
2279 if (tab) {
2280 for (i = 0; i < tab->count; i++) {
2281 vddc = (uint32_t)(tab->entries[i].Vddc);
2282 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
2283 tab->entries[i].Vddc = (uint16_t)vddc;
2284 }
2285 }
2286
2287 return 0;
2288}
2289
2290static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
2291{
2292 int tmp;
2293
2294 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
2295 if (tmp)
2296 return -EINVAL;
2297
2298 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
2299 if (tmp)
2300 return -EINVAL;
2301
2302 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2303 if (tmp)
2304 return -EINVAL;
2305
2306 tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
2307 if (tmp)
2308 return -EINVAL;
2309
2310 tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
2311 if (tmp)
2312 return -EINVAL;
2313
2314 tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
2315 if (tmp)
2316 return -EINVAL;
2317
2318 tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
2319 if (tmp)
2320 return -EINVAL;
2321
2322 tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
2323 if (tmp)
2324 return -EINVAL;
2325
2326 tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
2327 if (tmp)
2328 return -EINVAL;
2329
2330 tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
2331 if (tmp)
2332 return -EINVAL;
2333
2334 tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
2335 if (tmp)
2336 return -EINVAL;
2337
2338 tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
2339 if (tmp)
2340 return -EINVAL;
2341
2342 return 0;
2343}
2344
2345
2346static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
2347{
2348 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2349
2350 struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
2351 struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
2352 struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
2353
2354 PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
2355 "VDDC dependency on SCLK table is missing. This table is mandatory",
2356 return -EINVAL);
2357 PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
2358 "VDDC dependency on SCLK table has to have is missing. This table is mandatory",
2359 return -EINVAL);
2360
2361 PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
2362 "VDDC dependency on MCLK table is missing. This table is mandatory",
2363 return -EINVAL);
2364 PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
2365 "VDD dependency on MCLK table has to have is missing. This table is mandatory",
2366 return -EINVAL);
2367
2368 data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
2369 data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2370
2371 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
2372 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
2373 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
2374 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
2375 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
2376 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2377
2378 if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
2379 data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
2380 data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
2381 }
2382
2383 if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1)
2384 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
2385
2386 return 0;
2387}
2388
/*
 * smu7_hwmgr_backend_fini - release the smu7 backend private data.
 *
 * Frees the DAL power-level table and the backend struct, NULLing both
 * pointers so a repeated call is harmless (kfree(NULL) is a no-op).
 * Always returns 0.
 */
static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
	kfree(hwmgr->backend);
	hwmgr->backend = NULL;

	return 0;
}
2398
2399static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr)
2400{
2401 uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id;
2402 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2403 int i;
2404
2405 if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) {
2406 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
2407 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
2408 if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci,
2409 virtual_voltage_id,
2410 efuse_voltage_id) == 0) {
2411 if (vddc != 0 && vddc != virtual_voltage_id) {
2412 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
2413 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
2414 data->vddc_leakage.count++;
2415 }
2416 if (vddci != 0 && vddci != virtual_voltage_id) {
2417 data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci;
2418 data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id;
2419 data->vddci_leakage.count++;
2420 }
2421 }
2422 }
2423 }
2424 return 0;
2425}
2426
2427static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2428{
2429 struct smu7_hwmgr *data;
2430 int result = 0;
2431
2432 data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
2433 if (data == NULL)
2434 return -ENOMEM;
2435
2436 hwmgr->backend = data;
2437 smu7_patch_voltage_workaround(hwmgr);
2438 smu7_init_dpm_defaults(hwmgr);
2439
2440
2441 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2442 PHM_PlatformCaps_EVV)) {
2443 result = smu7_get_evv_voltages(hwmgr);
2444 if (result) {
2445 pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
2446 return -EINVAL;
2447 }
2448 } else {
2449 smu7_get_elb_voltages(hwmgr);
2450 }
2451
2452 if (hwmgr->pp_table_version == PP_TABLE_V1) {
2453 smu7_complete_dependency_tables(hwmgr);
2454 smu7_set_private_data_based_on_pptable_v1(hwmgr);
2455 } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
2456 smu7_patch_dependency_tables_with_leakage(hwmgr);
2457 smu7_set_private_data_based_on_pptable_v0(hwmgr);
2458 }
2459
2460
2461 result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
2462
2463 if (0 == result) {
2464 struct amdgpu_device *adev = hwmgr->adev;
2465
2466 data->is_tlu_enabled = false;
2467
2468 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
2469 SMU7_MAX_HARDWARE_POWERLEVELS;
2470 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
2471 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
2472
2473 data->pcie_gen_cap = adev->pm.pcie_gen_mask;
2474 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2475 data->pcie_spc_cap = 20;
2476 data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
2477
2478 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400;
2479
2480 hwmgr->platform_descriptor.clockStep.engineClock = 500;
2481 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
2482 smu7_thermal_parameter_init(hwmgr);
2483 } else {
2484
2485 smu7_hwmgr_backend_fini(hwmgr);
2486 }
2487
2488 return 0;
2489}
2490
2491static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
2492{
2493 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2494 uint32_t level, tmp;
2495
2496 if (!data->pcie_dpm_key_disabled) {
2497 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2498 level = 0;
2499 tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
2500 while (tmp >>= 1)
2501 level++;
2502
2503 if (level)
2504 smum_send_msg_to_smc_with_parameter(hwmgr,
2505 PPSMC_MSG_PCIeDPM_ForceLevel, level);
2506 }
2507 }
2508
2509 if (!data->sclk_dpm_key_disabled) {
2510 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2511 level = 0;
2512 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
2513 while (tmp >>= 1)
2514 level++;
2515
2516 if (level)
2517 smum_send_msg_to_smc_with_parameter(hwmgr,
2518 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2519 (1 << level));
2520 }
2521 }
2522
2523 if (!data->mclk_dpm_key_disabled) {
2524 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2525 level = 0;
2526 tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
2527 while (tmp >>= 1)
2528 level++;
2529
2530 if (level)
2531 smum_send_msg_to_smc_with_parameter(hwmgr,
2532 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2533 (1 << level));
2534 }
2535 }
2536
2537 return 0;
2538}
2539
2540static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
2541{
2542 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2543
2544 if (hwmgr->pp_table_version == PP_TABLE_V1)
2545 phm_apply_dal_min_voltage_request(hwmgr);
2546
2547
2548 if (!data->sclk_dpm_key_disabled) {
2549 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
2550 smum_send_msg_to_smc_with_parameter(hwmgr,
2551 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2552 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2553 }
2554
2555 if (!data->mclk_dpm_key_disabled) {
2556 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
2557 smum_send_msg_to_smc_with_parameter(hwmgr,
2558 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2559 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2560 }
2561
2562 return 0;
2563}
2564
2565static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2566{
2567 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2568
2569 if (!smum_is_dpm_running(hwmgr))
2570 return -EINVAL;
2571
2572 if (!data->pcie_dpm_key_disabled) {
2573 smum_send_msg_to_smc(hwmgr,
2574 PPSMC_MSG_PCIeDPM_UnForceLevel);
2575 }
2576
2577 return smu7_upload_dpm_level_enable_mask(hwmgr);
2578}
2579
2580static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2581{
2582 struct smu7_hwmgr *data =
2583 (struct smu7_hwmgr *)(hwmgr->backend);
2584 uint32_t level;
2585
2586 if (!data->sclk_dpm_key_disabled)
2587 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2588 level = phm_get_lowest_enabled_level(hwmgr,
2589 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2590 smum_send_msg_to_smc_with_parameter(hwmgr,
2591 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2592 (1 << level));
2593
2594 }
2595
2596 if (!data->mclk_dpm_key_disabled) {
2597 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2598 level = phm_get_lowest_enabled_level(hwmgr,
2599 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2600 smum_send_msg_to_smc_with_parameter(hwmgr,
2601 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2602 (1 << level));
2603 }
2604 }
2605
2606 if (!data->pcie_dpm_key_disabled) {
2607 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2608 level = phm_get_lowest_enabled_level(hwmgr,
2609 data->dpm_level_enable_mask.pcie_dpm_enable_mask);
2610 smum_send_msg_to_smc_with_parameter(hwmgr,
2611 PPSMC_MSG_PCIeDPM_ForceLevel,
2612 (level));
2613 }
2614 }
2615
2616 return 0;
2617}
2618
/*
 * smu7_get_profiling_clk - choose SCLK/MCLK/PCIe level masks for the
 * profiling forced-level modes and record the pstate clocks.
 *
 * The target SCLK is derived from the second-highest (or only) golden
 * MCLK level scaled by the top-SCLK/top-MCLK ratio, then snapped to the
 * nearest dependency-table entry at or below it.  MIN_SCLK/MIN_MCLK/PEAK
 * levels override the derived masks.  Returns -EINVAL if the golden MCLK
 * table is empty.
 */
static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
				uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask)
{
	uint32_t percentage;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
	int32_t tmp_mclk;
	int32_t tmp_sclk;
	int32_t count;

	if (golden_dpm_table->mclk_table.count < 1)
		return -EINVAL;

	/* ratio of top SCLK to top MCLK, in percent */
	percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
			golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;

	if (golden_dpm_table->mclk_table.count == 1) {
		/* single MCLK level: fixed 70% heuristic */
		percentage = 70;
		tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
		*mclk_mask = golden_dpm_table->mclk_table.count - 1;
	} else {
		tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
		*mclk_mask = golden_dpm_table->mclk_table.count - 2;
	}

	tmp_sclk = tmp_mclk * percentage / 100;

	if (hwmgr->pp_table_version == PP_TABLE_V0) {
		/* snap tmp_sclk down to the nearest table entry */
		for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
			count >= 0; count--) {
			if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
				tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
				*sclk_mask = count;
				break;
			}
		}
		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
			*sclk_mask = 0;
			tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
		}

		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			*sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
	} else if (hwmgr->pp_table_version == PP_TABLE_V1) {
		struct phm_ppt_v1_information *table_info =
				(struct phm_ppt_v1_information *)(hwmgr->pptable);

		/* same snapping logic against the v1 dependency table */
		for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
			if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
				tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
				*sclk_mask = count;
				break;
			}
		}
		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
			*sclk_mask = 0;
			tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
		}

		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
	}

	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
		*mclk_mask = 0;
	else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
		*mclk_mask = golden_dpm_table->mclk_table.count - 1;

	/* PCIe is always taken at its highest level */
	*pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
	hwmgr->pstate_sclk = tmp_sclk;
	hwmgr->pstate_mclk = tmp_mclk;

	return 0;
}
2693
/*
 * smu7_force_dpm_level - apply a requested forced DPM level.
 *
 * HIGH/LOW/AUTO delegate to the dedicated helpers; the profiling levels
 * compute clock masks via smu7_get_profiling_clk() and force each clock
 * domain to a single level.  MANUAL and PROFILE_EXIT deliberately change
 * nothing.  On success, entering PROFILE_PEAK ramps the fan to 100% and
 * leaving it restores the default fan speed.
 */
static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
				enum amd_dpm_forced_level level)
{
	int ret = 0;
	uint32_t sclk_mask = 0;
	uint32_t mclk_mask = 0;
	uint32_t pcie_mask = 0;

	/* lazily populate the cached pstate clocks on first use */
	if (hwmgr->pstate_sclk == 0)
		smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		ret = smu7_force_dpm_highest(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		ret = smu7_force_dpm_lowest(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		ret = smu7_unforce_dpm_levels(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
		if (ret)
			return ret;
		smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
		smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
		smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}

	if (!ret) {
		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
		else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
	}
	return ret;
}
2740
/* Report the size of the chip-specific power-state structure. */
static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct smu7_power_state);
}
2745
2746static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
2747 uint32_t vblank_time_us)
2748{
2749 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2750 uint32_t switch_limit_us;
2751
2752 switch (hwmgr->chip_id) {
2753 case CHIP_POLARIS10:
2754 case CHIP_POLARIS11:
2755 case CHIP_POLARIS12:
2756 switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
2757 break;
2758 default:
2759 switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
2760 break;
2761 }
2762
2763 if (vblank_time_us < switch_limit_us)
2764 return true;
2765 else
2766 return false;
2767}
2768
/*
 * smu7_apply_state_adjust_rules - clamp and adjust a requested power state
 * before it is programmed.
 *
 * Applies, in order: DC power-source clock caps, display minimum clocks,
 * the StablePState fixed-clock mode, and the MCLK-switch restrictions
 * (multiple displays, frame lock, or a vblank too short to hide the
 * switch).  Finally ensures performance level 1 is never below level 0.
 * Always returns 0.
 */
static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				struct pp_power_state *request_ps,
			const struct pp_power_state *current_ps)
{

	struct smu7_power_state *smu7_ps =
				cast_phw_smu7_power_state(&request_ps->hardware);
	uint32_t sclk;
	uint32_t mclk;
	struct PP_Clocks minimum_clocks = {0};
	bool disable_mclk_switching;
	bool disable_mclk_switching_for_frame_lock;
	struct cgs_display_info info = {0};
	struct cgs_mode_info mode_info = {0};
	const struct phm_clock_and_voltage_limits *max_limits;
	uint32_t i;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	int32_t count;
	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;

	info.mode_info = &mode_info;
	data->battery_state = (PP_StateUILabel_Battery ==
			request_ps->classification.ui_label);

	PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
				 "VI should always have 2 performance levels",
				);

	max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
			&(hwmgr->dyn_state.max_clock_voltage_on_dc);

	/* Cap clock DPM tables at DC MAX if it is in DC. */
	if (PP_PowerSource_DC == hwmgr->power_source) {
		for (i = 0; i < smu7_ps->performance_level_count; i++) {
			if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
				smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
			if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
				smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
		}
	}

	cgs_get_active_displays_info(hwmgr->device, &info);

	minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
	minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_StablePState)) {
		/* stable pstate: 75% of the AC max SCLK, snapped down to a
		 * dependency-table entry; MCLK pinned at AC max */
		max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
		stable_pstate_sclk = (max_limits->sclk * 75) / 100;

		for (count = table_info->vdd_dep_on_sclk->count - 1;
				count >= 0; count--) {
			if (stable_pstate_sclk >=
					table_info->vdd_dep_on_sclk->entries[count].clk) {
				stable_pstate_sclk =
						table_info->vdd_dep_on_sclk->entries[count].clk;
				break;
			}
		}

		if (count < 0)
			stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;

		stable_pstate_mclk = max_limits->mclk;

		minimum_clocks.engineClock = stable_pstate_sclk;
		minimum_clocks.memoryClock = stable_pstate_mclk;
	}

	disable_mclk_switching_for_frame_lock = phm_cap_enabled(
				    hwmgr->platform_descriptor.platformCaps,
				    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);


	if (info.display_count == 0)
		disable_mclk_switching = false;
	else
		disable_mclk_switching = ((1 < info.display_count) ||
					  disable_mclk_switching_for_frame_lock ||
					  smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us));

	sclk = smu7_ps->performance_levels[0].engine_clock;
	mclk = smu7_ps->performance_levels[0].memory_clock;

	if (disable_mclk_switching)
		mclk = smu7_ps->performance_levels
		[smu7_ps->performance_level_count - 1].memory_clock;

	/* raise the low level to the display minimums, capped at the limits */
	if (sclk < minimum_clocks.engineClock)
		sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
				max_limits->sclk : minimum_clocks.engineClock;

	if (mclk < minimum_clocks.memoryClock)
		mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
				max_limits->mclk : minimum_clocks.memoryClock;

	smu7_ps->performance_levels[0].engine_clock = sclk;
	smu7_ps->performance_levels[0].memory_clock = mclk;

	/* level 1 engine clock must be >= level 0 */
	smu7_ps->performance_levels[1].engine_clock =
		(smu7_ps->performance_levels[1].engine_clock >=
				smu7_ps->performance_levels[0].engine_clock) ?
						smu7_ps->performance_levels[1].engine_clock :
						smu7_ps->performance_levels[0].engine_clock;

	if (disable_mclk_switching) {
		/* no switching: both levels run the same (highest) MCLK */
		if (mclk < smu7_ps->performance_levels[1].memory_clock)
			mclk = smu7_ps->performance_levels[1].memory_clock;

		smu7_ps->performance_levels[0].memory_clock = mclk;
		smu7_ps->performance_levels[1].memory_clock = mclk;
	} else {
		if (smu7_ps->performance_levels[1].memory_clock <
				smu7_ps->performance_levels[0].memory_clock)
			smu7_ps->performance_levels[1].memory_clock =
					smu7_ps->performance_levels[0].memory_clock;
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_StablePState)) {
		for (i = 0; i < smu7_ps->performance_level_count; i++) {
			smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
			smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
			smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
			/* NOTE(review): pcie_lane is assigned from
			 * pcie_gen_performance.max — looks like it should be
			 * pcie_lane_performance.max; confirm before changing. */
			smu7_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
		}
	}
	return 0;
}
2902
2903
2904static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
2905{
2906 struct pp_power_state *ps;
2907 struct smu7_power_state *smu7_ps;
2908
2909 if (hwmgr == NULL)
2910 return -EINVAL;
2911
2912 ps = hwmgr->request_ps;
2913
2914 if (ps == NULL)
2915 return -EINVAL;
2916
2917 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
2918
2919 if (low)
2920 return smu7_ps->performance_levels[0].memory_clock;
2921 else
2922 return smu7_ps->performance_levels
2923 [smu7_ps->performance_level_count-1].memory_clock;
2924}
2925
2926static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
2927{
2928 struct pp_power_state *ps;
2929 struct smu7_power_state *smu7_ps;
2930
2931 if (hwmgr == NULL)
2932 return -EINVAL;
2933
2934 ps = hwmgr->request_ps;
2935
2936 if (ps == NULL)
2937 return -EINVAL;
2938
2939 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
2940
2941 if (low)
2942 return smu7_ps->performance_levels[0].engine_clock;
2943 else
2944 return smu7_ps->performance_levels
2945 [smu7_ps->performance_level_count-1].engine_clock;
2946}
2947
/*
 * Patch the boot power state so its level 0 reflects the clocks, voltages
 * and PCIe settings the VBIOS actually booted with, as read from the
 * ATOM FirmwareInfo data table and the current PCIe link registers.
 */
static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
		struct pp_hw_power_state *hw_ps)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
	uint16_t size;
	uint8_t frev, crev;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);



	/* Fetch the FirmwareInfo table that carries the VBIOS boot-up
	 * engine/memory clocks and voltages.
	 */
	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
			hwmgr->device, index,
			&size, &frev, &crev);
	if (!fw_info)
		/* No FirmwareInfo table: leave the state untouched; not fatal. */
		return 0;

	/* Cache the VBIOS boot-up values (table fields are little-endian). */
	data->vbios_boot_state.sclk_bootup_value =
			le32_to_cpu(fw_info->ulDefaultEngineClock);
	data->vbios_boot_state.mclk_bootup_value =
			le32_to_cpu(fw_info->ulDefaultMemoryClock);
	data->vbios_boot_state.mvdd_bootup_value =
			le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
	data->vbios_boot_state.vddc_bootup_value =
			le16_to_cpu(fw_info->usBootUpVDDCVoltage);
	data->vbios_boot_state.vddci_bootup_value =
			le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
	/* PCIe boot settings come from the live link, not the table. */
	data->vbios_boot_state.pcie_gen_bootup_value =
			smu7_get_current_pcie_speed(hwmgr);

	data->vbios_boot_state.pcie_lane_bootup_value =
			(uint16_t)smu7_get_current_pcie_lane_number(hwmgr);

	/* Patch the boot (index 0) performance level with the cached values. */
	ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
	ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
	ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;

	return 0;
}
2993
2994static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
2995{
2996 int result;
2997 unsigned long ret = 0;
2998
2999 if (hwmgr->pp_table_version == PP_TABLE_V0) {
3000 result = pp_tables_get_num_of_entries(hwmgr, &ret);
3001 return result ? 0 : ret;
3002 } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
3003 result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
3004 return result;
3005 }
3006 return 0;
3007}
3008
3009static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
3010 void *state, struct pp_power_state *power_state,
3011 void *pp_table, uint32_t classification_flag)
3012{
3013 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3014 struct smu7_power_state *smu7_power_state =
3015 (struct smu7_power_state *)(&(power_state->hardware));
3016 struct smu7_performance_level *performance_level;
3017 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
3018 ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
3019 (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
3020 PPTable_Generic_SubTable_Header *sclk_dep_table =
3021 (PPTable_Generic_SubTable_Header *)
3022 (((unsigned long)powerplay_table) +
3023 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
3024
3025 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
3026 (ATOM_Tonga_MCLK_Dependency_Table *)
3027 (((unsigned long)powerplay_table) +
3028 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
3029
3030
3031 power_state->classification.ui_label =
3032 (le16_to_cpu(state_entry->usClassification) &
3033 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
3034 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
3035 power_state->classification.flags = classification_flag;
3036
3037
3038 power_state->classification.temporary_state = false;
3039 power_state->classification.to_be_deleted = false;
3040
3041 power_state->validation.disallowOnDC =
3042 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3043 ATOM_Tonga_DISALLOW_ON_DC));
3044
3045 power_state->pcie.lanes = 0;
3046
3047 power_state->display.disableFrameModulation = false;
3048 power_state->display.limitRefreshrate = false;
3049 power_state->display.enableVariBright =
3050 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3051 ATOM_Tonga_ENABLE_VARIBRIGHT));
3052
3053 power_state->validation.supportedPowerLevels = 0;
3054 power_state->uvd_clocks.VCLK = 0;
3055 power_state->uvd_clocks.DCLK = 0;
3056 power_state->temperatures.min = 0;
3057 power_state->temperatures.max = 0;
3058
3059 performance_level = &(smu7_power_state->performance_levels
3060 [smu7_power_state->performance_level_count++]);
3061
3062 PP_ASSERT_WITH_CODE(
3063 (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
3064 "Performance levels exceeds SMC limit!",
3065 return -EINVAL);
3066
3067 PP_ASSERT_WITH_CODE(
3068 (smu7_power_state->performance_level_count <=
3069 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3070 "Performance levels exceeds Driver limit!",
3071 return -EINVAL);
3072
3073
3074 performance_level->memory_clock = mclk_dep_table->entries
3075 [state_entry->ucMemoryClockIndexLow].ulMclk;
3076 if (sclk_dep_table->ucRevId == 0)
3077 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3078 [state_entry->ucEngineClockIndexLow].ulSclk;
3079 else if (sclk_dep_table->ucRevId == 1)
3080 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3081 [state_entry->ucEngineClockIndexLow].ulSclk;
3082 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3083 state_entry->ucPCIEGenLow);
3084 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3085 state_entry->ucPCIELaneHigh);
3086
3087 performance_level = &(smu7_power_state->performance_levels
3088 [smu7_power_state->performance_level_count++]);
3089 performance_level->memory_clock = mclk_dep_table->entries
3090 [state_entry->ucMemoryClockIndexHigh].ulMclk;
3091
3092 if (sclk_dep_table->ucRevId == 0)
3093 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3094 [state_entry->ucEngineClockIndexHigh].ulSclk;
3095 else if (sclk_dep_table->ucRevId == 1)
3096 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3097 [state_entry->ucEngineClockIndexHigh].ulSclk;
3098
3099 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3100 state_entry->ucPCIEGenHigh);
3101 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3102 state_entry->ucPCIELaneHigh);
3103
3104 return 0;
3105}
3106
/*
 * Fill a driver power state from entry @entry_index of a v1 power-play
 * table, then post-process it: sanity-check the single-entry MCLK/VDDCI
 * dependency table against the VBIOS boot values, record DC compatibility
 * and ACPI PCIe gen, and track the PCIe gen/lane min/max seen across
 * Performance and Battery states.
 */
static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
		unsigned long entry_index, struct pp_power_state *state)
{
	int result;
	struct smu7_power_state *ps;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
			table_info->vdd_dep_on_mclk;

	state->hardware.magic = PHM_VIslands_Magic;

	ps = (struct smu7_power_state *)(&state->hardware);

	/* Populates classification and performance levels via the v1 callback. */
	result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
			smu7_get_pp_table_entry_callback_func_v1);

	/* Sanity check: with a single-entry VDDCI/MCLK dependency table, the
	 * one entry should match what the VBIOS actually booted with.
	 */
	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
		if (dep_mclk_table->entries[0].clk !=
				data->vbios_boot_state.mclk_bootup_value)
			pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot MCLK level");
		if (dep_mclk_table->entries[0].vddci !=
				data->vbios_boot_state.vddci_bootup_value)
			pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot VDDCI level");
	}

	/* A state not explicitly disallowed on DC power is DC-compatible. */
	if (!state->validation.disallowOnDC)
		ps->dc_compatible = true;

	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;

	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;

	if (!result) {
		uint32_t i;

		/* Widen the tracked PCIe gen/lane ranges with this state's levels. */
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Performance:
			data->use_pcie_performance_levels = true;
			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_performance.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_performance.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_performance.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.max =
							ps->performance_levels[i].pcie_lane;
				if (data->pcie_lane_performance.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		case PP_StateUILabel_Battery:
			data->use_pcie_power_saving_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_power_saving.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_power_saving.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_power_saving.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_power_saving.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		default:
			break;
		}
	}
	/* NOTE(review): result from the table parse is checked but not
	 * propagated; this function always returns 0.
	 */
	return 0;
}
3208
3209static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
3210 struct pp_hw_power_state *power_state,
3211 unsigned int index, const void *clock_info)
3212{
3213 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3214 struct smu7_power_state *ps = cast_phw_smu7_power_state(power_state);
3215 const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
3216 struct smu7_performance_level *performance_level;
3217 uint32_t engine_clock, memory_clock;
3218 uint16_t pcie_gen_from_bios;
3219
3220 engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
3221 memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;
3222
3223 if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
3224 data->highest_mclk = memory_clock;
3225
3226 PP_ASSERT_WITH_CODE(
3227 (ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
3228 "Performance levels exceeds SMC limit!",
3229 return -EINVAL);
3230
3231 PP_ASSERT_WITH_CODE(
3232 (ps->performance_level_count <
3233 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3234 "Performance levels exceeds Driver limit, Skip!",
3235 return 0);
3236
3237 performance_level = &(ps->performance_levels
3238 [ps->performance_level_count++]);
3239
3240
3241 performance_level->memory_clock = memory_clock;
3242 performance_level->engine_clock = engine_clock;
3243
3244 pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
3245
3246 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
3247 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);
3248
3249 return 0;
3250}
3251
/*
 * Fill a driver power state from entry @entry_index of a v0 power-play
 * table, then post-process it exactly like the v1 path: sanity-check the
 * single-entry VDDCI/MCLK dependency table against VBIOS boot values,
 * record DC compatibility and ACPI PCIe gen, and track PCIe gen/lane
 * min/max for Performance and Battery states.
 */
static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
		unsigned long entry_index, struct pp_power_state *state)
{
	int result;
	struct smu7_power_state *ps;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_clock_voltage_dependency_table *dep_mclk_table =
			hwmgr->dyn_state.vddci_dependency_on_mclk;

	memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));

	state->hardware.magic = PHM_VIslands_Magic;

	ps = (struct smu7_power_state *)(&state->hardware);

	/* Populates the state via the v0 clock-info callback. */
	result = pp_tables_get_entry(hwmgr, entry_index, state,
			smu7_get_pp_table_entry_callback_func_v0);

	/* Sanity check: with a single-entry VDDCI/MCLK dependency table, the
	 * one entry should match what the VBIOS actually booted with.
	 */
	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
		if (dep_mclk_table->entries[0].clk !=
				data->vbios_boot_state.mclk_bootup_value)
			pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot MCLK level");
		if (dep_mclk_table->entries[0].v !=
				data->vbios_boot_state.vddci_bootup_value)
			pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot VDDCI level");
	}

	/* A state not explicitly disallowed on DC power is DC-compatible. */
	if (!state->validation.disallowOnDC)
		ps->dc_compatible = true;

	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;

	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;

	if (!result) {
		uint32_t i;

		/* Widen the tracked PCIe gen/lane ranges with this state's levels. */
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Performance:
			data->use_pcie_performance_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_performance.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_performance.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_performance.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_performance.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		case PP_StateUILabel_Battery:
			data->use_pcie_power_saving_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_power_saving.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_power_saving.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_power_saving.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_power_saving.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		default:
			break;
		}
	}
	/* NOTE(review): result from the table parse is checked but not
	 * propagated; this function always returns 0.
	 */
	return 0;
}
3358
3359static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3360 unsigned long entry_index, struct pp_power_state *state)
3361{
3362 if (hwmgr->pp_table_version == PP_TABLE_V0)
3363 return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
3364 else if (hwmgr->pp_table_version == PP_TABLE_V1)
3365 return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);
3366
3367 return 0;
3368}
3369
/*
 * Sample GPU power readings from the SMC's PM status log.
 *
 * Starts the SMC power-status log, lets it accumulate for ~200 ms, takes a
 * sample, then reads the resulting values out of the SMU_PM_STATUS
 * indirect registers into @query.  Values are the raw register contents;
 * units/scaling are defined by the SMC firmware (not visible here).
 *
 * Returns 0 on success, -1 if either SMC message fails.
 */
static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr,
		struct pp_gpu_power *query)
{
	PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_PmStatusLogStart),
			"Failed to start pm status log!",
			return -1);

	/* Sampling window: let the SMC accumulate statistics. */
	msleep_interruptible(200);

	PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_PmStatusLogSample),
			"Failed to sample pm status log!",
			return -1);

	query->vddc_power = cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC,
			ixSMU_PM_STATUS_40);
	query->vddci_power = cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC,
			ixSMU_PM_STATUS_49);
	query->max_gpu_power = cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC,
			ixSMU_PM_STATUS_94);
	query->average_gpu_power = cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC,
			ixSMU_PM_STATUS_95);

	return 0;
}
3401
/*
 * Read a single sensor value identified by @idx into @value.
 *
 * @size is in/out: callers pass the buffer size and get back the number of
 * bytes written.  All sensors produce a 4-byte result except
 * AMDGPU_PP_SENSOR_GPU_POWER, which fills a struct pp_gpu_power.
 *
 * Returns 0 on success, -EINVAL for unknown sensors or a too-small buffer.
 */
static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
			    void *value, int *size)
{
	uint32_t sclk, mclk, activity_percent;
	uint32_t offset, val_vid;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* Every sensor below writes at least 4 bytes. */
	if (*size < 4)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		/* SMC replies via the message-argument register. */
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
		sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
		*((uint32_t *)value) = sclk;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
		mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
		*((uint32_t *)value) = mclk;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
								SMU_SoftRegisters,
								AverageGraphicsActivity);

		activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
		/* Round the fixed-point activity value (+0.5 then >>8) and
		 * clamp to 100%.
		 */
		activity_percent += 0x80;
		activity_percent >>= 8;
		*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr);
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		/* 1 = powered, 0 = power-gated */
		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		if (*size < sizeof(struct pp_gpu_power))
			return -EINVAL;
		*size = sizeof(struct pp_gpu_power);
		return smu7_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
	case AMDGPU_PP_SENSOR_VDDGFX:
		/* vr_config low byte 0x2 selects SVI2 plane 2, else plane 1. */
		if ((data->vr_config & 0xff) == 0x2)
			val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
					CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID);
		else
			val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
					CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID);

		*((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid);
		return 0;
	default:
		return -EINVAL;
	}
}
3468
/*
 * Compare the new power state's top-level SCLK/MCLK against the current
 * DPM tables and set need_update_smu7_dpm_table flags: OD_UPDATE_* when a
 * requested clock is absent from the table (overdrive change),
 * UPDATE_SCLK when the deep-sleep minimum clock changed, and UPDATE_MCLK
 * when the display count changed.
 */
static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct smu7_power_state *smu7_ps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	uint32_t sclk = smu7_ps->performance_levels
			[smu7_ps->performance_level_count - 1].engine_clock;
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	uint32_t mclk = smu7_ps->performance_levels
			[smu7_ps->performance_level_count - 1].memory_clock;
	struct PP_Clocks min_clocks = {0};
	uint32_t i;
	struct cgs_display_info info = {0};

	for (i = 0; i < sclk_table->count; i++) {
		if (sclk == sclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= sclk_table->count)
		/* Requested SCLK not in the table: overdrive value changed. */
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
	else {
		/* NOTE(review): min_clocks is zero-initialized and never
		 * populated here, so engineClockInSR is always 0; this branch
		 * presumably intended to compare against a queried minimum
		 * deep-sleep clock — confirm against other asic variants.
		 */
		if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
			(min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
			data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
			data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
	}

	for (i = 0; i < mclk_table->count; i++) {
		if (mclk == mclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= mclk_table->count)
		/* Requested MCLK not in the table: overdrive value changed. */
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

	cgs_get_active_displays_info(hwmgr->device, &info);

	/* A display count change forces the memory levels to be repopulated. */
	if (data->display_timing.num_existing_displays != info.display_count)
		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;

	return 0;
}
3518
3519static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
3520 const struct smu7_power_state *smu7_ps)
3521{
3522 uint32_t i;
3523 uint32_t sclk, max_sclk = 0;
3524 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3525 struct smu7_dpm_table *dpm_table = &data->dpm_table;
3526
3527 for (i = 0; i < smu7_ps->performance_level_count; i++) {
3528 sclk = smu7_ps->performance_levels[i].engine_clock;
3529 if (max_sclk < sclk)
3530 max_sclk = sclk;
3531 }
3532
3533 for (i = 0; i < dpm_table->sclk_table.count; i++) {
3534 if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
3535 return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
3536 dpm_table->pcie_speed_table.dpm_levels
3537 [dpm_table->pcie_speed_table.count - 1].value :
3538 dpm_table->pcie_speed_table.dpm_levels[i].value);
3539 }
3540
3541 return 0;
3542}
3543
/*
 * Before a state change, ask the platform (via ACPI PSPP) to raise the
 * PCIe link speed if the new state needs a faster link than the current
 * one.  Downgrades are only flagged (pspp_notify_required) and performed
 * after the state change.
 */
static int smu7_request_link_speed_change_before_state_change(
		struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct smu7_power_state *smu7_nps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	const struct smu7_power_state *polaris10_cps =
			cast_const_phw_smu7_power_state(states->pcurrent_state);

	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
	uint16_t current_link_speed;

	if (data->force_pcie_gen == PP_PCIEGenInvalid)
		current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps);
	else
		current_link_speed = data->force_pcie_gen;

	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->pspp_notify_required = false;

	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
#ifdef CONFIG_ACPI
		case PP_PCIEGen3:
			if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN3, false))
				break;
			/* Gen3 request failed: fall back to Gen2... */
			data->force_pcie_gen = PP_PCIEGen2;
			if (current_link_speed == PP_PCIEGen2)
				break;
			/* fall through - ...by issuing a Gen2 request */
		case PP_PCIEGen2:
			if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
				break;
			/* fall through - request failed, keep the current speed */
#endif
		default:
			data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
			break;
		}
	} else {
		/* Downgrades are deferred until after the state change. */
		if (target_link_speed < current_link_speed)
			data->pspp_notify_required = true;
	}

	return 0;
}
3590
3591static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3592{
3593 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3594
3595 if (0 == data->need_update_smu7_dpm_table)
3596 return 0;
3597
3598 if ((0 == data->sclk_dpm_key_disabled) &&
3599 (data->need_update_smu7_dpm_table &
3600 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
3601 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3602 "Trying to freeze SCLK DPM when DPM is disabled",
3603 );
3604 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3605 PPSMC_MSG_SCLKDPM_FreezeLevel),
3606 "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
3607 return -EINVAL);
3608 }
3609
3610 if ((0 == data->mclk_dpm_key_disabled) &&
3611 (data->need_update_smu7_dpm_table &
3612 DPMTABLE_OD_UPDATE_MCLK)) {
3613 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3614 "Trying to freeze MCLK DPM when DPM is disabled",
3615 );
3616 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3617 PPSMC_MSG_MCLKDPM_FreezeLevel),
3618 "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
3619 return -EINVAL);
3620 }
3621
3622 return 0;
3623}
3624
3625static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
3626 struct pp_hwmgr *hwmgr, const void *input)
3627{
3628 int result = 0;
3629 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3630 struct smu7_dpm_table *dpm_table = &data->dpm_table;
3631 uint32_t count;
3632 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
3633 struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
3634 struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
3635
3636 if (0 == data->need_update_smu7_dpm_table)
3637 return 0;
3638
3639 if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
3640 for (count = 0; count < dpm_table->sclk_table.count; count++) {
3641 dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled;
3642 dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock;
3643 }
3644 }
3645
3646 if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
3647 for (count = 0; count < dpm_table->mclk_table.count; count++) {
3648 dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled;
3649 dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock;
3650 }
3651 }
3652
3653 if (data->need_update_smu7_dpm_table &
3654 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
3655 result = smum_populate_all_graphic_levels(hwmgr);
3656 PP_ASSERT_WITH_CODE((0 == result),
3657 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
3658 return result);
3659 }
3660
3661 if (data->need_update_smu7_dpm_table &
3662 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3663
3664 result = smum_populate_all_memory_levels(hwmgr);
3665 PP_ASSERT_WITH_CODE((0 == result),
3666 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
3667 return result);
3668 }
3669
3670 return result;
3671}
3672
3673static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3674 struct smu7_single_dpm_table *dpm_table,
3675 uint32_t low_limit, uint32_t high_limit)
3676{
3677 uint32_t i;
3678
3679 for (i = 0; i < dpm_table->count; i++) {
3680 if ((dpm_table->dpm_levels[i].value < low_limit)
3681 || (dpm_table->dpm_levels[i].value > high_limit))
3682 dpm_table->dpm_levels[i].enabled = false;
3683 else
3684 dpm_table->dpm_levels[i].enabled = true;
3685 }
3686
3687 return 0;
3688}
3689
3690static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
3691 const struct smu7_power_state *smu7_ps)
3692{
3693 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3694 uint32_t high_limit_count;
3695
3696 PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
3697 "power state did not have any performance level",
3698 return -EINVAL);
3699
3700 high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;
3701
3702 smu7_trim_single_dpm_states(hwmgr,
3703 &(data->dpm_table.sclk_table),
3704 smu7_ps->performance_levels[0].engine_clock,
3705 smu7_ps->performance_levels[high_limit_count].engine_clock);
3706
3707 smu7_trim_single_dpm_states(hwmgr,
3708 &(data->dpm_table.mclk_table),
3709 smu7_ps->performance_levels[0].memory_clock,
3710 smu7_ps->performance_levels[high_limit_count].memory_clock);
3711
3712 return 0;
3713}
3714
3715static int smu7_generate_dpm_level_enable_mask(
3716 struct pp_hwmgr *hwmgr, const void *input)
3717{
3718 int result;
3719 const struct phm_set_power_state_input *states =
3720 (const struct phm_set_power_state_input *)input;
3721 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3722 const struct smu7_power_state *smu7_ps =
3723 cast_const_phw_smu7_power_state(states->pnew_state);
3724
3725 result = smu7_trim_dpm_states(hwmgr, smu7_ps);
3726 if (result)
3727 return result;
3728
3729 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
3730 phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
3731 data->dpm_level_enable_mask.mclk_dpm_enable_mask =
3732 phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
3733 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
3734 phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
3735
3736 return 0;
3737}
3738
/*
 * smu7_unfreeze_sclk_mclk_dpm - let the SMC resume SCLK/MCLK level switching
 * after a DPM table update has been uploaded.
 *
 * A domain is unfrozen only when its DPM is not key-disabled and the pending
 * update flags show it was frozen for this update.  Returns 0 on success or
 * -EINVAL when the SMC rejects an unfreeze message.
 */
static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* Nothing was frozen if no table update is pending. */
	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if ((0 == data->sclk_dpm_key_disabled) &&
		(data->need_update_smu7_dpm_table &
		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {

		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to Unfreeze SCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_SCLKDPM_UnfreezeLevel),
			"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
			return -EINVAL);
	}

	if ((0 == data->mclk_dpm_key_disabled) &&
		(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {

		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to Unfreeze MCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_MCLKDPM_UnfreezeLevel),
			"Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
			return -EINVAL);
	}

	/* Clock updates are complete; keep only the VDDC pending flag. */
	data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;

	return 0;
}
3775
3776static int smu7_notify_link_speed_change_after_state_change(
3777 struct pp_hwmgr *hwmgr, const void *input)
3778{
3779 const struct phm_set_power_state_input *states =
3780 (const struct phm_set_power_state_input *)input;
3781 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3782 const struct smu7_power_state *smu7_ps =
3783 cast_const_phw_smu7_power_state(states->pnew_state);
3784 uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
3785 uint8_t request;
3786
3787 if (data->pspp_notify_required) {
3788 if (target_link_speed == PP_PCIEGen3)
3789 request = PCIE_PERF_REQ_GEN3;
3790 else if (target_link_speed == PP_PCIEGen2)
3791 request = PCIE_PERF_REQ_GEN2;
3792 else
3793 request = PCIE_PERF_REQ_GEN1;
3794
3795 if (request == PCIE_PERF_REQ_GEN1 &&
3796 smu7_get_current_pcie_speed(hwmgr) > 0)
3797 return 0;
3798
3799#ifdef CONFIG_ACPI
3800 if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) {
3801 if (PP_PCIEGen2 == target_link_speed)
3802 pr_info("PSPP request to switch to Gen2 from Gen3 Failed!");
3803 else
3804 pr_info("PSPP request to switch to Gen1 from Gen2 Failed!");
3805 }
3806#endif
3807 }
3808
3809 return 0;
3810}
3811
3812static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
3813{
3814 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3815
3816 if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK)
3817 smum_send_msg_to_smc_with_parameter(hwmgr,
3818 (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
3819 return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
3820}
3821
/*
 * smu7_set_power_state_tasks - run the full power-state switch sequence.
 *
 * Order matters: look up the requested clocks, optionally request a link
 * speed change before the switch, freeze DPM, upload levels and AVFS
 * voltages, regenerate and upload the enable masks, update the sclk
 * threshold, notify the SMC of display settings, unfreeze DPM, and finally
 * notify the link speed change after the switch.  Every step logs on
 * failure but the sequence continues; the last failing step's code is
 * returned (0 when everything succeeded).
 */
static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	int tmp_result, result = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to find DPM states clocks in DPM table!",
			result = tmp_result);

	/* Link speed increases are requested before the new state is live. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			smu7_request_link_speed_change_before_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to request link speed change before state change!",
				result = tmp_result);
	}

	tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to freeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to populate and upload SCLK MCLK DPM levels!",
			result = tmp_result);

	tmp_result = smu7_update_avfs(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update avfs voltages!",
			result = tmp_result);

	tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to generate DPM level enabled mask!",
			result = tmp_result);

	tmp_result = smum_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update SCLK threshold!",
			result = tmp_result);

	tmp_result = smu7_notify_smc_display(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to notify smc display settings!",
			result = tmp_result);

	tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to unfreeze SCLK MCLK DPM!",
			result = tmp_result);

	tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to upload DPM level enabled mask!",
			result = tmp_result);

	/* Link speed decreases are notified only after the switch completed. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			smu7_notify_link_speed_change_after_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to notify link speed change after state change!",
				result = tmp_result);
	}
	data->apply_optimized_settings = false;
	return result;
}
3891
3892static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
3893{
3894 hwmgr->thermal_controller.
3895 advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
3896
3897 return smum_send_msg_to_smc_with_parameter(hwmgr,
3898 PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
3899}
3900
3901static int
3902smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
3903{
3904 PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
3905
3906 return (smum_send_msg_to_smc(hwmgr, msg) == 0) ? 0 : -1;
3907}
3908
3909static int
3910smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
3911{
3912 uint32_t num_active_displays = 0;
3913 struct cgs_display_info info = {0};
3914
3915 info.mode_info = NULL;
3916 cgs_get_active_displays_info(hwmgr->device, &info);
3917
3918 num_active_displays = info.display_count;
3919
3920 if (num_active_displays > 1 && hwmgr->display_config.multi_monitor_in_sync != true)
3921 smu7_notify_smc_display_change(hwmgr, false);
3922
3923 return 0;
3924}
3925
3926
3927
3928
3929
3930
3931
/*
 * smu7_program_display_gap - program the display-gap control registers and
 * the SMU soft registers that describe frame/VBlank timing, based on the
 * currently active display mode.
 */
static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t num_active_displays = 0;
	uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
	uint32_t display_gap2;
	uint32_t pre_vbi_time_in_us;
	uint32_t frame_time_in_us;
	uint32_t ref_clock;
	uint32_t refresh_rate = 0;
	struct cgs_display_info info = {0};
	struct cgs_mode_info mode_info = {0};

	info.mode_info = &mode_info;
	cgs_get_active_displays_info(hwmgr->device, &info);
	num_active_displays = info.display_count;

	/* Use VBlank/watermark gaps only while at least one display is active. */
	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);

	ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);

	refresh_rate = mode_info.refresh_rate;

	/* Fall back to 60 Hz when the mode info reports no refresh rate. */
	if (0 == refresh_rate)
		refresh_rate = 60;

	frame_time_in_us = 1000000 / refresh_rate;

	/* NOTE(review): this underflows (wraps) if vblank_time_us exceeds
	 * frame_time_in_us - 200; presumably real modes never do — confirm. */
	pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;

	/* Cached for PPSMC_MSG_SetVBITimeout in smu7_notify_smc_display(). */
	data->frame_time_x2 = frame_time_in_us * 2 / 100;

	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start + smum_get_offsetof(hwmgr,
							SMU_SoftRegisters,
							PreVBlankGap), 0x64);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start + smum_get_offsetof(hwmgr,
							SMU_SoftRegisters,
							VBlankTimeout),
					(frame_time_in_us - pre_vbi_time_in_us));

	return 0;
}
3982
/* On a display configuration change only the display gap needs reprogramming. */
static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	return smu7_program_display_gap(hwmgr);
}
3987
3988
3989
3990
3991
3992
3993
3994
3995static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
3996{
3997 hwmgr->thermal_controller.
3998 advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
3999
4000 return smum_send_msg_to_smc_with_parameter(hwmgr,
4001 PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
4002}
4003
/* All interrupt sources registered below share the generic PHM handler. */
static const struct amdgpu_irq_src_funcs smu7_irq_funcs = {
	.process = phm_irq_process,
};
4007
/*
 * smu7_register_irq_handlers - register interrupt sources with the IH.
 *
 * One amdgpu_irq_src object is shared by all three source ids; it is
 * intentionally never freed here since the handlers outlive this call.
 * Returns -ENOMEM on allocation failure, otherwise 0.
 */
static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);

	if (!source)
		return -ENOMEM;

	source->funcs = &smu7_irq_funcs;

	/* src ids 230/231: presumably thermal low-to-high / high-to-low trips,
	 * and 83 the external (CTF) interrupt — TODO confirm against IH spec. */
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			AMDGPU_IH_CLIENTID_LEGACY,
			230,
			source);
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			AMDGPU_IH_CLIENTID_LEGACY,
			231,
			source);

	/* NOTE(review): return values of amdgpu_irq_add_id are ignored. */
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			AMDGPU_IH_CLIENTID_LEGACY,
			83,
			source);

	return 0;
}
4035
4036static bool
4037smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4038{
4039 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4040 bool is_update_required = false;
4041 struct cgs_display_info info = {0, 0, NULL};
4042
4043 cgs_get_active_displays_info(hwmgr->device, &info);
4044
4045 if (data->display_timing.num_existing_displays != info.display_count)
4046 is_update_required = true;
4047
4048 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
4049 if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr &&
4050 (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
4051 hwmgr->display_config.min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
4052 is_update_required = true;
4053 }
4054 return is_update_required;
4055}
4056
4057static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
4058 const struct smu7_performance_level *pl2)
4059{
4060 return ((pl1->memory_clock == pl2->memory_clock) &&
4061 (pl1->engine_clock == pl2->engine_clock) &&
4062 (pl1->pcie_gen == pl2->pcie_gen) &&
4063 (pl1->pcie_lane == pl2->pcie_lane));
4064}
4065
4066static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
4067 const struct pp_hw_power_state *pstate1,
4068 const struct pp_hw_power_state *pstate2, bool *equal)
4069{
4070 const struct smu7_power_state *psa;
4071 const struct smu7_power_state *psb;
4072 int i;
4073 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4074
4075 if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4076 return -EINVAL;
4077
4078 psa = cast_const_phw_smu7_power_state(pstate1);
4079 psb = cast_const_phw_smu7_power_state(pstate2);
4080
4081 if (psa->performance_level_count != psb->performance_level_count) {
4082 *equal = false;
4083 return 0;
4084 }
4085
4086 for (i = 0; i < psa->performance_level_count; i++) {
4087 if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4088
4089 *equal = false;
4090 return 0;
4091 }
4092 }
4093
4094
4095 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4096 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4097 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4098
4099 *equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK |
4100 DPMTABLE_OD_UPDATE_MCLK |
4101 DPMTABLE_OD_UPDATE_VDDC));
4102
4103 return 0;
4104}
4105
4106static int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
4107{
4108 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4109
4110 uint32_t vbios_version;
4111 uint32_t tmp;
4112
4113
4114
4115
4116
4117
4118 smu7_get_mc_microcode_version(hwmgr);
4119 vbios_version = hwmgr->microcode_version_info.MC & 0xf;
4120
4121 data->need_long_memory_training = false;
4122
4123 cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
4124 ixMC_IO_DEBUG_UP_13);
4125 tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
4126
4127 if (tmp & (1 << 23)) {
4128 data->mem_latency_high = MEM_LATENCY_HIGH;
4129 data->mem_latency_low = MEM_LATENCY_LOW;
4130 } else {
4131 data->mem_latency_high = 330;
4132 data->mem_latency_low = 330;
4133 }
4134
4135 return 0;
4136}
4137
/*
 * smu7_read_clock_registers - snapshot the SPLL, MPLL and memory-clock
 * power-management registers into data->clock_registers for later use.
 * Always returns 0.
 */
static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* SPLL registers live in the SMC indirect space... */
	data->clock_registers.vCG_SPLL_FUNC_CNTL         =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_2       =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_3       =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_4       =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM   =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
	/* ...while the MPLL/MCLK registers are direct MMIO. */
	data->clock_registers.vDLL_CNTL                  =
		cgs_read_register(hwmgr->device, mmDLL_CNTL);
	data->clock_registers.vMCLK_PWRMGT_CNTL          =
		cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
	data->clock_registers.vMPLL_AD_FUNC_CNTL         =
		cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
	data->clock_registers.vMPLL_DQ_FUNC_CNTL         =
		cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
	data->clock_registers.vMPLL_FUNC_CNTL            =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
	data->clock_registers.vMPLL_FUNC_CNTL_1          =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
	data->clock_registers.vMPLL_FUNC_CNTL_2          =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
	data->clock_registers.vMPLL_SS1                  =
		cgs_read_register(hwmgr->device, mmMPLL_SS1);
	data->clock_registers.vMPLL_SS2                  =
		cgs_read_register(hwmgr->device, mmMPLL_SS2);
	return 0;

}
4175
4176
4177
4178
4179
4180
4181
4182static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
4183{
4184 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4185 uint32_t temp;
4186
4187 temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
4188
4189 data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
4190 ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
4191 MC_SEQ_MISC0_GDDR5_SHIFT));
4192
4193 return 0;
4194}
4195
4196
4197
4198
4199
4200
4201
/* Enable static power management by setting GENERAL_PWRMGT.STATIC_PM_EN. */
static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, STATIC_PM_EN, 1);

	return 0;
}
4209
4210
4211
4212
4213
4214
4215
4216static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
4217{
4218 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4219
4220 data->uvd_power_gated = false;
4221 data->vce_power_gated = false;
4222 data->samu_power_gated = false;
4223
4224 return 0;
4225}
4226
4227static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
4228{
4229 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4230
4231 data->low_sclk_interrupt_threshold = 0;
4232 return 0;
4233}
4234
/*
 * smu7_setup_asic_task - one-time ASIC setup before DPM is enabled: check
 * the MC firmware state, snapshot clock registers, detect the memory type,
 * enable static power management, and reset power-gate / sclk-threshold
 * bookkeeping.  Each step logs on failure; the last failure code (or 0) is
 * returned.
 */
static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	smu7_upload_mc_firmware(hwmgr);

	tmp_result = smu7_read_clock_registers(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to read clock registers!", result = tmp_result);

	tmp_result = smu7_get_memory_type(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to get memory type!", result = tmp_result);

	tmp_result = smu7_enable_acpi_power_management(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable ACPI power management!", result = tmp_result);

	tmp_result = smu7_init_power_gate_state(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to init power gate state!", result = tmp_result);

	tmp_result = smu7_get_mc_microcode_version(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to get MC microcode version!", result = tmp_result);

	tmp_result = smu7_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to init sclk threshold!", result = tmp_result);

	return result;
}
4267
4268static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
4269 enum pp_clock_type type, uint32_t mask)
4270{
4271 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4272
4273 if (mask == 0)
4274 return -EINVAL;
4275
4276 switch (type) {
4277 case PP_SCLK:
4278 if (!data->sclk_dpm_key_disabled)
4279 smum_send_msg_to_smc_with_parameter(hwmgr,
4280 PPSMC_MSG_SCLKDPM_SetEnabledMask,
4281 data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
4282 break;
4283 case PP_MCLK:
4284 if (!data->mclk_dpm_key_disabled)
4285 smum_send_msg_to_smc_with_parameter(hwmgr,
4286 PPSMC_MSG_MCLKDPM_SetEnabledMask,
4287 data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
4288 break;
4289 case PP_PCIE:
4290 {
4291 uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
4292
4293 if (!data->pcie_dpm_key_disabled) {
4294 if (fls(tmp) != ffs(tmp))
4295 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel);
4296 else
4297 smum_send_msg_to_smc_with_parameter(hwmgr,
4298 PPSMC_MSG_PCIeDPM_ForceLevel,
4299 fls(tmp) - 1);
4300 }
4301 break;
4302 }
4303 default:
4304 break;
4305 }
4306
4307 return 0;
4308}
4309
/*
 * smu7_print_clock_levels - format DPM/OD level tables into a sysfs buffer.
 *
 * For PP_SCLK/PP_MCLK the current clock is read back from the SMC argument
 * register and the first level at or above it is marked with '*'.  For
 * PP_PCIE the current link speed is matched against the pcie table.
 * OD_SCLK/OD_MCLK dump the overdrive tables when overdrive is enabled.
 * Returns the number of bytes written to @buf.
 */
static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
	struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
	int i, now, size = 0;
	uint32_t clock, pcie_speed;

	switch (type) {
	case PP_SCLK:
		/* The SMC returns the current sclk in SMC_MSG_ARG_0. */
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		/* Find the first level at or above the current clock. */
		for (i = 0; i < sclk_table->count; i++) {
			if (clock > sclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		/* Clocks are stored in 10 kHz units; /100 yields MHz. */
		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		for (i = 0; i < mclk_table->count; i++) {
			if (clock > mclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_PCIE:
		pcie_speed = smu7_get_current_pcie_speed(hwmgr);
		for (i = 0; i < pcie_table->count; i++) {
			if (pcie_speed != pcie_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
					(pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
					(pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
					(i == now) ? "*" : "");
		break;
	case OD_SCLK:
		if (hwmgr->od_enabled) {
			size = sprintf(buf, "%s: \n", "OD_SCLK");
			for (i = 0; i < odn_sclk_table->num_of_pl; i++)
				size += sprintf(buf + size, "%d: %10uMhz %10u mV\n",
					i, odn_sclk_table->entries[i].clock / 100,
					odn_sclk_table->entries[i].vddc);
		}
		break;
	case OD_MCLK:
		if (hwmgr->od_enabled) {
			size = sprintf(buf, "%s: \n", "OD_MCLK");
			for (i = 0; i < odn_mclk_table->num_of_pl; i++)
				size += sprintf(buf + size, "%d: %10uMhz %10u mV\n",
					i, odn_mclk_table->entries[i].clock / 100,
					odn_mclk_table->entries[i].vddc);
		}
		break;
	default:
		break;
	}
	return size;
}
4395
4396static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
4397{
4398 switch (mode) {
4399 case AMD_FAN_CTRL_NONE:
4400 smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
4401 break;
4402 case AMD_FAN_CTRL_MANUAL:
4403 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4404 PHM_PlatformCaps_MicrocodeFanControl))
4405 smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
4406 break;
4407 case AMD_FAN_CTRL_AUTO:
4408 if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode))
4409 smu7_fan_ctrl_start_smc_fan_control(hwmgr);
4410 break;
4411 default:
4412 break;
4413 }
4414}
4415
4416static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
4417{
4418 return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL;
4419}
4420
4421static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
4422{
4423 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4424 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4425 struct smu7_single_dpm_table *golden_sclk_table =
4426 &(data->golden_dpm_table.sclk_table);
4427 int value;
4428
4429 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
4430 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
4431 100 /
4432 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
4433
4434 return value;
4435}
4436
4437static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4438{
4439 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4440 struct smu7_single_dpm_table *golden_sclk_table =
4441 &(data->golden_dpm_table.sclk_table);
4442 struct pp_power_state *ps;
4443 struct smu7_power_state *smu7_ps;
4444
4445 if (value > 20)
4446 value = 20;
4447
4448 ps = hwmgr->request_ps;
4449
4450 if (ps == NULL)
4451 return -EINVAL;
4452
4453 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
4454
4455 smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
4456 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
4457 value / 100 +
4458 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
4459
4460 return 0;
4461}
4462
4463static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
4464{
4465 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4466 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4467 struct smu7_single_dpm_table *golden_mclk_table =
4468 &(data->golden_dpm_table.mclk_table);
4469 int value;
4470
4471 value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
4472 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
4473 100 /
4474 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
4475
4476 return value;
4477}
4478
4479static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4480{
4481 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4482 struct smu7_single_dpm_table *golden_mclk_table =
4483 &(data->golden_dpm_table.mclk_table);
4484 struct pp_power_state *ps;
4485 struct smu7_power_state *smu7_ps;
4486
4487 if (value > 20)
4488 value = 20;
4489
4490 ps = hwmgr->request_ps;
4491
4492 if (ps == NULL)
4493 return -EINVAL;
4494
4495 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
4496
4497 smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
4498 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
4499 value / 100 +
4500 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
4501
4502 return 0;
4503}
4504
4505
4506static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4507{
4508 struct phm_ppt_v1_information *table_info =
4509 (struct phm_ppt_v1_information *)hwmgr->pptable;
4510 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
4511 struct phm_clock_voltage_dependency_table *sclk_table;
4512 int i;
4513
4514 if (hwmgr->pp_table_version == PP_TABLE_V1) {
4515 if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
4516 return -EINVAL;
4517 dep_sclk_table = table_info->vdd_dep_on_sclk;
4518 for (i = 0; i < dep_sclk_table->count; i++)
4519 clocks->clock[i] = dep_sclk_table->entries[i].clk;
4520 clocks->count = dep_sclk_table->count;
4521 } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
4522 sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
4523 for (i = 0; i < sclk_table->count; i++)
4524 clocks->clock[i] = sclk_table->entries[i].clk;
4525 clocks->count = sclk_table->count;
4526 }
4527
4528 return 0;
4529}
4530
4531static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
4532{
4533 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4534
4535 if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
4536 return data->mem_latency_high;
4537 else if (clk >= MEM_FREQ_HIGH_LATENCY)
4538 return data->mem_latency_low;
4539 else
4540 return MEM_LATENCY_ERR;
4541}
4542
4543static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4544{
4545 struct phm_ppt_v1_information *table_info =
4546 (struct phm_ppt_v1_information *)hwmgr->pptable;
4547 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
4548 int i;
4549 struct phm_clock_voltage_dependency_table *mclk_table;
4550
4551 if (hwmgr->pp_table_version == PP_TABLE_V1) {
4552 if (table_info == NULL)
4553 return -EINVAL;
4554 dep_mclk_table = table_info->vdd_dep_on_mclk;
4555 for (i = 0; i < dep_mclk_table->count; i++) {
4556 clocks->clock[i] = dep_mclk_table->entries[i].clk;
4557 clocks->latency[i] = smu7_get_mem_latency(hwmgr,
4558 dep_mclk_table->entries[i].clk);
4559 }
4560 clocks->count = dep_mclk_table->count;
4561 } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
4562 mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
4563 for (i = 0; i < mclk_table->count; i++)
4564 clocks->clock[i] = mclk_table->entries[i].clk;
4565 clocks->count = mclk_table->count;
4566 }
4567 return 0;
4568}
4569
4570static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
4571 struct amd_pp_clocks *clocks)
4572{
4573 switch (type) {
4574 case amd_pp_sys_clock:
4575 smu7_get_sclks(hwmgr, clocks);
4576 break;
4577 case amd_pp_mem_clock:
4578 smu7_get_mclks(hwmgr, clocks);
4579 break;
4580 default:
4581 return -EINVAL;
4582 }
4583
4584 return 0;
4585}
4586
/*
 * smu7_notify_cac_buffer_info - publish the DRAM logging buffer location to
 * the SMU by writing its MC address, virtual address and size into the SMU
 * soft-register block.  The five writes are independent; always returns 0.
 */
static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
					uint32_t virtual_addr_low,
					uint32_t virtual_addr_hi,
					uint32_t mc_addr_low,
					uint32_t mc_addr_hi,
					uint32_t size)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* MC (bus) address, high then low half. */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_ADDR_H),
					mc_addr_hi);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_ADDR_L),
					mc_addr_low);

	/* Virtual address, high then low half. */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
					virtual_addr_hi);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
					virtual_addr_low);

	/* Buffer size in bytes. */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
					size);
	return 0;
}
4627
4628static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
4629 struct amd_pp_simple_clock_info *clocks)
4630{
4631 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4632 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4633 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4634
4635 if (clocks == NULL)
4636 return -EINVAL;
4637
4638 clocks->memory_max_clock = mclk_table->count > 1 ?
4639 mclk_table->dpm_levels[mclk_table->count-1].value :
4640 mclk_table->dpm_levels[0].value;
4641 clocks->engine_max_clock = sclk_table->count > 1 ?
4642 sclk_table->dpm_levels[sclk_table->count-1].value :
4643 sclk_table->dpm_levels[0].value;
4644 return 0;
4645}
4646
4647static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
4648 struct PP_TemperatureRange *thermal_data)
4649{
4650 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4651 struct phm_ppt_v1_information *table_info =
4652 (struct phm_ppt_v1_information *)hwmgr->pptable;
4653
4654 memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
4655
4656 if (hwmgr->pp_table_version == PP_TABLE_V1)
4657 thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
4658 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4659 else if (hwmgr->pp_table_version == PP_TABLE_V0)
4660 thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
4661 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4662
4663 return 0;
4664}
4665
4666static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
4667 enum PP_OD_DPM_TABLE_COMMAND type,
4668 uint32_t clk,
4669 uint32_t voltage)
4670{
4671 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4672
4673 struct phm_ppt_v1_information *table_info =
4674 (struct phm_ppt_v1_information *)(hwmgr->pptable);
4675 uint32_t min_vddc;
4676 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
4677
4678 if (table_info == NULL)
4679 return false;
4680
4681 dep_sclk_table = table_info->vdd_dep_on_sclk;
4682 min_vddc = dep_sclk_table->entries[0].vddc;
4683
4684 if (voltage < min_vddc || voltage > 2000) {
4685 pr_info("OD voltage is out of range [%d - 2000] mV\n", min_vddc);
4686 return false;
4687 }
4688
4689 if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
4690 if (data->vbios_boot_state.sclk_bootup_value > clk ||
4691 hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
4692 pr_info("OD engine clock is out of range [%d - %d] MHz\n",
4693 data->vbios_boot_state.sclk_bootup_value,
4694 hwmgr->platform_descriptor.overdriveLimit.engineClock / 100);
4695 return false;
4696 }
4697 } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
4698 if (data->vbios_boot_state.mclk_bootup_value > clk ||
4699 hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
4700 pr_info("OD memory clock is out of range [%d - %d] MHz\n",
4701 data->vbios_boot_state.mclk_bootup_value/100,
4702 hwmgr->platform_descriptor.overdriveLimit.memoryClock / 100);
4703 return false;
4704 }
4705 } else {
4706 return false;
4707 }
4708
4709 return true;
4710}
4711
/*
 * smu7_check_dpm_table_updated - compare the live DPM tables against the
 * user overdrive tables and set the need_update_smu7_dpm_table flags for
 * whatever differs (clocks and/or voltages), so the next state switch
 * re-uploads the affected tables.
 */
static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;

	if (table_info == NULL)
		return;

	/* Any sclk level differing from the OD table forces an sclk update. */
	for (i=0; i<data->dpm_table.sclk_table.count; i++) {
		if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
					data->dpm_table.sclk_table.dpm_levels[i].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
			break;
		}
	}

	/* Same check for the mclk levels. */
	for (i=0; i<data->dpm_table.mclk_table.count; i++) {
		if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
					data->dpm_table.mclk_table.dpm_levels[i].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
			break;
		}
	}

	/* A changed mclk voltage implies both VDDC and MCLK updates. */
	dep_table = table_info->vdd_dep_on_mclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);

	for (i=0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
			return;
		}
	}
	/* Voltages match again: drop the stale VDDC flag but still refresh
	 * the mclk table once. */
	if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
	}

	/* Repeat the voltage comparison for the sclk dependency table. */
	dep_table = table_info->vdd_dep_on_sclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
	for (i=0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
			return;
		}
	}
	if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
	}
}
4769
4770static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
4771 enum PP_OD_DPM_TABLE_COMMAND type,
4772 long *input, uint32_t size)
4773{
4774 uint32_t i;
4775 struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL;
4776 struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL;
4777 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4778
4779 uint32_t input_clk;
4780 uint32_t input_vol;
4781 uint32_t input_level;
4782
4783 PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
4784 return -EINVAL);
4785
4786 if (!hwmgr->od_enabled) {
4787 pr_info("OverDrive feature not enabled\n");
4788 return -EINVAL;
4789 }
4790
4791 if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
4792 podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels;
4793 podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_sclk;
4794 PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
4795 "Failed to get ODN SCLK and Voltage tables",
4796 return -EINVAL);
4797 } else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
4798 podn_dpm_table_in_backend = &data->odn_dpm_table.odn_memory_clock_dpm_levels;
4799 podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_mclk;
4800
4801 PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
4802 "Failed to get ODN MCLK and Voltage tables",
4803 return -EINVAL);
4804 } else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
4805 smu7_odn_initial_default_setting(hwmgr);
4806 return 0;
4807 } else if (PP_OD_COMMIT_DPM_TABLE == type) {
4808 smu7_check_dpm_table_updated(hwmgr);
4809 return 0;
4810 } else {
4811 return -EINVAL;
4812 }
4813
4814 for (i = 0; i < size; i += 3) {
4815 if (i + 3 > size || input[i] >= podn_dpm_table_in_backend->num_of_pl) {
4816 pr_info("invalid clock voltage input \n");
4817 return 0;
4818 }
4819 input_level = input[i];
4820 input_clk = input[i+1] * 100;
4821 input_vol = input[i+2];
4822
4823 if (smu7_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
4824 podn_dpm_table_in_backend->entries[input_level].clock = input_clk;
4825 podn_vdd_dep_in_backend->entries[input_level].clk = input_clk;
4826 podn_dpm_table_in_backend->entries[input_level].vddc = input_vol;
4827 podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol;
4828 } else {
4829 return -EINVAL;
4830 }
4831 }
4832
4833 return 0;
4834}
4835
4836static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
4837{
4838 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4839 uint32_t i, size = 0;
4840 uint32_t len;
4841
4842 static const char *profile_name[6] = {"3D_FULL_SCREEN",
4843 "POWER_SAVING",
4844 "VIDEO",
4845 "VR",
4846 "COMPUTE",
4847 "CUSTOM"};
4848
4849 static const char *title[8] = {"NUM",
4850 "MODE_NAME",
4851 "SCLK_UP_HYST",
4852 "SCLK_DOWN_HYST",
4853 "SCLK_ACTIVE_LEVEL",
4854 "MCLK_UP_HYST",
4855 "MCLK_DOWN_HYST",
4856 "MCLK_ACTIVE_LEVEL"};
4857
4858 if (!buf)
4859 return -EINVAL;
4860
4861 size += sprintf(buf + size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
4862 title[0], title[1], title[2], title[3],
4863 title[4], title[5], title[6], title[7]);
4864
4865 len = sizeof(smu7_profiling) / sizeof(struct profile_mode_setting);
4866
4867 for (i = 0; i < len; i++) {
4868 if (i == hwmgr->power_profile_mode) {
4869 size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
4870 i, profile_name[i], "*",
4871 data->current_profile_setting.sclk_up_hyst,
4872 data->current_profile_setting.sclk_down_hyst,
4873 data->current_profile_setting.sclk_activity,
4874 data->current_profile_setting.mclk_up_hyst,
4875 data->current_profile_setting.mclk_down_hyst,
4876 data->current_profile_setting.mclk_activity);
4877 continue;
4878 }
4879 if (smu7_profiling[i].bupdate_sclk)
4880 size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
4881 i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
4882 smu7_profiling[i].sclk_down_hyst,
4883 smu7_profiling[i].sclk_activity);
4884 else
4885 size += sprintf(buf + size, "%3d %16s: %8s %16s %16s ",
4886 i, profile_name[i], "-", "-", "-");
4887
4888 if (smu7_profiling[i].bupdate_mclk)
4889 size += sprintf(buf + size, "%16d %16d %16d\n",
4890 smu7_profiling[i].mclk_up_hyst,
4891 smu7_profiling[i].mclk_down_hyst,
4892 smu7_profiling[i].mclk_activity);
4893 else
4894 size += sprintf(buf + size, "%16s %16s %16s\n",
4895 "-", "-", "-");
4896 }
4897
4898 return size;
4899}
4900
4901static void smu7_patch_compute_profile_mode(struct pp_hwmgr *hwmgr,
4902 enum PP_SMC_POWER_PROFILE requst)
4903{
4904 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4905 uint32_t tmp, level;
4906
4907 if (requst == PP_SMC_POWER_PROFILE_COMPUTE) {
4908 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4909 level = 0;
4910 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
4911 while (tmp >>= 1)
4912 level++;
4913 if (level > 0)
4914 smu7_force_clock_level(hwmgr, PP_SCLK, 3 << (level-1));
4915 }
4916 } else if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
4917 smu7_force_clock_level(hwmgr, PP_SCLK, data->dpm_level_enable_mask.sclk_dpm_enable_mask);
4918 }
4919}
4920
4921static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
4922{
4923 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4924 struct profile_mode_setting tmp;
4925 enum PP_SMC_POWER_PROFILE mode;
4926
4927 if (input == NULL)
4928 return -EINVAL;
4929
4930 mode = input[size];
4931 switch (mode) {
4932 case PP_SMC_POWER_PROFILE_CUSTOM:
4933 if (size < 8)
4934 return -EINVAL;
4935
4936 tmp.bupdate_sclk = input[0];
4937 tmp.sclk_up_hyst = input[1];
4938 tmp.sclk_down_hyst = input[2];
4939 tmp.sclk_activity = input[3];
4940 tmp.bupdate_mclk = input[4];
4941 tmp.mclk_up_hyst = input[5];
4942 tmp.mclk_down_hyst = input[6];
4943 tmp.mclk_activity = input[7];
4944 if (!smum_update_dpm_settings(hwmgr, &tmp)) {
4945 memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
4946 hwmgr->power_profile_mode = mode;
4947 }
4948 break;
4949 case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
4950 case PP_SMC_POWER_PROFILE_POWERSAVING:
4951 case PP_SMC_POWER_PROFILE_VIDEO:
4952 case PP_SMC_POWER_PROFILE_VR:
4953 case PP_SMC_POWER_PROFILE_COMPUTE:
4954 if (mode == hwmgr->power_profile_mode)
4955 return 0;
4956
4957 memcpy(&tmp, &smu7_profiling[mode], sizeof(struct profile_mode_setting));
4958 if (!smum_update_dpm_settings(hwmgr, &tmp)) {
4959 if (tmp.bupdate_sclk) {
4960 data->current_profile_setting.bupdate_sclk = tmp.bupdate_sclk;
4961 data->current_profile_setting.sclk_up_hyst = tmp.sclk_up_hyst;
4962 data->current_profile_setting.sclk_down_hyst = tmp.sclk_down_hyst;
4963 data->current_profile_setting.sclk_activity = tmp.sclk_activity;
4964 }
4965 if (tmp.bupdate_mclk) {
4966 data->current_profile_setting.bupdate_mclk = tmp.bupdate_mclk;
4967 data->current_profile_setting.mclk_up_hyst = tmp.mclk_up_hyst;
4968 data->current_profile_setting.mclk_down_hyst = tmp.mclk_down_hyst;
4969 data->current_profile_setting.mclk_activity = tmp.mclk_activity;
4970 }
4971 smu7_patch_compute_profile_mode(hwmgr, mode);
4972 hwmgr->power_profile_mode = mode;
4973 }
4974 break;
4975 default:
4976 return -EINVAL;
4977 }
4978
4979 return 0;
4980}
4981
4982static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
4983 .backend_init = &smu7_hwmgr_backend_init,
4984 .backend_fini = &smu7_hwmgr_backend_fini,
4985 .asic_setup = &smu7_setup_asic_task,
4986 .dynamic_state_management_enable = &smu7_enable_dpm_tasks,
4987 .apply_state_adjust_rules = smu7_apply_state_adjust_rules,
4988 .force_dpm_level = &smu7_force_dpm_level,
4989 .power_state_set = smu7_set_power_state_tasks,
4990 .get_power_state_size = smu7_get_power_state_size,
4991 .get_mclk = smu7_dpm_get_mclk,
4992 .get_sclk = smu7_dpm_get_sclk,
4993 .patch_boot_state = smu7_dpm_patch_boot_state,
4994 .get_pp_table_entry = smu7_get_pp_table_entry,
4995 .get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
4996 .powerdown_uvd = smu7_powerdown_uvd,
4997 .powergate_uvd = smu7_powergate_uvd,
4998 .powergate_vce = smu7_powergate_vce,
4999 .disable_clock_power_gating = smu7_disable_clock_power_gating,
5000 .update_clock_gatings = smu7_update_clock_gatings,
5001 .notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
5002 .display_config_changed = smu7_display_configuration_changed_task,
5003 .set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
5004 .set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
5005 .stop_thermal_controller = smu7_thermal_stop_thermal_controller,
5006 .get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
5007 .get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
5008 .set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
5009 .reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
5010 .get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
5011 .set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
5012 .uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
5013 .register_irq_handlers = smu7_register_irq_handlers,
5014 .check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
5015 .check_states_equal = smu7_check_states_equal,
5016 .set_fan_control_mode = smu7_set_fan_control_mode,
5017 .get_fan_control_mode = smu7_get_fan_control_mode,
5018 .force_clock_level = smu7_force_clock_level,
5019 .print_clock_levels = smu7_print_clock_levels,
5020 .enable_per_cu_power_gating = smu7_enable_per_cu_power_gating,
5021 .get_sclk_od = smu7_get_sclk_od,
5022 .set_sclk_od = smu7_set_sclk_od,
5023 .get_mclk_od = smu7_get_mclk_od,
5024 .set_mclk_od = smu7_set_mclk_od,
5025 .get_clock_by_type = smu7_get_clock_by_type,
5026 .read_sensor = smu7_read_sensor,
5027 .dynamic_state_management_disable = smu7_disable_dpm_tasks,
5028 .avfs_control = smu7_avfs_control,
5029 .disable_smc_firmware_ctf = smu7_thermal_disable_alert,
5030 .start_thermal_controller = smu7_start_thermal_controller,
5031 .notify_cac_buffer_info = smu7_notify_cac_buffer_info,
5032 .get_max_high_clocks = smu7_get_max_high_clocks,
5033 .get_thermal_temperature_range = smu7_get_thermal_temperature_range,
5034 .odn_edit_dpm_table = smu7_odn_edit_dpm_table,
5035 .set_power_limit = smu7_set_power_limit,
5036 .get_power_profile_mode = smu7_get_power_profile_mode,
5037 .set_power_profile_mode = smu7_set_power_profile_mode,
5038};
5039
5040uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
5041 uint32_t clock_insr)
5042{
5043 uint8_t i;
5044 uint32_t temp;
5045 uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);
5046
5047 PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
5048 for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
5049 temp = clock >> i;
5050
5051 if (temp >= min || i == 0)
5052 break;
5053 }
5054 return i;
5055}
5056
5057int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
5058{
5059 int ret = 0;
5060
5061 hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
5062 if (hwmgr->pp_table_version == PP_TABLE_V0)
5063 hwmgr->pptable_func = &pptable_funcs;
5064 else if (hwmgr->pp_table_version == PP_TABLE_V1)
5065 hwmgr->pptable_func = &pptable_v1_0_funcs;
5066
5067 return ret;
5068}
5069