1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include "smumgr.h"
25#include "vega20_inc.h"
26#include "soc15_common.h"
27#include "vega20_smumgr.h"
28#include "vega20_ppsmc.h"
29#include "smu11_driver_if.h"
30#include "ppatomctrl.h"
31#include "pp_debug.h"
32#include "smu_ucode_xfer_vi.h"
33#include "smu7_smumgr.h"
34#include "vega20_hwmgr.h"
35
36#include "smu_v11_0_i2c.h"
37
38
39#define MP0_Public 0x03800000
40#define MP0_SRAM 0x03900000
41#define MP1_Public 0x03b00000
42#define MP1_SRAM 0x03c00004
43
44
45#define smnMP1_FIRMWARE_FLAGS 0x3010024
46#define smnMP0_FW_INTF 0x30101c0
47#define smnMP1_PUB_CTRL 0x3010b14
48
49bool vega20_is_smc_ram_running(struct pp_hwmgr *hwmgr)
50{
51 struct amdgpu_device *adev = hwmgr->adev;
52 uint32_t mp1_fw_flags;
53
54 mp1_fw_flags = RREG32_PCIE(MP1_Public |
55 (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
56
57 if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
58 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
59 return true;
60
61 return false;
62}
63
64
65
66
67
68
69
/*
 * vega20_wait_for_response - block until the SMU posts a response.
 *
 * Polls the MP1 C2PMSG_90 mailbox register until its CONTENT field is
 * non-zero (i.e. the firmware has written a response code), then
 * returns the raw register value.
 */
static uint32_t vega20_wait_for_response(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t reg;

	reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);

	/* Wait for (C2PMSG_90 & CONTENT_MASK) to become unequal to 0. */
	phm_wait_for_register_unequal(hwmgr, reg,
			0, MP1_C2PMSG_90__CONTENT_MASK);

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
}
82
83
84
85
86
87
88
/*
 * vega20_send_msg_to_smc_without_waiting - post a message to the SMU.
 * @hwmgr: powerplay hardware manager
 * @msg:   PPSMC message id to send
 *
 * Writes the message id into the C2PMSG_66 mailbox and returns
 * immediately without waiting for a response.  Always returns 0.
 */
static int vega20_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
		uint16_t msg)
{
	struct amdgpu_device *adev = hwmgr->adev;

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);

	return 0;
}
98
99
100
101
102
103
104
105static int vega20_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
106{
107 struct amdgpu_device *adev = hwmgr->adev;
108 int ret = 0;
109
110 vega20_wait_for_response(hwmgr);
111
112 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
113
114 vega20_send_msg_to_smc_without_waiting(hwmgr, msg);
115
116 ret = vega20_wait_for_response(hwmgr);
117 if (ret != PPSMC_Result_OK)
118 pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret);
119
120 return (ret == PPSMC_Result_OK) ? 0 : -EIO;
121}
122
123
124
125
126
127
128
129
130static int vega20_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
131 uint16_t msg, uint32_t parameter)
132{
133 struct amdgpu_device *adev = hwmgr->adev;
134 int ret = 0;
135
136 vega20_wait_for_response(hwmgr);
137
138 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
139
140 WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
141
142 vega20_send_msg_to_smc_without_waiting(hwmgr, msg);
143
144 ret = vega20_wait_for_response(hwmgr);
145 if (ret != PPSMC_Result_OK)
146 pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret);
147
148 return (ret == PPSMC_Result_OK) ? 0 : -EIO;
149}
150
/*
 * vega20_get_argument - read back the SMU argument/return mailbox.
 *
 * Returns the current contents of C2PMSG_82, which holds the value the
 * firmware left behind after the last message completed.
 */
static uint32_t vega20_get_argument(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}
157
158
159
160
161
162
/*
 * vega20_copy_table_from_smc - read an SMU table into a driver buffer.
 * @hwmgr:    powerplay hardware manager
 * @table:    destination buffer; must hold at least entry[table_id].size bytes
 * @table_id: index into priv->smu_tables.entry[] (must be < TABLE_COUNT)
 *
 * Programs the table's VRAM bounce-buffer address into the SMU, asks
 * the firmware to copy the table SMU -> DRAM, then copies the bounce
 * buffer into @table.  Returns 0 on success or a negative error code.
 */
static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
		uint8_t *table, int16_t table_id)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	/* Reject unknown or never-initialized table slots. */
	PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
			"Invalid SMU Table ID!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
			"Invalid SMU Table version!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
			"Invalid SMU Table Length!", return -EINVAL);

	/* Hand the 64-bit MC address of the bounce buffer to the SMU. */
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL)) == 0,
			"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL)) == 0,
			"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableSmu2Dram, table_id, NULL)) == 0,
			"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
			return ret);

	/* Flush HDP so the CPU sees the data the SMU just wrote to VRAM. */
	amdgpu_asic_flush_hdp(adev, NULL);

	memcpy(table, priv->smu_tables.entry[table_id].table,
			priv->smu_tables.entry[table_id].size);

	return 0;
}
203
204
205
206
207
208
/*
 * vega20_copy_table_to_smc - write a driver buffer out to an SMU table.
 * @hwmgr:    powerplay hardware manager
 * @table:    source buffer of entry[table_id].size bytes
 * @table_id: index into priv->smu_tables.entry[] (must be < TABLE_COUNT)
 *
 * Copies @table into the table's VRAM bounce buffer, flushes HDP so the
 * SMU sees the CPU writes, then asks the firmware to transfer the table
 * DRAM -> SMU.  Returns 0 on success or a negative error code.
 */
static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
		uint8_t *table, int16_t table_id)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	/* Reject unknown or never-initialized table slots. */
	PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
			"Invalid SMU Table ID!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
			"Invalid SMU Table version!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
			"Invalid SMU Table Length!", return -EINVAL);

	memcpy(priv->smu_tables.entry[table_id].table, table,
			priv->smu_tables.entry[table_id].size);

	/* Make the CPU writes visible to the SMU before the transfer. */
	amdgpu_asic_flush_hdp(adev, NULL);

	/* Hand the 64-bit MC address of the bounce buffer to the SMU. */
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL)) == 0,
			"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL)) == 0,
			"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableDram2Smu, table_id, NULL)) == 0,
			"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
			return ret);

	return 0;
}
248
/*
 * vega20_set_activity_monitor_coeff - upload activity-monitor coefficients.
 * @hwmgr:         powerplay hardware manager
 * @table:         source buffer holding the coefficient table
 * @workload_type: workload index, encoded into the transfer message's
 *                 upper 16 bits so the SMU updates the right profile
 *
 * Copies @table into the TABLE_ACTIVITY_MONITOR_COEFF bounce buffer and
 * asks the firmware to transfer it DRAM -> SMU.  Returns 0 on success
 * or a negative error code.
 */
int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
		uint8_t *table, uint16_t workload_type)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table,
			priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);

	/* Make the CPU writes visible to the SMU before the transfer. */
	amdgpu_asic_flush_hdp(adev, NULL);

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
			NULL)) == 0,
			"[SetActivityMonitor] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
			NULL)) == 0,
			"[SetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
			return ret);
	/* Parameter packs table id (low 16 bits) and workload (high 16 bits). */
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableDram2Smu,
			TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16),
			NULL)) == 0,
			"[SetActivityMonitor] Attempt to Transfer Table To SMU Failed!",
			return ret);

	return 0;
}
283
/*
 * vega20_get_activity_monitor_coeff - download activity-monitor coefficients.
 * @hwmgr:         powerplay hardware manager
 * @table:         destination buffer for the coefficient table
 * @workload_type: workload index, encoded into the transfer message's
 *                 upper 16 bits so the SMU returns the right profile
 *
 * Asks the firmware to copy the activity-monitor table SMU -> DRAM and
 * then copies the bounce buffer into @table.  Returns 0 on success or
 * a negative error code.
 */
int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
		uint8_t *table, uint16_t workload_type)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
			NULL)) == 0,
			"[GetActivityMonitor] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
			NULL)) == 0,
			"[GetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
			return ret);
	/* Parameter packs table id (low 16 bits) and workload (high 16 bits). */
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableSmu2Dram,
			TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16), NULL)) == 0,
			"[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!",
			return ret);

	/* Flush HDP so the CPU sees the data the SMU just wrote to VRAM. */
	amdgpu_asic_flush_hdp(adev, NULL);

	memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
			priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);

	return 0;
}
318
/*
 * vega20_enable_smc_features - enable or disable a set of SMU features.
 * @hwmgr:        powerplay hardware manager
 * @enable:       true to enable the features, false to disable them
 * @feature_mask: 64-bit feature bitmask
 *
 * The 64-bit mask is split into low/high 32-bit halves because the SMU
 * message interface only carries a 32-bit parameter.  Returns 0 on
 * success or the failing message's error code.
 */
int vega20_enable_smc_features(struct pp_hwmgr *hwmgr,
		bool enable, uint64_t feature_mask)
{
	uint32_t smu_features_low, smu_features_high;
	int ret = 0;

	smu_features_low = (uint32_t)((feature_mask & SMU_FEATURES_LOW_MASK) >> SMU_FEATURES_LOW_SHIFT);
	smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);

	if (enable) {
		PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low, NULL)) == 0,
				"[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
				return ret);
		PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high, NULL)) == 0,
				"[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
				return ret);
	} else {
		PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low, NULL)) == 0,
				"[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
				return ret);
		PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high, NULL)) == 0,
				"[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
				return ret);
	}

	return 0;
}
350
/*
 * vega20_get_enabled_smc_features - query the SMU's enabled-feature mask.
 * @hwmgr:            powerplay hardware manager
 * @features_enabled: out parameter, receives the 64-bit feature bitmask
 *
 * Fetches the low and high 32-bit halves with two SMU messages and
 * recombines them into *@features_enabled.  Returns 0 on success,
 * -EINVAL for a NULL out pointer, or the failing message's error code.
 */
int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
		uint64_t *features_enabled)
{
	uint32_t smc_features_low, smc_features_high;
	int ret = 0;

	if (features_enabled == NULL)
		return -EINVAL;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_GetEnabledSmuFeaturesLow,
			&smc_features_low)) == 0,
			"[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_GetEnabledSmuFeaturesHigh,
			&smc_features_high)) == 0,
			"[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
			return ret);

	/* Recombine the halves into a single 64-bit mask. */
	*features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
			(((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));

	return 0;
}
376
377static int vega20_set_tools_address(struct pp_hwmgr *hwmgr)
378{
379 struct vega20_smumgr *priv =
380 (struct vega20_smumgr *)(hwmgr->smu_backend);
381 int ret = 0;
382
383 if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
384 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
385 PPSMC_MSG_SetToolsDramAddrHigh,
386 upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
387 NULL);
388 if (!ret)
389 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
390 PPSMC_MSG_SetToolsDramAddrLow,
391 lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
392 NULL);
393 }
394
395 return ret;
396}
397
/*
 * vega20_set_pptable_driver_address - point the SMU at the pptable buffer.
 *
 * Sends the 64-bit MC address of the driver's pptable bounce buffer to
 * the SMU as two 32-bit halves.  Returns 0 on success or the failing
 * message's error code.
 */
int vega20_set_pptable_driver_address(struct pp_hwmgr *hwmgr)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	int ret = 0;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr),
			NULL)) == 0,
			"[SetPPtabeDriverAddress] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr),
			NULL)) == 0,
			"[SetPPtabeDriverAddress] Attempt to Set Dram Addr Low Failed!",
			return ret);

	return ret;
}
419
420static int vega20_smu_init(struct pp_hwmgr *hwmgr)
421{
422 struct vega20_smumgr *priv;
423 unsigned long tools_size = 0x19000;
424 int ret = 0;
425 struct amdgpu_device *adev = hwmgr->adev;
426
427 struct cgs_firmware_info info = {0};
428
429 ret = cgs_get_firmware_info(hwmgr->device,
430 smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
431 &info);
432 if (ret || !info.kptr)
433 return -EINVAL;
434
435 priv = kzalloc(sizeof(struct vega20_smumgr), GFP_KERNEL);
436 if (!priv)
437 return -ENOMEM;
438
439 hwmgr->smu_backend = priv;
440
441
442 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
443 sizeof(PPTable_t),
444 PAGE_SIZE,
445 AMDGPU_GEM_DOMAIN_VRAM,
446 &priv->smu_tables.entry[TABLE_PPTABLE].handle,
447 &priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
448 &priv->smu_tables.entry[TABLE_PPTABLE].table);
449 if (ret)
450 goto free_backend;
451
452 priv->smu_tables.entry[TABLE_PPTABLE].version = 0x01;
453 priv->smu_tables.entry[TABLE_PPTABLE].size = sizeof(PPTable_t);
454
455
456 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
457 sizeof(Watermarks_t),
458 PAGE_SIZE,
459 AMDGPU_GEM_DOMAIN_VRAM,
460 &priv->smu_tables.entry[TABLE_WATERMARKS].handle,
461 &priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
462 &priv->smu_tables.entry[TABLE_WATERMARKS].table);
463 if (ret)
464 goto err0;
465
466 priv->smu_tables.entry[TABLE_WATERMARKS].version = 0x01;
467 priv->smu_tables.entry[TABLE_WATERMARKS].size = sizeof(Watermarks_t);
468
469
470 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
471 tools_size,
472 PAGE_SIZE,
473 AMDGPU_GEM_DOMAIN_VRAM,
474 &priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
475 &priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
476 &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
477 if (ret)
478 goto err1;
479
480 priv->smu_tables.entry[TABLE_PMSTATUSLOG].version = 0x01;
481 priv->smu_tables.entry[TABLE_PMSTATUSLOG].size = tools_size;
482
483
484 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
485 sizeof(OverDriveTable_t),
486 PAGE_SIZE,
487 AMDGPU_GEM_DOMAIN_VRAM,
488 &priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
489 &priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
490 &priv->smu_tables.entry[TABLE_OVERDRIVE].table);
491 if (ret)
492 goto err2;
493
494 priv->smu_tables.entry[TABLE_OVERDRIVE].version = 0x01;
495 priv->smu_tables.entry[TABLE_OVERDRIVE].size = sizeof(OverDriveTable_t);
496
497
498 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
499 sizeof(SmuMetrics_t),
500 PAGE_SIZE,
501 AMDGPU_GEM_DOMAIN_VRAM,
502 &priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
503 &priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
504 &priv->smu_tables.entry[TABLE_SMU_METRICS].table);
505 if (ret)
506 goto err3;
507
508 priv->smu_tables.entry[TABLE_SMU_METRICS].version = 0x01;
509 priv->smu_tables.entry[TABLE_SMU_METRICS].size = sizeof(SmuMetrics_t);
510
511
512 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
513 sizeof(DpmActivityMonitorCoeffInt_t),
514 PAGE_SIZE,
515 AMDGPU_GEM_DOMAIN_VRAM,
516 &priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].handle,
517 &priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr,
518 &priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table);
519 if (ret)
520 goto err4;
521
522 priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
523 priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);
524
525 if (adev->psp.ras.ras) {
526 ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
527 if (ret)
528 goto err4;
529 }
530
531 return 0;
532
533err4:
534 amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
535 &priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
536 &priv->smu_tables.entry[TABLE_SMU_METRICS].table);
537err3:
538 amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
539 &priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
540 &priv->smu_tables.entry[TABLE_OVERDRIVE].table);
541err2:
542 amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
543 &priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
544 &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
545err1:
546 amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
547 &priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
548 &priv->smu_tables.entry[TABLE_WATERMARKS].table);
549err0:
550 amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
551 &priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
552 &priv->smu_tables.entry[TABLE_PPTABLE].table);
553free_backend:
554 kfree(hwmgr->smu_backend);
555
556 return -EINVAL;
557}
558
/*
 * vega20_smu_fini - tear down the SMU backend.
 *
 * Shuts down the RAS EEPROM i2c controller when RAS is present, frees
 * every SMU table bounce buffer, then frees and clears the backend
 * private structure.  Safe to call when the backend was never set up
 * (priv == NULL).  Always returns 0.
 */
static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;

	if (adev->psp.ras.ras)
		smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);

	if (priv) {
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
				&priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
				&priv->smu_tables.entry[TABLE_PPTABLE].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
				&priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
				&priv->smu_tables.entry[TABLE_WATERMARKS].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
				&priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
				&priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
				&priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
				&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
				&priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
				&priv->smu_tables.entry[TABLE_SMU_METRICS].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].handle,
				&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr,
				&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table);
		kfree(hwmgr->smu_backend);
		hwmgr->smu_backend = NULL;
	}

	return 0;
}
593
/*
 * vega20_start_smu - confirm the SMC is running and set the tools address.
 *
 * On Vega20 the SMC firmware is brought up by the PSP, so this only
 * verifies via the firmware flags that it is already running, then
 * programs the PM status log (tools) buffer address.  Returns 0 on
 * success, -EINVAL when the SMC is not running, or the error from
 * vega20_set_tools_address().
 */
static int vega20_start_smu(struct pp_hwmgr *hwmgr)
{
	int ret;

	ret = vega20_is_smc_ram_running(hwmgr);
	PP_ASSERT_WITH_CODE(ret,
			"[Vega20StartSmu] SMC is not running!",
			return -EINVAL);

	ret = vega20_set_tools_address(hwmgr);
	PP_ASSERT_WITH_CODE(!ret,
			"[Vega20StartSmu] Failed to set tools address!",
			return ret);

	return 0;
}
610
611static bool vega20_is_dpm_running(struct pp_hwmgr *hwmgr)
612{
613 uint64_t features_enabled = 0;
614
615 vega20_get_enabled_smc_features(hwmgr, &features_enabled);
616
617 if (features_enabled & SMC_DPM_FEATURES)
618 return true;
619 else
620 return false;
621}
622
623static int vega20_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
624 uint16_t table_id, bool rw)
625{
626 int ret;
627
628 if (rw)
629 ret = vega20_copy_table_from_smc(hwmgr, table, table_id);
630 else
631 ret = vega20_copy_table_to_smc(hwmgr, table, table_id);
632
633 return ret;
634}
635
/* SMU-manager callback table for Vega20; unimplemented hooks are NULL. */
const struct pp_smumgr_func vega20_smu_funcs = {
	.name = "vega20_smu",
	.smu_init = &vega20_smu_init,
	.smu_fini = &vega20_smu_fini,
	.start_smu = &vega20_start_smu,
	.request_smu_load_specific_fw = NULL,
	.send_msg_to_smc = &vega20_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = &vega20_send_msg_to_smc_with_parameter,
	.download_pptable_settings = NULL,
	.upload_pptable_settings = NULL,
	.is_dpm_running = vega20_is_dpm_running,
	.get_argument = vega20_get_argument,
	.smc_table_manager = vega20_smc_table_manager,
};
650