#define SWSMU_CODE_LAYER_L4

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_cmn.h"
#include "soc15_common.h"

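/*
 * Do not use the pr_err/pr_warn/pr_info/pr_debug helpers in this file;
 * use the dev_err/dev_warn/dev_info/dev_dbg variants instead so that
 * messages identify the specific GPU (multi-GPU friendly).
 */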
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

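/*
 * MP1 mailbox registers used for driver <-> SMU messaging:
 * C2PMSG_66 carries the message index, C2PMSG_82 the message argument,
 * and C2PMSG_90 the SMU response/status.
 */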
#define mmMP1_SMN_C2PMSG_66 0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0

#define mmMP1_SMN_C2PMSG_82 0x0292
#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0

#define mmMP1_SMN_C2PMSG_90 0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0

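/* SMU 13.0.5 uses its own MP1 mailbox registers for messaging */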
#define mmMP1_C2PMSG_2 (0xbee142 + 0xb00000 / 4)
#define mmMP1_C2PMSG_2_BASE_IDX 0

#define mmMP1_C2PMSG_34 (0xbee262 + 0xb00000 / 4)
#define mmMP1_C2PMSG_34_BASE_IDX 0

#define mmMP1_C2PMSG_33 (0xbee261 + 0xb00000 / 4)
#define mmMP1_C2PMSG_33_BASE_IDX 0

#define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) #type
static const char * const __smu_message_names[] = {
	SMU_MESSAGE_TYPES
};

#define smu_cmn_call_asic_func(intf, smu, args...) \
	((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ? \
			(smu)->ppt_funcs->intf(smu, ##args) : \
			-ENOTSUPP) : \
	 -EINVAL)

static const char *smu_get_message_name(struct smu_context *smu,
					enum smu_message_type type)
{
	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";

	return __smu_message_names[type];
}

static void smu_cmn_read_arg(struct smu_context *smu,
			     uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 5))
		*arg = RREG32_SOC15(MP1, 0, mmMP1_C2PMSG_34);
	else
		*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}

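/*
 * Response codes read back from the SMU response register once a
 * command has been processed (SMU_RESP_NONE means the SMU is still
 * busy with the previous command).
 */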
#define SMU_RESP_NONE           0
#define SMU_RESP_OK             1
#define SMU_RESP_CMD_FAIL       0xFF
#define SMU_RESP_CMD_UNKNOWN    0xFE
#define SMU_RESP_CMD_BAD_PREREQ 0xFD
#define SMU_RESP_BUSY_OTHER     0xFC
#define SMU_RESP_DEBUG_END      0xFB

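/**
 * __smu_cmn_poll_stat -- poll for a status from the SMU
 * @smu: a pointer to SMU context
 *
 * Busy-poll the SMU response register until it becomes non-zero or the
 * timeout expires, and return its raw value. SMU_RESP_NONE means the
 * SMU is still busy executing the previous command; the other
 * SMU_RESP_* values describe the result of that command.
 */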
static u32 __smu_cmn_poll_stat(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int timeout = adev->usec_timeout * 20;
	u32 reg;

	for ( ; timeout > 0; timeout--) {
		if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 5))
			reg = RREG32_SOC15(MP1, 0, mmMP1_C2PMSG_33);
		else
			reg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
		if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			break;

		udelay(1);
	}

	return reg;
}

static void __smu_cmn_reg_print_error(struct smu_context *smu,
				      u32 reg_c2pmsg_90,
				      int msg_index,
				      u32 param,
				      enum smu_message_type msg)
{
	struct amdgpu_device *adev = smu->adev;
	const char *message = smu_get_message_name(smu, msg);
	u32 msg_idx, prm;

	switch (reg_c2pmsg_90) {
	case SMU_RESP_NONE: {
		if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 5)) {
			msg_idx = RREG32_SOC15(MP1, 0, mmMP1_C2PMSG_2);
			prm = RREG32_SOC15(MP1, 0, mmMP1_C2PMSG_34);
		} else {
			msg_idx = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66);
			prm = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
		}
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
				    msg_idx, prm);
		}
		break;
	case SMU_RESP_OK:
		/* The SMU executed the command and completed successfully;
		 * nothing to report.
		 */
		break;
	case SMU_RESP_CMD_FAIL:
		/* The SMU executed the command, but the command itself
		 * failed; the caller reports this via the decoded errno.
		 */
		break;
	case SMU_RESP_CMD_UNKNOWN:
		dev_err_ratelimited(adev->dev,
				    "SMU: unknown command: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		dev_err_ratelimited(adev->dev,
				    "SMU: valid command, bad prerequisites: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_BUSY_OTHER:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_DEBUG_END:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm debugging!");
		break;
	default:
		dev_err_ratelimited(adev->dev,
				    "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
				    reg_c2pmsg_90, msg_index, param, message);
		break;
	}
}

static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90)
{
	int res;

	switch (reg_c2pmsg_90) {
	case SMU_RESP_NONE:
		/* The SMU is busy--still executing your command. */
		res = -ETIME;
		break;
	case SMU_RESP_OK:
		res = 0;
		break;
	case SMU_RESP_CMD_FAIL:
		/* The command was processed, but its execution result
		 * is failure.
		 */
		res = -EIO;
		break;
	case SMU_RESP_CMD_UNKNOWN:
		/* The command is unknown to the SMU firmware. */
		res = -EOPNOTSUPP;
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		/* The command is valid, but its prerequisites were not
		 * satisfied.
		 */
		res = -EINVAL;
		break;
	case SMU_RESP_BUSY_OTHER:
		/* The SMU is busy with other commands; the caller may
		 * retry.
		 */
		res = -EBUSY;
		break;
	default:
		/* Unknown or unhandled response code. */
		res = -EREMOTEIO;
		break;
	}

	return res;
}

static void __smu_cmn_send_msg(struct smu_context *smu,
			       u16 msg,
			       u32 param)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 5)) {
		WREG32_SOC15(MP1, 0, mmMP1_C2PMSG_33, 0);
		WREG32_SOC15(MP1, 0, mmMP1_C2PMSG_34, param);
		WREG32_SOC15(MP1, 0, mmMP1_C2PMSG_2, msg);
	} else {
		WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
		WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
		WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
	}
}

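/**
 * smu_cmn_send_msg_without_waiting -- send the message; don't wait for status
 * @smu: pointer to an SMU context
 * @msg_index: message index
 * @param: message parameter to send to the SMU
 *
 * Send a message to the SMU with the parameter passed. Do not wait for
 * the status/result of the message, thus the "without_waiting". If the
 * response register still holds the status of a previous command, or
 * that status decodes to an unknown/fatal error, the message is not
 * sent.
 *
 * Return 0 on success, -errno if the message could not be sent.
 */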
int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
				     uint16_t msg_index,
				     uint32_t param)
{
	struct amdgpu_device *adev = smu->adev;
	u32 reg;
	int res;

	if (adev->no_hw_access)
		return 0;

	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (reg == SMU_RESP_NONE ||
	    res == -EREMOTEIO)
		goto Out;
	__smu_cmn_send_msg(smu, msg_index, param);
	res = 0;
Out:
	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    res && (res != -ETIME)) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	return res;
}

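/**
 * smu_cmn_wait_for_response -- wait for response from the SMU
 * @smu: pointer to an SMU context
 *
 * Poll the SMU response register and decode it into an errno.
 *
 * Return 0 on success, -errno on error, indicating the execution
 * status and result of the message being waited for.
 */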
int smu_cmn_wait_for_response(struct smu_context *smu)
{
	u32 reg;
	int res;

	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);

	if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    res && (res != -ETIME)) {
		amdgpu_device_halt(smu->adev);
		WARN_ON(1);
	}

	return res;
}

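/**
 * smu_cmn_send_smc_msg_with_param -- send a message with a parameter
 * @smu: pointer to an SMU context
 * @msg: message to send
 * @param: parameter to send to the SMU
 * @read_arg: pointer to u32 to return a value from the SMU back to the caller
 *
 * Send the message @msg with parameter @param to the SMU, wait for
 * completion of the command, and optionally return the SMU's result in
 * @read_arg. The send and the read of @read_arg are done under
 * smu->message_lock, so the returned argument corresponds to this
 * message.
 *
 * Return 0 on success; -EINVAL if the message has no mapping for this
 * ASIC; 0 if the message is filtered out for a virtual function; or
 * the errno decoded from the SMU response (e.g. -ETIME on timeout,
 * -EIO if the command failed).
 */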
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
				    enum smu_message_type msg,
				    uint32_t param,
				    uint32_t *read_arg)
{
	struct amdgpu_device *adev = smu->adev;
	int res, index;
	u32 reg;

	if (adev->no_hw_access)
		return 0;

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       msg);
	if (index < 0)
		return index == -EACCES ? 0 : index;

	mutex_lock(&smu->message_lock);
	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (reg == SMU_RESP_NONE ||
	    res == -EREMOTEIO) {
		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
		goto Out;
	}
	__smu_cmn_send_msg(smu, (uint16_t) index, param);
	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (res != 0)
		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
	if (read_arg)
		smu_cmn_read_arg(smu, read_arg);
Out:
	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	mutex_unlock(&smu->message_lock);
	return res;
}

int smu_cmn_send_smc_msg(struct smu_context *smu,
			 enum smu_message_type msg,
			 uint32_t *read_arg)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       msg,
					       0,
					       read_arg);
}

int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index)
{
	struct cmn2asic_msg_mapping msg_mapping;
	struct cmn2asic_mapping mapping;

	switch (type) {
	case CMN2ASIC_MAPPING_MSG:
		if (index >= SMU_MSG_MAX_COUNT ||
		    !smu->message_map)
			return -EINVAL;

		msg_mapping = smu->message_map[index];
		if (!msg_mapping.valid_mapping)
			return -EINVAL;

		if (amdgpu_sriov_vf(smu->adev) &&
		    !msg_mapping.valid_in_vf)
			return -EACCES;

		return msg_mapping.map_to;

	case CMN2ASIC_MAPPING_CLK:
		if (index >= SMU_CLK_COUNT ||
		    !smu->clock_map)
			return -EINVAL;

		mapping = smu->clock_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_FEATURE:
		if (index >= SMU_FEATURE_COUNT ||
		    !smu->feature_map)
			return -EINVAL;

		mapping = smu->feature_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_TABLE:
		if (index >= SMU_TABLE_COUNT ||
		    !smu->table_map)
			return -EINVAL;

		mapping = smu->table_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_PWR:
		if (index >= SMU_POWER_SOURCE_COUNT ||
		    !smu->pwr_src_map)
			return -EINVAL;

		mapping = smu->pwr_src_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_WORKLOAD:
		if (index > PP_SMC_POWER_PROFILE_CUSTOM ||
		    !smu->workload_map)
			return -EINVAL;

		mapping = smu->workload_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	default:
		return -EINVAL;
	}
}

int smu_cmn_feature_is_supported(struct smu_context *smu,
				 enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	return test_bit(feature_id, feature->supported);
}

static int __smu_get_enabled_features(struct smu_context *smu,
				      uint64_t *enabled_features)
{
	return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features);
}

int smu_cmn_feature_is_enabled(struct smu_context *smu,
			       enum smu_feature_mask mask)
{
	struct amdgpu_device *adev = smu->adev;
	uint64_t enabled_features;
	int feature_id;

	if (__smu_get_enabled_features(smu, &enabled_features)) {
		dev_err(adev->dev, "Failed to retrieve enabled ppfeatures!\n");
		return 0;
	}

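	/*
	 * An all-ones mask means the firmware reports every feature as
	 * enabled, so the per-feature lookup below can be skipped.
	 */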
	if (enabled_features == ULLONG_MAX)
		return 1;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	return test_bit(feature_id, (unsigned long *)&enabled_features);
}

bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
				enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_cmn_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}

int smu_cmn_get_enabled_mask(struct smu_context *smu,
			     uint64_t *feature_mask)
{
	uint32_t *feature_mask_high;
	uint32_t *feature_mask_low;
	int ret = 0, index = 0;

	if (!feature_mask)
		return -EINVAL;

	feature_mask_low = &((uint32_t *)feature_mask)[0];
	feature_mask_high = &((uint32_t *)feature_mask)[1];

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       SMU_MSG_GetEnabledSmuFeatures);
	if (index > 0) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_GetEnabledSmuFeatures,
						      0,
						      feature_mask_low);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_GetEnabledSmuFeatures,
						      1,
						      feature_mask_high);
	} else {
		ret = smu_cmn_send_smc_msg(smu,
					   SMU_MSG_GetEnabledSmuFeaturesHigh,
					   feature_mask_high);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg(smu,
					   SMU_MSG_GetEnabledSmuFeaturesLow,
					   feature_mask_low);
	}

	return ret;
}

uint64_t smu_cmn_get_indep_throttler_status(
					const unsigned long dep_status,
					const uint8_t *throttler_map)
{
	uint64_t indep_status = 0;
	uint8_t dep_bit = 0;

	for_each_set_bit(dep_bit, &dep_status, 32)
		indep_status |= 1ULL << throttler_map[dep_bit];

	return indep_status;
}

int smu_cmn_feature_update_enable_state(struct smu_context *smu,
					uint64_t feature_mask,
					bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
	}

	return ret;
}

int smu_cmn_feature_set_enabled(struct smu_context *smu,
				enum smu_feature_mask mask,
				bool enable)
{
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return -EINVAL;

	return smu_cmn_feature_update_enable_state(smu,
						   1ULL << feature_id,
						   enable);
}

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea) #fea
static const char * const __smu_feature_names[] = {
	SMU_FEATURE_MASKS
};

static const char *smu_get_feature_name(struct smu_context *smu,
					enum smu_feature_mask feature)
{
	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";
	return __smu_feature_names[feature];
}

size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
				   char *buf)
{
	uint64_t feature_mask;
	int feature_index = 0;
	uint32_t count = 0;
	int8_t sort_feature[SMU_FEATURE_COUNT];
	size_t size = 0;
	int ret = 0, i;
	int feature_id;

	ret = __smu_get_enabled_features(smu, &feature_mask);
	if (ret)
		return 0;

	size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
			upper_32_bits(feature_mask), lower_32_bits(feature_mask));

	memset(sort_feature, -1, sizeof(sort_feature));

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_FEATURE,
							       i);
		if (feature_index < 0)
			continue;

		sort_feature[feature_index] = i;
	}

	size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n",
			"No", "Feature", "Bit", "State");

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		if (sort_feature[i] < 0)
			continue;

		/* Convert to the ASIC-specific feature bit for this entry */
		feature_id = smu_cmn_to_asic_specific_index(smu,
							    CMN2ASIC_MAPPING_FEATURE,
							    sort_feature[i]);
		if (feature_id < 0)
			continue;

		size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
				count++,
				smu_get_feature_name(smu, sort_feature[i]),
				i,
				!!test_bit(feature_id, (unsigned long *)&feature_mask) ?
				"enabled" : "disabled");
	}

	return size;
}

int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
				uint64_t new_mask)
{
	int ret = 0;
	uint64_t feature_mask;
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;

	ret = __smu_get_enabled_features(smu, &feature_mask);
	if (ret)
		return ret;

	feature_2_enabled = ~feature_mask & new_mask;
	feature_2_disabled = feature_mask & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_enabled,
							  true);
		if (ret)
			return ret;
	}
	if (feature_2_disabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_disabled,
							  false);
		if (ret)
			return ret;
	}

	return ret;
}

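/**
 * smu_cmn_disable_all_features_with_exception -- disable all features except the one specified
 * @smu: a pointer to SMU context
 * @mask: the feature to keep enabled, or SMU_FEATURE_COUNT to disable every feature
 *
 * Build a mask covering all SMU features, clear the bit mapped from
 * @mask (unless @mask is SMU_FEATURE_COUNT), and ask the firmware to
 * disable the resulting feature set.
 *
 * Return 0 on success, a negative errno otherwise.
 */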
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
						enum smu_feature_mask mask)
{
	uint64_t features_to_disable = U64_MAX;
	int skipped_feature_id;

	if (mask != SMU_FEATURE_COUNT) {
		skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
								    CMN2ASIC_MAPPING_FEATURE,
								    mask);
		if (skipped_feature_id < 0)
			return -EINVAL;

		features_to_disable &= ~(1ULL << skipped_feature_id);
	}

	return smu_cmn_feature_update_enable_state(smu,
						   features_to_disable,
						   0);
}

int smu_cmn_get_smc_version(struct smu_context *smu,
			    uint32_t *if_version,
			    uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (smu->smc_fw_if_version && smu->smc_fw_version) {
		if (if_version)
			*if_version = smu->smc_fw_if_version;

		if (smu_version)
			*smu_version = smu->smc_fw_version;

		return 0;
	}

	if (if_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
		if (ret)
			return ret;

		smu->smc_fw_if_version = *if_version;
	}

	if (smu_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
		if (ret)
			return ret;

		smu->smc_fw_version = *smu_version;
	}

	return ret;
}

int smu_cmn_update_table(struct smu_context *smu,
			 enum smu_table_id table_index,
			 int argument,
			 void *table_data,
			 bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = &smu_table->driver_table;
	int table_id = smu_cmn_to_asic_specific_index(smu,
						      CMN2ASIC_MAPPING_TABLE,
						      table_index);
	uint32_t table_size;
	int ret = 0;

	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table_size = smu_table->tables[table_index].size;

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush the HDP cache so that the table contents written
		 * by the CPU are visible to the GPU.
		 */
		amdgpu_asic_flush_hdp(adev, NULL);
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
					      SMU_MSG_TransferTableDram2Smu :
					      SMU_MSG_TransferTableSmu2Dram,
					      table_id | ((argument & 0xFFFF) << 16),
					      NULL);
	if (ret)
		return ret;

	if (!drv2smu) {
		amdgpu_asic_invalidate_hdp(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}

	return 0;
}

int smu_cmn_write_watermarks_table(struct smu_context *smu)
{
	void *watermarks_table = smu->smu_table.watermarks_table;

	if (!watermarks_table)
		return -EINVAL;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_WATERMARKS,
				    0,
				    watermarks_table,
				    true);
}

int smu_cmn_write_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.driver_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_PPTABLE,
				    0,
				    pptable,
				    true);
}

int smu_cmn_get_metrics_table(struct smu_context *smu,
			      void *metrics_table,
			      bool bypass_cache)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t table_size =
		smu_table->tables[SMU_TABLE_SMU_METRICS].size;
	int ret = 0;

	if (bypass_cache ||
	    !smu_table->metrics_time ||
	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_SMU_METRICS,
					   0,
					   smu_table->metrics_table,
					   false);
		if (ret) {
			dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
			return ret;
		}
		smu_table->metrics_time = jiffies;
	}

	if (metrics_table)
		memcpy(metrics_table, smu_table->metrics_table, table_size);

	return 0;
}

void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
{
	struct metrics_table_header *header = (struct metrics_table_header *)table;
	uint16_t structure_size;

#define METRICS_VERSION(a, b)	((a << 16) | b)

	switch (METRICS_VERSION(frev, crev)) {
	case METRICS_VERSION(1, 0):
		structure_size = sizeof(struct gpu_metrics_v1_0);
		break;
	case METRICS_VERSION(1, 1):
		structure_size = sizeof(struct gpu_metrics_v1_1);
		break;
	case METRICS_VERSION(1, 2):
		structure_size = sizeof(struct gpu_metrics_v1_2);
		break;
	case METRICS_VERSION(1, 3):
		structure_size = sizeof(struct gpu_metrics_v1_3);
		break;
	case METRICS_VERSION(2, 0):
		structure_size = sizeof(struct gpu_metrics_v2_0);
		break;
	case METRICS_VERSION(2, 1):
		structure_size = sizeof(struct gpu_metrics_v2_1);
		break;
	case METRICS_VERSION(2, 2):
		structure_size = sizeof(struct gpu_metrics_v2_2);
		break;
	default:
		return;
	}

#undef METRICS_VERSION

	memset(header, 0xFF, structure_size);

	header->format_revision = frev;
	header->content_revision = crev;
	header->structure_size = structure_size;
}

int smu_cmn_set_mp1_state(struct smu_context *smu,
			  enum pp_mp1_state mp1_state)
{
	enum smu_message_type msg;
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		return 0;
	}

	ret = smu_cmn_send_smc_msg(smu, msg, NULL);
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

	return ret;
}

bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;
	bool snd_driver_loaded;

	/*
	 * Check whether the audio function (PCI function 1) of this GPU
	 * has been enabled by its driver. If the ASIC has no audio
	 * function at all, report it as "enabled".
	 */
	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
					adev->pdev->bus->number, 1);
	if (!p)
		return true;

	snd_driver_loaded = pci_is_enabled(p) ? true : false;

	pci_dev_put(p);

	return snd_driver_loaded;
}