/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define SWSMU_CODE_LAYER_L4

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_cmn.h"
#include "soc15_common.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

/*
 * Although these registers are defined in each ASIC's specific header
 * file, they share the same definitions and values. That makes common
 * APIs for issuing SMC messages possible across all ASICs.
 */
#define mmMP1_SMN_C2PMSG_66		0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX	0

#define mmMP1_SMN_C2PMSG_82		0x0292
#define mmMP1_SMN_C2PMSG_82_BASE_IDX	0

#define mmMP1_SMN_C2PMSG_90		0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX	0

#define MP1_C2PMSG_90__CONTENT_MASK	0xFFFFFFFFL

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type)	#type
static const char * const __smu_message_names[] = {
	SMU_MESSAGE_TYPES
};

static const char *smu_get_message_name(struct smu_context *smu,
					enum smu_message_type type)
{
	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";

	return __smu_message_names[type];
}

static void smu_cmn_read_arg(struct smu_context *smu,
			     uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}

/* Redefine the SMU error codes here.
 *
 * Note that these definitions are redundant and should be removed
 * once the SMU has exported a unified header file containing these
 * macros, which header file we can just include and use the SMU's
 * macros. At the moment, these error codes are defined like 0x40xxxx
 * in smu_v11_0.h and smu_v13_0.h.
 */
#define SMU_RESP_NONE           0
#define SMU_RESP_OK             1
#define SMU_RESP_CMD_FAIL       0xFF
#define SMU_RESP_CMD_UNKNOWN    0xFE
#define SMU_RESP_CMD_BAD_PREREQ 0xFD
#define SMU_RESP_BUSY_OTHER     0xFC
#define SMU_RESP_DEBUG_END      0xFB

/**
 * __smu_cmn_poll_stat -- poll for a status from the SMU
 * @smu: a pointer to SMU context
 *
 * Returns the status of the SMU, which could be,
 *    0, the SMU is busy with your command;
 *    1, execution status: success, execution result: success;
 * 0xFF, execution status: success, execution result: failure;
 * 0xFE, unknown command;
 * 0xFD, valid command, but bad (command) prerequisites;
 * 0xFC, the command was rejected as the SMU is busy;
 * 0xFB, "SMC_Result_DebugDataDumpEnd".
 */
static u32 __smu_cmn_poll_stat(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int timeout = adev->usec_timeout * 20;
	u32 reg;

	for ( ; timeout > 0; timeout--) {
		reg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
		if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			break;

		udelay(1);
	}

	return reg;
}
static void __smu_cmn_reg_print_error(struct smu_context *smu,
				      u32 reg_c2pmsg_90,
				      int msg_index,
				      u32 param,
				      enum smu_message_type msg)
{
	struct amdgpu_device *adev = smu->adev;
	const char *message = smu_get_message_name(smu, msg);

	switch (reg_c2pmsg_90) {
	case SMU_RESP_NONE:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm not done with your previous command!");
		break;
	case SMU_RESP_OK:
		/* The SMU executed the command. It completed with a
		 * successful result.
		 */
		break;
	case SMU_RESP_CMD_FAIL:
		/* The SMU executed the command. It completed with an
		 * unsuccessful result.
		 */
		break;
	case SMU_RESP_CMD_UNKNOWN:
		dev_err_ratelimited(adev->dev,
				    "SMU: unknown command: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		dev_err_ratelimited(adev->dev,
				    "SMU: valid command, bad prerequisites: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_BUSY_OTHER:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_DEBUG_END:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm debugging!");
		break;
	default:
		dev_err_ratelimited(adev->dev,
				    "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
				    reg_c2pmsg_90, msg_index, param, message);
		break;
	}
}

static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90)
{
	int res;

	switch (reg_c2pmsg_90) {
	case SMU_RESP_NONE:
		/* The SMU is busy--still executing your command.
		 */
		res = -ETIME;
		break;
	case SMU_RESP_OK:
		res = 0;
		break;
	case SMU_RESP_CMD_FAIL:
		/* The command was executed, but the execution
		 * result was failure.
		 */
		res = -EIO;
		break;
	case SMU_RESP_CMD_UNKNOWN:
		/* Unknown command--ignored by the SMU.
		 */
		res = -EOPNOTSUPP;
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		/* Valid command--bad prerequisites.
		 */
		res = -EINVAL;
		break;
	case SMU_RESP_BUSY_OTHER:
		/* The SMU is busy with other commands. The client
		 * should retry.
		 */
		res = -EBUSY;
		break;
	default:
		/* Unknown or debug response from the SMU.
		 */
		res = -EREMOTEIO;
		break;
	}

	return res;
}

static void __smu_cmn_send_msg(struct smu_context *smu,
			       u16 msg,
			       u32 param)
{
	struct amdgpu_device *adev = smu->adev;

	/* Clear the response register, write the argument, and only
	 * then write the message index: the write to C2PMSG_66 is
	 * what triggers the SMU to process the message.
	 */
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
}

/**
 * smu_cmn_send_msg_without_waiting -- send the message; don't wait for status
 * @smu: pointer to an SMU context
 * @msg_index: message index
 * @param: message parameter to send to the SMU
 *
 * Send a message to the SMU with the parameter passed. Do not wait
 * for status/result of the message, thus the "without_waiting".
 * The SMU's current status is polled first; if it is still busy with
 * a previous command, or in an undefined state, the message is not sent.
 *
 * Return 0 on success, -errno on error if we weren't able to _send_
 * the message for some reason. See __smu_cmn_reg2errno() for details
 * of the -errno.
 */
int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
				     uint16_t msg_index,
				     uint32_t param)
{
	u32 reg;
	int res;

	if (smu->adev->no_hw_access)
		return 0;

	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (reg == SMU_RESP_NONE ||
	    reg == SMU_RESP_BUSY_OTHER ||
	    res == -EREMOTEIO)
		goto Out;
	__smu_cmn_send_msg(smu, msg_index, param);
	res = 0;
Out:
	return res;
}
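
/* A minimal usage sketch (hypothetical caller, not taken from this file):
 * fire the message off, do other work, then collect the status separately
 * with smu_cmn_wait_for_response(). The msg_index value would come from the
 * ASIC's message map via smu_cmn_to_asic_specific_index().
 *
 *	res = smu_cmn_send_msg_without_waiting(smu, msg_index, param);
 *	if (res)
 *		return res;
 *	// ... work that can proceed while the SMU executes the message ...
 *	res = smu_cmn_wait_for_response(smu);
 */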

/**
 * smu_cmn_wait_for_response -- wait for response from the SMU
 * @smu: pointer to an SMU context
 *
 * Wait for status from the SMU.
 *
 * Return 0 on success, -errno on error, indicating the execution
 * status and result of the message being waited for. See
 * __smu_cmn_reg2errno() for details of the -errno.
 */
int smu_cmn_wait_for_response(struct smu_context *smu)
{
	u32 reg;

	reg = __smu_cmn_poll_stat(smu);
	return __smu_cmn_reg2errno(smu, reg);
}

/**
 * smu_cmn_send_smc_msg_with_param -- send a message with parameter
 * @smu: pointer to an SMU context
 * @msg: message to send
 * @param: parameter to send to the SMU
 * @read_arg: pointer to u32 to return a value from the SMU back
 *            to the caller
 *
 * Send the message @msg with parameter @param to the SMU, wait for
 * completion of the command, and return back a value from the SMU in
 * @read_arg pointer.
 *
 * Return 0 on success, -errno on error, if we weren't able to send
 * the message or if the message completed with some kind of
 * error. See __smu_cmn_reg2errno() for details of the -errno.
 *
 * If we weren't able to send the message to the SMU, we also print
 * the error to the standard kernel log. After a message is sent, its
 * completion status is printed only when it maps to -EREMOTEIO, i.e.
 * when the SMU returned an undefined or unknown result.
 */
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
				    enum smu_message_type msg,
				    uint32_t param,
				    uint32_t *read_arg)
{
	int res, index;
	u32 reg;

	if (smu->adev->no_hw_access)
		return 0;

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       msg);
	if (index < 0)
		return index == -EACCES ? 0 : index;

	mutex_lock(&smu->message_lock);
	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (reg == SMU_RESP_NONE ||
	    reg == SMU_RESP_BUSY_OTHER ||
	    res == -EREMOTEIO) {
		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
		goto Out;
	}
	__smu_cmn_send_msg(smu, (uint16_t) index, param);
	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (res == -EREMOTEIO)
		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
	if (read_arg)
		smu_cmn_read_arg(smu, read_arg);
Out:
	mutex_unlock(&smu->message_lock);
	return res;
}
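
/* A minimal usage sketch (hypothetical caller): send a common message and
 * read the SMU's reply back from the argument register. SMU_MSG_GetSmuVersion
 * is one of the common message types routed through the per-ASIC message map.
 *
 *	uint32_t smu_version;
 *	int ret;
 *
 *	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetSmuVersion,
 *					      0, &smu_version);
 *	if (ret)
 *		return ret;
 */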

int smu_cmn_send_smc_msg(struct smu_context *smu,
			 enum smu_message_type msg,
			 uint32_t *read_arg)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       msg,
					       0,
					       read_arg);
}

int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index)
{
	struct cmn2asic_msg_mapping msg_mapping;
	struct cmn2asic_mapping mapping;

	switch (type) {
	case CMN2ASIC_MAPPING_MSG:
		if (index >= SMU_MSG_MAX_COUNT ||
		    !smu->message_map)
			return -EINVAL;

		msg_mapping = smu->message_map[index];
		if (!msg_mapping.valid_mapping)
			return -EINVAL;

		if (amdgpu_sriov_vf(smu->adev) &&
		    !msg_mapping.valid_in_vf)
			return -EACCES;

		return msg_mapping.map_to;

	case CMN2ASIC_MAPPING_CLK:
		if (index >= SMU_CLK_COUNT ||
		    !smu->clock_map)
			return -EINVAL;

		mapping = smu->clock_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_FEATURE:
		if (index >= SMU_FEATURE_COUNT ||
		    !smu->feature_map)
			return -EINVAL;

		mapping = smu->feature_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_TABLE:
		if (index >= SMU_TABLE_COUNT ||
		    !smu->table_map)
			return -EINVAL;

		mapping = smu->table_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_PWR:
		if (index >= SMU_POWER_SOURCE_COUNT ||
		    !smu->pwr_src_map)
			return -EINVAL;

		mapping = smu->pwr_src_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_WORKLOAD:
		if (index > PP_SMC_POWER_PROFILE_CUSTOM ||
		    !smu->workload_map)
			return -EINVAL;

		mapping = smu->workload_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	default:
		return -EINVAL;
	}
}
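
/* The maps consulted above (message_map, clock_map, feature_map, table_map,
 * pwr_src_map, workload_map) are populated by each ASIC's powerplay table
 * implementation. As a rough sketch of the ASIC side (the exact MSG_MAP()
 * macro shape and the entries vary per ASIC generation; this is illustrative
 * only):
 *
 *	static struct cmn2asic_msg_mapping some_asic_message_map[SMU_MSG_MAX_COUNT] = {
 *		MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
 *		// ...
 *	};
 *
 * The last MSG_MAP() argument records whether the message is valid in a VF,
 * which is what the amdgpu_sriov_vf() check above tests.
 */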

int smu_cmn_feature_is_supported(struct smu_context *smu,
				 enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_cmn_feature_is_enabled(struct smu_context *smu,
			       enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	struct amdgpu_device *adev = smu->adev;
	int feature_id;
	int ret = 0;

	if (smu->is_apu && adev->family < AMDGPU_FAMILY_VGH)
		return 1;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);

	return ret;
}

bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
				enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_cmn_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}

int smu_cmn_get_enabled_mask(struct smu_context *smu,
			     uint32_t *feature_mask,
			     uint32_t num)
{
	uint32_t feature_mask_high = 0, feature_mask_low = 0;
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	if (!feature_mask || num < 2)
		return -EINVAL;

	if (bitmap_empty(feature->enabled, feature->feature_num)) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
		if (ret)
			return ret;

		feature_mask[0] = feature_mask_low;
		feature_mask[1] = feature_mask_high;
	} else {
		bitmap_copy((unsigned long *)feature_mask, feature->enabled,
			    feature->feature_num);
	}

	return ret;
}

int smu_cmn_get_enabled_32_bits_mask(struct smu_context *smu,
				     uint32_t *feature_mask,
				     uint32_t num)
{
	uint32_t feature_mask_en_low = 0;
	uint32_t feature_mask_en_high = 0;
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	if (!feature_mask || num < 2)
		return -EINVAL;

	if (bitmap_empty(feature->enabled, feature->feature_num)) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 0,
						      &feature_mask_en_low);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 1,
						      &feature_mask_en_high);
		if (ret)
			return ret;

		feature_mask[0] = feature_mask_en_low;
		feature_mask[1] = feature_mask_en_high;
	} else {
		bitmap_copy((unsigned long *)feature_mask, feature->enabled,
			    feature->feature_num);
	}

	return ret;
}

uint64_t smu_cmn_get_indep_throttler_status(
					const unsigned long dep_status,
					const uint8_t *throttler_map)
{
	uint64_t indep_status = 0;
	uint8_t dep_bit = 0;

	for_each_set_bit(dep_bit, &dep_status, 32)
		indep_status |= 1ULL << throttler_map[dep_bit];

	return indep_status;
}
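
/* Worked example (hypothetical map): if the firmware reports dependent bit 3
 * set in its ASIC-specific throttler status, and that ASIC's throttler_map[3]
 * holds the common THROTTLER_TEMP_EDGE_BIT, the returned mask has that
 * ASIC-independent bit set instead:
 *
 *	static const uint8_t some_asic_throttler_map[32] = {
 *		[3] = THROTTLER_TEMP_EDGE_BIT,
 *		// ...
 *	};
 *
 *	indep = smu_cmn_get_indep_throttler_status(dep_status,
 *						   some_asic_throttler_map);
 */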

int smu_cmn_feature_update_enable_state(struct smu_context *smu,
					uint64_t feature_mask,
					bool enabled)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	if (enabled) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
	}

	mutex_lock(&feature->mutex);
	if (enabled)
		bitmap_or(feature->enabled, feature->enabled,
			  (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	else
		bitmap_andnot(feature->enabled, feature->enabled,
			      (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_cmn_feature_set_enabled(struct smu_context *smu,
				enum smu_feature_mask mask,
				bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return -EINVAL;

	WARN_ON(feature_id > feature->feature_num);

	return smu_cmn_feature_update_enable_state(smu,
						   1ULL << feature_id,
						   enable);
}

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea)	#fea
static const char *__smu_feature_names[] = {
	SMU_FEATURE_MASKS
};

static const char *smu_get_feature_name(struct smu_context *smu,
					enum smu_feature_mask feature)
{
	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";
	return __smu_feature_names[feature];
}

size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
				   char *buf)
{
	uint32_t feature_mask[2] = { 0 };
	int feature_index = 0;
	uint32_t count = 0;
	int8_t sort_feature[SMU_FEATURE_COUNT];
	size_t size = 0;
	int ret = 0, i;

	if (!smu->is_apu) {
		ret = smu_cmn_get_enabled_mask(smu,
					       feature_mask,
					       2);
		if (ret)
			return 0;
	} else {
		ret = smu_cmn_get_enabled_32_bits_mask(smu,
						       feature_mask,
						       2);
		if (ret)
			return 0;
	}

	size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
			     feature_mask[1], feature_mask[0]);

	memset(sort_feature, -1, sizeof(sort_feature));

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_FEATURE,
							       i);
		if (feature_index < 0)
			continue;

		sort_feature[feature_index] = i;
	}

	size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n",
			      "No", "Feature", "Bit", "State");

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		if (sort_feature[i] < 0)
			continue;

		size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
				      count++,
				      smu_get_feature_name(smu, sort_feature[i]),
				      i,
				      !!smu_cmn_feature_is_enabled(smu, sort_feature[i]) ?
				      "enabled" : "disabled");
	}

	return size;
}

int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
				uint64_t new_mask)
{
	int ret = 0;
	uint32_t feature_mask[2] = { 0 };
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;
	uint64_t feature_enables = 0;

	ret = smu_cmn_get_enabled_mask(smu,
				       feature_mask,
				       2);
	if (ret)
		return ret;

	feature_enables = ((uint64_t)feature_mask[1] << 32 |
			   (uint64_t)feature_mask[0]);

	feature_2_enabled = ~feature_enables & new_mask;
	feature_2_disabled = feature_enables & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_enabled,
							  true);
		if (ret)
			return ret;
	}
	if (feature_2_disabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_disabled,
							  false);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * smu_cmn_disable_all_features_with_exception -- disable all dpm features
 *                                                except the one specified
 * @smu: pointer to an SMU context
 * @no_hw_disablement: whether real dpm disablement should be performed
 *                     true: update the cache (about dpm enablement state) only
 *                     false: real dpm disablement plus cache update
 * @mask: the dpm feature which should not be disabled
 *        SMU_FEATURE_COUNT: no exception, disable all dpm features
 *
 * Returns 0 on success or a negative error code on failure.
 */
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
						bool no_hw_disablement,
						enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint64_t features_to_disable = U64_MAX;
	int skipped_feature_id;

	if (mask != SMU_FEATURE_COUNT) {
		skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
								    CMN2ASIC_MAPPING_FEATURE,
								    mask);
		if (skipped_feature_id < 0)
			return -EINVAL;

		features_to_disable &= ~(1ULL << skipped_feature_id);
	}

	if (no_hw_disablement) {
		mutex_lock(&feature->mutex);
		bitmap_andnot(feature->enabled, feature->enabled,
			      (unsigned long *)(&features_to_disable), SMU_FEATURE_MAX);
		mutex_unlock(&feature->mutex);

		return 0;
	} else {
		return smu_cmn_feature_update_enable_state(smu,
							   features_to_disable,
							   false);
	}
}

int smu_cmn_get_smc_version(struct smu_context *smu,
			    uint32_t *if_version,
			    uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (smu->smc_fw_if_version && smu->smc_fw_version) {
		if (if_version)
			*if_version = smu->smc_fw_if_version;

		if (smu_version)
			*smu_version = smu->smc_fw_version;

		return 0;
	}

	if (if_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
		if (ret)
			return ret;

		smu->smc_fw_if_version = *if_version;
	}

	if (smu_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
		if (ret)
			return ret;

		smu->smc_fw_version = *smu_version;
	}

	return ret;
}

int smu_cmn_update_table(struct smu_context *smu,
			 enum smu_table_id table_index,
			 int argument,
			 void *table_data,
			 bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = &smu_table->driver_table;
	int table_id = smu_cmn_to_asic_specific_index(smu,
						      CMN2ASIC_MAPPING_TABLE,
						      table_index);
	uint32_t table_size;
	int ret = 0;

	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table_size = smu_table->tables[table_index].size;

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush the HDP cache so that the table contents
		 * written by the CPU are visible to the GPU.
		 */
		amdgpu_asic_flush_hdp(adev, NULL);
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
					      SMU_MSG_TransferTableDram2Smu :
					      SMU_MSG_TransferTableSmu2Dram,
					      table_id | ((argument & 0xFFFF) << 16),
					      NULL);
	if (ret)
		return ret;

	if (!drv2smu) {
		amdgpu_asic_invalidate_hdp(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}

	return 0;
}

int smu_cmn_write_watermarks_table(struct smu_context *smu)
{
	void *watermarks_table = smu->smu_table.watermarks_table;

	if (!watermarks_table)
		return -EINVAL;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_WATERMARKS,
				    0,
				    watermarks_table,
				    true);
}

int smu_cmn_write_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.driver_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_PPTABLE,
				    0,
				    pptable,
				    true);
}

int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
				     void *metrics_table,
				     bool bypass_cache)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t table_size =
		smu_table->tables[SMU_TABLE_SMU_METRICS].size;
	int ret = 0;

	if (bypass_cache ||
	    !smu_table->metrics_time ||
	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_SMU_METRICS,
					   0,
					   smu_table->metrics_table,
					   false);
		if (ret) {
			dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
			return ret;
		}
		smu_table->metrics_time = jiffies;
	}

	if (metrics_table)
		memcpy(metrics_table, smu_table->metrics_table, table_size);

	return 0;
}

int smu_cmn_get_metrics_table(struct smu_context *smu,
			      void *metrics_table,
			      bool bypass_cache)
{
	int ret = 0;

	mutex_lock(&smu->metrics_lock);
	ret = smu_cmn_get_metrics_table_locked(smu,
					       metrics_table,
					       bypass_cache);
	mutex_unlock(&smu->metrics_lock);

	return ret;
}

void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
{
	struct metrics_table_header *header = (struct metrics_table_header *)table;
	uint16_t structure_size;

#define METRICS_VERSION(a, b)	(((a) << 16) | (b))

	switch (METRICS_VERSION(frev, crev)) {
	case METRICS_VERSION(1, 0):
		structure_size = sizeof(struct gpu_metrics_v1_0);
		break;
	case METRICS_VERSION(1, 1):
		structure_size = sizeof(struct gpu_metrics_v1_1);
		break;
	case METRICS_VERSION(1, 2):
		structure_size = sizeof(struct gpu_metrics_v1_2);
		break;
	case METRICS_VERSION(1, 3):
		structure_size = sizeof(struct gpu_metrics_v1_3);
		break;
	case METRICS_VERSION(2, 0):
		structure_size = sizeof(struct gpu_metrics_v2_0);
		break;
	case METRICS_VERSION(2, 1):
		structure_size = sizeof(struct gpu_metrics_v2_1);
		break;
	case METRICS_VERSION(2, 2):
		structure_size = sizeof(struct gpu_metrics_v2_2);
		break;
	default:
		return;
	}

#undef METRICS_VERSION

	memset(header, 0xFF, structure_size);

	header->format_revision = frev;
	header->content_revision = crev;
	header->structure_size = structure_size;
}
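
/* A minimal usage sketch (hypothetical ASIC callback; v2.2 layout and the
 * metrics/gpu_metrics_table names assumed): a get_gpu_metrics implementation
 * initializes the header first, then fills in the metric fields before
 * handing the table to userspace.
 *
 *	struct gpu_metrics_v2_2 *gpu_metrics = smu_table->gpu_metrics_table;
 *
 *	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);
 *	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
 *	// ... remaining fields ...
 */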

int smu_cmn_set_mp1_state(struct smu_context *smu,
			  enum pp_mp1_state mp1_state)
{
	enum smu_message_type msg;
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		return 0;
	}

	ret = smu_cmn_send_smc_msg(smu, msg, NULL);
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

	return ret;
}

bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;
	bool snd_driver_loaded;

	/*
	 * If the ASIC comes with no audio function, we always assume
	 * it is "enabled".
	 */
	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
					adev->pdev->bus->number, 1);
	if (!p)
		return true;

	snd_driver_loaded = pci_is_enabled(p) ? true : false;

	pci_dev_put(p);

	return snd_driver_loaded;
}