#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include "hwmgr.h"

static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},

	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
	{0, NULL},
};

static const struct hwmon_temp_label {
	enum PP_HWMON_TEMP channel;
	const char *label;
} temp_label[] = {
	{PP_TEMP_EDGE, "edge"},
	{PP_TEMP_JUNCTION, "junction"},
	{PP_TEMP_MEM, "mem"},
};
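
/**
 * DOC: power_dpm_state
 *
 * The power_dpm_state file is a legacy interface for adjusting the dynamic
 * power management policy.  It accepts the strings "battery", "balanced",
 * and "performance"; reading the file returns the currently selected state.
 */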
static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type pm;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		if (adev->smu.ppt_funcs->get_current_power_state)
			pm = smu_get_current_power_state(&adev->smu);
		else
			pm = adev->pm.dpm.user_state;
	} else if (adev->powerplay.pp_funcs->get_current_power_state) {
		pm = amdgpu_dpm_get_current_power_state(adev);
	} else {
		pm = adev->pm.dpm.user_state;
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);
	} else if (adev->powerplay.pp_funcs->dispatch_tasks) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
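
/**
 * DOC: power_dpm_force_performance_level
 *
 * The power_dpm_force_performance_level file selects how the clock levels
 * are managed.  It accepts "auto", "low", "high", "manual",
 * "profile_standard", "profile_min_sclk", "profile_min_mclk",
 * "profile_peak", and "profile_exit"; reading the file returns the
 * currently forced level.
 */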
static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_dpm_forced_level level = 0xff;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		level = smu_get_performance_level(&adev->smu);
	else if (adev->powerplay.pp_funcs->get_performance_level)
		level = amdgpu_dpm_get_performance_level(adev);
	else
		level = adev->pm.dpm.forced_level;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
			(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
			(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
			"unknown");
}

static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    const char *buf,
							    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_dpm_forced_level level;
	enum amd_dpm_forced_level current_level = 0xff;
	int ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_MANUAL;
	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	} else {
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		current_level = smu_get_performance_level(&adev->smu);
	else if (adev->powerplay.pp_funcs->get_performance_level)
		current_level = amdgpu_dpm_get_performance_level(adev);

	if (current_level == level) {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return count;
	}

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	/* profile_exit is only valid when the current level is already one
	 * of the profile modes.
	 */
	if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
			       AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
			       AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
			       AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
		pr_err("Currently not in any profile mode!\n");
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return -EINVAL;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_force_performance_level(&adev->smu, level);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else if (adev->powerplay.pp_funcs->force_performance_level) {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		} else {
			adev->pm.dpm.forced_level = level;
		}
		mutex_unlock(&adev->pm.mutex);
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_num_states(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct pp_states_info data;
	int i, buf_len, ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_get_power_num_states(&adev->smu, &data);
		if (ret) {
			/* don't leak the runtime PM reference on failure */
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}
	} else if (adev->powerplay.pp_funcs->get_pp_num_states) {
		amdgpu_dpm_get_pp_num_states(adev, &data);
	} else {
		memset(&data, 0, sizeof(data));
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}

static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct pp_states_info data;
	struct smu_context *smu = &adev->smu;
	enum amd_pm_state_type pm = 0;
	int i = 0, ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		pm = smu_get_current_power_state(smu);
		ret = smu_get_power_num_states(smu, &data);
		if (ret) {
			/* don't leak the runtime PM reference on failure */
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}
	} else if (adev->powerplay.pp_funcs->get_current_power_state
		 && adev->powerplay.pp_funcs->get_pp_num_states) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);
	} else {
		/* neither backend can report states; keep data zeroed so the
		 * lookup below cleanly falls through to -EINVAL */
		memset(&data, 0, sizeof(data));
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	for (i = 0; i < data.nums; i++) {
		if (pm == data.states[i])
			break;
	}

	if (i == data.nums)
		i = -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%d\n", i);
}

static ssize_t amdgpu_get_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (adev->pp_force_state_enabled)
		return amdgpu_get_pp_cur_state(dev, attr, buf);
	else
		return snprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t amdgpu_set_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state = 0;
	unsigned long idx;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else if (is_support_sw_smu(adev))
		adev->pp_force_state_enabled = false;
	else if (adev->powerplay.pp_funcs->dispatch_tasks &&
		 adev->powerplay.pp_funcs->get_pp_num_states) {
		struct pp_states_info data;

		ret = kstrtoul(buf, 0, &idx);
		if (ret || idx >= ARRAY_SIZE(data.states))
			return -EINVAL;

		idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

		amdgpu_dpm_get_pp_num_states(adev, &data);
		state = data.states[idx];

		ret = pm_runtime_get_sync(ddev->dev);
		if (ret < 0) {
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}

		/* only set user selected power states */
		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
		    state != POWER_STATE_TYPE_DEFAULT) {
			amdgpu_dpm_dispatch_task(adev,
					AMD_PP_TASK_ENABLE_USER_STATE, &state);
			adev->pp_force_state_enabled = true;
		}
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	}

	return count;
}

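
/**
 * DOC: pp_table
 *
 * The pp_table file exposes the current powerplay table.  Reading the file
 * dumps the table (truncated to one page); writing a complete table
 * uploads a new one to the driver.
 */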
static ssize_t amdgpu_get_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	char *table = NULL;
	int size, ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		if (size < 0)
			return size;
	} else if (adev->powerplay.pp_funcs->get_pp_table) {
		size = amdgpu_dpm_get_pp_table(adev, &table);
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		if (size < 0)
			return size;
	} else {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return 0;
	}

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, table, size);

	return size;
}

static ssize_t amdgpu_set_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf,
				   size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}
	} else if (adev->powerplay.pp_funcs->set_pp_table)
		amdgpu_dpm_set_pp_table(adev, buf, count);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

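
/**
 * DOC: pp_od_clk_voltage
 *
 * The pp_od_clk_voltage file is the overdrive (overclocking) interface.
 * Writes starting with "s" or "m" edit an sclk or mclk level, "vc" edits a
 * point on the voltage curve, "r" restores the default tables, and "c"
 * commits the pending edits.  Reading the file prints the current OD
 * tables and the allowed ranges.
 */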
static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t parameter_size = 0;
	long parameter[64];
	char buf_cpy[128];
	char *tmp_str;
	char *sub_str;
	const char delimiter[3] = {' ', '\n', '\0'};
	uint32_t type;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (count > 127)
		return -EINVAL;

	if (*buf == 's')
		type = PP_OD_EDIT_SCLK_VDDC_TABLE;
	else if (*buf == 'm')
		type = PP_OD_EDIT_MCLK_VDDC_TABLE;
	else if (*buf == 'r')
		type = PP_OD_RESTORE_DEFAULT_TABLE;
	else if (*buf == 'c')
		type = PP_OD_COMMIT_DPM_TABLE;
	else if (!strncmp(buf, "vc", 2))
		type = PP_OD_EDIT_VDDC_CURVE;
	else
		return -EINVAL;

	memcpy(buf_cpy, buf, count+1);

	tmp_str = buf_cpy;

	if (type == PP_OD_EDIT_VDDC_CURVE)
		tmp_str++;
	while (isspace(*++tmp_str));

	while (tmp_str[0]) {
		sub_str = strsep(&tmp_str, delimiter);
		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
		if (ret)
			return -EINVAL;
		parameter_size++;

		while (isspace(*tmp_str))
			tmp_str++;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_od_edit_dpm_table(&adev->smu, type,
					    parameter, parameter_size);

		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else {

		if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
			ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
								parameter,
								parameter_size);
			if (ret) {
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return -EINVAL;
			}
		}

		if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
			ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
							    parameter, parameter_size);
			if (ret) {
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return -EINVAL;
			}
		}

		if (type == PP_OD_COMMIT_DPM_TABLE) {
			if (adev->powerplay.pp_funcs->dispatch_tasks) {
				amdgpu_dpm_dispatch_task(adev,
							 AMD_PP_TASK_READJUST_POWER_STATE,
							 NULL);
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return count;
			} else {
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return -EINVAL;
			}
		}
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
	} else if (adev->powerplay.pp_funcs->print_clock_levels) {
		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
	} else {
		size = snprintf(buf, PAGE_SIZE, "\n");
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

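
/**
 * DOC: pp_features
 *
 * The pp_features file controls the powerplay feature mask.  Reading it
 * lists the supported features and their state; writing a 64-bit mask
 * updates which features are enabled.
 */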
static ssize_t amdgpu_set_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t featuremask;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = kstrtou64(buf, 0, &featuremask);
	if (ret)
		return -EINVAL;

	pr_debug("featuremask = 0x%llx\n", featuremask);

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
		ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
	else if (adev->powerplay.pp_funcs->get_ppfeature_status)
		size = amdgpu_dpm_get_ppfeature_status(adev, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

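
/**
 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk
 * pp_dpm_pcie
 *
 * These files expose the available power levels for each clock domain.
 * Reading one lists its levels; writing a space-separated list of level
 * indices (parsed into a bitmask by amdgpu_read_mask() below) restricts
 * the domain to those levels.
 */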
static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

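
/*
 * Worst case: 32 bits individually specified, in octal at 12 characters
 * per line (+1 for \n).
 */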
#define AMDGPU_MASK_BUF_MAX (32 * 13)

static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
	int ret;
	long level;
	char *sub_str = NULL;
	char *tmp;
	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
	const char delimiter[3] = {' ', '\n', '\0'};
	size_t bytes;

	*mask = 0;

	bytes = min(count, sizeof(buf_cpy) - 1);
	memcpy(buf_cpy, buf, bytes);
	buf_cpy[bytes] = '\0';
	tmp = buf_cpy;
	while (tmp[0]) {
		sub_str = strsep(&tmp, delimiter);
		if (strlen(sub_str)) {
			ret = kstrtol(sub_str, 0, &level);
			if (ret)
				return -EINVAL;
			*mask |= 1 << level;
		} else
			break;
	}

	return 0;
}

static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t mask = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t mask = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
					struct device_attribute *attr,
					const char *buf,
					size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t mask = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t mask = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t mask = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t mask = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t value = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
	else if (adev->powerplay.pp_funcs->get_sclk_od)
		value = amdgpu_dpm_get_sclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	long int value;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = kstrtol(buf, 0, &value);

	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
	} else {
		if (adev->powerplay.pp_funcs->set_sclk_od)
			amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);

		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
		} else {
			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
			amdgpu_pm_compute_clocks(adev);
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t value = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
	else if (adev->powerplay.pp_funcs->get_mclk_od)
		value = amdgpu_dpm_get_mclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	long int value;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = kstrtol(buf, 0, &value);

	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
	} else {
		if (adev->powerplay.pp_funcs->set_mclk_od)
			amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);

		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
		} else {
			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
			amdgpu_pm_compute_clocks(adev);
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

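
/**
 * DOC: pp_power_profile_mode
 *
 * The pp_power_profile_mode file selects the power profile heuristics.
 * Reading it lists the available profiles; writing a profile number
 * selects one.  The CUSTOM profile additionally takes its parameters
 * after the profile number on the same line.
 */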
static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_get_power_profile_mode(&adev->smu, buf);
	else if (adev->powerplay.pp_funcs->get_power_profile_mode)
		size = amdgpu_dpm_get_power_profile_mode(adev, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t count)
{
	int ret;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t parameter_size = 0;
	long parameter[64];
	char *sub_str, buf_cpy[128];
	char *tmp_str;
	uint32_t i = 0;
	char tmp[2];
	long int profile_mode = 0;
	const char delimiter[3] = {' ', '\n', '\0'};

	if (amdgpu_in_reset(adev))
		return -EPERM;

	tmp[0] = *(buf);
	tmp[1] = '\0';
	ret = kstrtol(tmp, 0, &profile_mode);
	if (ret)
		return -EINVAL;

	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		if (count < 2 || count > 127)
			return -EINVAL;
		while (isspace(*++buf))
			i++;
		memcpy(buf_cpy, buf, count-i);
		tmp_str = buf_cpy;
		while (tmp_str[0]) {
			sub_str = strsep(&tmp_str, delimiter);
			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
			if (ret)
				return -EINVAL;
			parameter_size++;
			while (isspace(*tmp_str))
				tmp_str++;
		}
	}
	parameter[parameter_size] = profile_mode;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
	else if (adev->powerplay.pp_funcs->set_power_profile_mode)
		ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (!ret)
		return count;

	return -EINVAL;
}
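
/**
 * DOC: gpu_busy_percent
 *
 * The gpu_busy_percent file returns how busy the GPU is as a percentage,
 * read via the AMDGPU_PP_SENSOR_GPU_LOAD sensor.
 */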
static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int r, value, size = sizeof(value);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

	/* read the GPU load sensor */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

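
/**
 * DOC: mem_busy_percent
 *
 * The mem_busy_percent file returns how busy the memory controller is as
 * a percentage, read via the AMDGPU_PP_SENSOR_MEM_LOAD sensor.
 */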
static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int r, value, size = sizeof(value);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

	/* read the memory load sensor */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

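
/**
 * DOC: pcie_bw
 *
 * The pcie_bw file reports PCIe usage as two usage counters returned by
 * the ASIC followed by the maximum payload size in bytes.  It is not
 * available on APUs or on ASICs without a get_pcie_usage callback.
 */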
static ssize_t amdgpu_get_pcie_bw(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t count0 = 0, count1 = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (adev->flags & AMD_IS_APU)
		return -ENODATA;

	if (!adev->asic_funcs->get_pcie_usage)
		return -ENODATA;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
			count0, count1, pcie_get_mps(adev->pdev));
}

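
/**
 * DOC: unique_id
 *
 * The unique_id file returns a 16-digit hexadecimal identifier that is
 * unique to the GPU, when the ASIC provides one.
 */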
static ssize_t amdgpu_get_unique_id(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (adev->unique_id)
		return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);

	return 0;
}

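
/**
 * DOC: thermal_throttling_logging
 *
 * The thermal_throttling_logging file controls the rate-limited logging
 * of thermal throttling events.  Writing a positive interval (in seconds,
 * at most 3600) enables logging with that rate limit; writing 0 disables
 * it.  Reading the file shows the current setting.
 */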
static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
						     struct device_attribute *attr,
						     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n",
			adev_to_drm(adev)->unique,
			atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
			adev->throttling_logging_rs.interval / HZ + 1);
}

static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
						     struct device_attribute *attr,
						     const char *buf,
						     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	long throttling_logging_interval;
	unsigned long flags;
	int ret = 0;

	ret = kstrtol(buf, 0, &throttling_logging_interval);
	if (ret)
		return ret;

	if (throttling_logging_interval > 3600)
		return -EINVAL;

	if (throttling_logging_interval > 0) {
		raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
		/*
		 * Reset the ratelimit timer internals.
		 * This can effectively restart the timer.
		 */
		adev->throttling_logging_rs.interval =
			(throttling_logging_interval - 1) * HZ;
		adev->throttling_logging_rs.begin = 0;
		adev->throttling_logging_rs.printed = 0;
		adev->throttling_logging_rs.missed = 0;
		raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);

		atomic_set(&adev->throttling_logging_enabled, 1);
	} else {
		atomic_set(&adev->throttling_logging_enabled, 0);
	}

	return count;
}

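
/**
 * DOC: gpu_metrics
 *
 * The gpu_metrics file returns the current GPU metrics table as a binary
 * blob, truncated to one page.
 */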
static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	void *gpu_metrics;
	ssize_t size = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_sys_get_gpu_metrics(&adev->smu, &gpu_metrics);
	else if (adev->powerplay.pp_funcs->get_gpu_metrics)
		size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);

	if (size <= 0)
		goto out;

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, gpu_metrics, size);

out:
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static struct amdgpu_device_attr amdgpu_device_attrs[] = {
	AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC),
};

static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			       uint32_t mask, enum amdgpu_device_attr_states *states)
{
	struct device_attribute *dev_attr = &attr->dev_attr;
	const char *attr_name = dev_attr->attr.name;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	enum amd_asic_type asic_type = adev->asic_type;

	if (!(attr->flags & mask)) {
		*states = ATTR_STATE_UNSUPPORTED;
		return 0;
	}

#define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name))

	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
		if (asic_type < CHIP_VEGA10)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
		if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
		if (asic_type < CHIP_VEGA20)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
		if (asic_type == CHIP_ARCTURUS)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
		*states = ATTR_STATE_UNSUPPORTED;
		if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
		    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
			*states = ATTR_STATE_SUPPORTED;
	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
		if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pcie_bw)) {
		/* PCIe Perf counters won't work on APU nodes */
		if (adev->flags & AMD_IS_APU)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(unique_id)) {
		if (asic_type != CHIP_VEGA10 &&
		    asic_type != CHIP_VEGA20 &&
		    asic_type != CHIP_ARCTURUS)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_features)) {
		if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(gpu_metrics)) {
		if (asic_type < CHIP_VEGA12)
			*states = ATTR_STATE_UNSUPPORTED;
	}

	if (asic_type == CHIP_ARCTURUS) {
		/* Arcturus does not support standalone mclk/socclk/fclk level setting */
		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
			dev_attr->attr.mode &= ~S_IWUGO;
			dev_attr->store = NULL;
		}
	}

#undef DEVICE_ATTR_IS

	return 0;
}

static int amdgpu_device_attr_create(struct amdgpu_device *adev,
				     struct amdgpu_device_attr *attr,
				     uint32_t mask, struct list_head *attr_list)
{
	int ret = 0;
	struct device_attribute *dev_attr = &attr->dev_attr;
	const char *name = dev_attr->attr.name;
	enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
	struct amdgpu_device_attr_entry *attr_entry;

	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			   uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;

	BUG_ON(!attr);

	attr_update = attr->attr_update ? attr->attr_update : default_attr_update;

	ret = attr_update(adev, attr, mask, &attr_states);
	if (ret) {
		dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
			name, ret);
		return ret;
	}

	if (attr_states == ATTR_STATE_UNSUPPORTED)
		return 0;

	ret = device_create_file(adev->dev, dev_attr);
	if (ret) {
		dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
			name, ret);
	}

	attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
	if (!attr_entry)
		return -ENOMEM;

	attr_entry->attr = attr;
	INIT_LIST_HEAD(&attr_entry->entry);

	list_add_tail(&attr_entry->entry, attr_list);

	return ret;
}

static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
{
	struct device_attribute *dev_attr = &attr->dev_attr;

	device_remove_file(adev->dev, dev_attr);
}

static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
					     struct list_head *attr_list);

static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
					    struct amdgpu_device_attr *attrs,
					    uint32_t counts,
					    uint32_t mask,
					    struct list_head *attr_list)
{
	int ret = 0;
	uint32_t i = 0;

	for (i = 0; i < counts; i++) {
		ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
		if (ret)
			goto failed;
	}

	return 0;

failed:
	amdgpu_device_attr_remove_groups(adev, attr_list);

	return ret;
}

static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
					     struct list_head *attr_list)
{
	struct amdgpu_device_attr_entry *entry, *entry_tmp;

	if (list_empty(attr_list))
		return;

	list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
		amdgpu_device_attr_remove(adev, entry->attr);
		list_del(&entry->entry);
		kfree(entry);
	}
}

static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int channel = to_sensor_dev_attr(attr)->index;
	int r, temp = 0, size = sizeof(temp);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	switch (channel) {
	case PP_TEMP_JUNCTION:
		/* get current junction temperature */
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
					   (void *)&temp, &size);
		break;
	case PP_TEMP_EDGE:
		/* get current edge temperature */
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
					   (void *)&temp, &size);
		break;
	case PP_TEMP_MEM:
		/* get current memory temperature */
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
					   (void *)&temp, &size);
		break;
	default:
		r = -EINVAL;
		break;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
						     struct device_attribute *attr,
						     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_hotspot_temp;
	else
		temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
						 struct device_attribute *attr,
						 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_mem_temp;
	else
		temp = adev->pm.dpm.thermal.max_mem_crit_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

2298static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
2299 struct device_attribute *attr,
2300 char *buf)
2301{
2302 int channel = to_sensor_dev_attr(attr)->index;
2303
2304 if (channel >= PP_TEMP_MAX)
2305 return -EINVAL;
2306
2307 return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
2308}
2309
2310static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
2311 struct device_attribute *attr,
2312 char *buf)
2313{
2314 struct amdgpu_device *adev = dev_get_drvdata(dev);
2315 int channel = to_sensor_dev_attr(attr)->index;
2316 int temp = 0;
2317
2318 if (channel >= PP_TEMP_MAX)
2319 return -EINVAL;
2320
2321 switch (channel) {
2322 case PP_TEMP_JUNCTION:
2323 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
2324 break;
2325 case PP_TEMP_EDGE:
2326 temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
2327 break;
2328 case PP_TEMP_MEM:
2329 temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
2330 break;
2331 }
2332
2333 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
2334}
2335
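/*
 * pwm1_enable: fan control method.  The value reported and accepted is an
 * AMD_FAN_CTRL_* mode (AMD_FAN_CTRL_MANUAL, AMD_FAN_CTRL_AUTO, ...),
 * queried and set either through the SW SMU interface or the powerplay
 * get/set_fan_control_mode hooks.
 */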
static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		pwm_mode = smu_get_fan_control_mode(&adev->smu);
	} else {
		if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			return -EINVAL;
		}

		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return sprintf(buf, "%i\n", pwm_mode);
}

static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err, ret;
	int value;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		smu_set_fan_control_mode(&adev->smu, value);
	} else {
		if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			return -EINVAL;
		}

		amdgpu_dpm_set_fan_control_mode(adev, value);
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}

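/*
 * pwm1: fan speed as a PWM duty cycle in the usual hwmon 0-255 range.
 * The underlying DPM interfaces work in percent, so the store and show
 * paths scale between the two representations.  Writing pwm1 requires
 * manual fan control to be enabled first via pwm1_enable.
 */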
static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;
	u32 pwm_mode;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (err < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return err;
	}

	if (is_support_sw_smu(adev))
		pwm_mode = smu_get_fan_control_mode(&adev->smu);
	else
		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
		pr_info("manual fan speed control should be enabled first\n");
		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return -EINVAL;
	}

	err = kstrtou32(buf, 10, &value);
	if (err) {
		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return err;
	}

	/* scale the 0-255 PWM value to the percentage the DPM code expects */
	value = (value * 100) / 255;

	if (is_support_sw_smu(adev))
		err = smu_set_fan_speed_percent(&adev->smu, value);
	else if (adev->powerplay.pp_funcs->set_fan_speed_percent)
		err = amdgpu_dpm_set_fan_speed_percent(adev, value);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (err)
		return err;

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (err < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return err;
	}

	if (is_support_sw_smu(adev))
		err = smu_get_fan_speed_percent(&adev->smu, &speed);
	else if (adev->powerplay.pp_funcs->get_fan_speed_percent)
		err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (err)
		return err;

	/* scale the percentage back to the 0-255 PWM range */
	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}

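/*
 * fan1 interfaces: fan speed in RPM.  fan1_input reports the current
 * speed, fan1_min/fan1_max report the supported range as exposed by the
 * MIN/MAX_FAN_RPM sensors, and fan1_target reads or sets the requested
 * speed (manual fan control only).
 */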
static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (err < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return err;
	}

	if (is_support_sw_smu(adev))
		err = smu_get_fan_speed_rpm(&adev->smu, &speed);
	else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
		err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (err)
		return err;

	return sprintf(buf, "%i\n", speed);
}

static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 min_rpm = 0;
	u32 size = sizeof(min_rpm);
	int r;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
				   (void *)&min_rpm, &size);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
}

static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 max_rpm = 0;
	u32 size = sizeof(max_rpm);
	int r;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
				   (void *)&max_rpm, &size);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
}

static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 rpm = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (err < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return err;
	}

	if (is_support_sw_smu(adev))
		err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
	else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
		err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (err)
		return err;

	return sprintf(buf, "%i\n", rpm);
}

static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;
	u32 pwm_mode;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (err < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return err;
	}

	if (is_support_sw_smu(adev))
		pwm_mode = smu_get_fan_control_mode(&adev->smu);
	else
		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return -ENODATA;
	}

	err = kstrtou32(buf, 10, &value);
	if (err) {
		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return err;
	}

	if (is_support_sw_smu(adev))
		err = smu_set_fan_speed_rpm(&adev->smu, value);
	else if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
		err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (err)
		return err;

	return count;
}

static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		pwm_mode = smu_get_fan_control_mode(&adev->smu);
	} else {
		if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			return -EINVAL;
		}

		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
}

static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	int value;
	u32 pwm_mode;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	if (value == 0)
		pwm_mode = AMD_FAN_CTRL_AUTO;
	else if (value == 1)
		pwm_mode = AMD_FAN_CTRL_MANUAL;
	else
		return -EINVAL;

	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (err < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return err;
	}

	if (is_support_sw_smu(adev)) {
		smu_set_fan_control_mode(&adev->smu, pwm_mode);
	} else {
		if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			return -EINVAL;
		}
		amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return count;
}

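/*
 * Voltage interfaces: in0_input reports the graphics voltage (vddgfx) and
 * in1_input the northbridge voltage (vddnb, APUs only), both read through
 * amdgpu_dpm_read_sensor() in millivolts.
 */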
static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 vddgfx;
	int r, size = sizeof(vddgfx);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* get the voltage */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
				   (void *)&vddgfx, &size);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
}

static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	return snprintf(buf, PAGE_SIZE, "vddgfx\n");
}

static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 vddnb;
	int r, size = sizeof(vddnb);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	/* only APUs have vddnb */
	if (!(adev->flags & AMD_IS_APU))
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* get the voltage */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
				   (void *)&vddnb, &size);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
}

static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "vddnb\n");
}

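/*
 * Power interfaces: power1_average reports the average GPU power draw,
 * power1_cap_min/power1_cap_max the allowed cap range and power1_cap the
 * currently selected cap, all in microWatts as hwmon expects.
 */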
static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 query = 0;
	int r, size = sizeof(u32);
	unsigned int uw;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* get the average GPU power */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
				   (void *)&query, &size);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r)
		return r;

	/* convert to microwatts */
	uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;

	return snprintf(buf, PAGE_SIZE, "%u\n", uw);
}

static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	uint32_t limit = 0;
	ssize_t size;
	int r;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* the last argument selects the default (maximum) limit */
	if (is_support_sw_smu(adev)) {
		smu_get_power_limit(&adev->smu, &limit, true);
		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
	} else {
		size = snprintf(buf, PAGE_SIZE, "\n");
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return size;
}

static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	uint32_t limit = 0;
	ssize_t size;
	int r;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* the last argument selects the currently set limit */
	if (is_support_sw_smu(adev)) {
		smu_get_power_limit(&adev->smu, &limit, false);
		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
	} else {
		size = snprintf(buf, PAGE_SIZE, "\n");
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return size;
}

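/*
 * power1_cap store: the value is written in microWatts and converted to
 * Watts for the DPM interfaces.  Not supported under SR-IOV virtual
 * functions.
 */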
static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	/* convert from microWatts to Watts */
	value = value / 1000000;

	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (err < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return err;
	}

	if (is_support_sw_smu(adev))
		err = smu_set_power_limit(&adev->smu, value);
	else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit)
		err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (err)
		return err;

	return count;
}

static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	uint32_t sclk;
	int r, size = sizeof(sclk);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* get the sclk */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
				   (void *)&sclk, &size);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r)
		return r;

	/* the sensor reports the clock in 10 kHz units; hwmon expects Hz */
	return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
}

static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	return snprintf(buf, PAGE_SIZE, "sclk\n");
}

static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	uint32_t mclk;
	int r, size = sizeof(mclk);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* get the mclk */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
				   (void *)&mclk, &size);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r)
		return r;

	/* the sensor reports the clock in 10 kHz units; hwmon expects Hz */
	return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
}

static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	return snprintf(buf, PAGE_SIZE, "mclk\n");
}
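
/*
 * The hwmon interfaces registered below follow the standard hwmon sysfs
 * conventions:
 *
 * - temp[1-3]_input/_crit/_crit_hyst/_emergency/_label: edge, junction and
 *   memory temperature channels in millidegrees Celsius
 *
 * - pwm1, pwm1_enable, pwm1_min, pwm1_max: fan duty cycle (0-255) and
 *   fan control mode
 *
 * - fan1_input, fan1_min, fan1_max, fan1_target, fan1_enable: fan speed
 *   in RPM
 *
 * - in0_input/in0_label, in1_input/in1_label: vddgfx and vddnb (APU only)
 *   voltages in millivolts
 *
 * - power1_average, power1_cap, power1_cap_min, power1_cap_max: GPU power
 *   in microWatts
 *
 * - freq1_input/freq1_label, freq2_input/freq2_label: sclk and mclk in Hz
 *
 * Which attributes are actually visible on a given ASIC is decided by
 * hwmon_attributes_visible() below.
 */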
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp2_input.dev_attr.attr,
	&sensor_dev_attr_temp2_crit.dev_attr.attr,
	&sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp3_input.dev_attr.attr,
	&sensor_dev_attr_temp3_crit.dev_attr.attr,
	&sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp1_emergency.dev_attr.attr,
	&sensor_dev_attr_temp2_emergency.dev_attr.attr,
	&sensor_dev_attr_temp3_emergency.dev_attr.attr,
	&sensor_dev_attr_temp1_label.dev_attr.attr,
	&sensor_dev_attr_temp2_label.dev_attr.attr,
	&sensor_dev_attr_temp3_label.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	&sensor_dev_attr_fan1_min.dev_attr.attr,
	&sensor_dev_attr_fan1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_target.dev_attr.attr,
	&sensor_dev_attr_fan1_enable.dev_attr.attr,
	&sensor_dev_attr_in0_input.dev_attr.attr,
	&sensor_dev_attr_in0_label.dev_attr.attr,
	&sensor_dev_attr_in1_input.dev_attr.attr,
	&sensor_dev_attr_in1_label.dev_attr.attr,
	&sensor_dev_attr_power1_average.dev_attr.attr,
	&sensor_dev_attr_power1_cap_max.dev_attr.attr,
	&sensor_dev_attr_power1_cap_min.dev_attr.attr,
	&sensor_dev_attr_power1_cap.dev_attr.attr,
	&sensor_dev_attr_freq1_input.dev_attr.attr,
	&sensor_dev_attr_freq1_label.dev_attr.attr,
	&sensor_dev_attr_freq2_input.dev_attr.attr,
	&sensor_dev_attr_freq2_label.dev_attr.attr,
	NULL
};

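/*
 * Decide which hwmon attributes are visible for this device: attributes
 * are hidden when the underlying feature is absent (no fan, APU without
 * certain sensors, DPM disabled, SR-IOV restrictions) and pwm permissions
 * are trimmed when only one direction of fan control is implemented.
 */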
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* under multi-vf mode, the hwmon attributes are all not supported */
	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	/* there is no fan under pp one vf mode */
	if (amdgpu_sriov_is_pp_one_vf(adev) &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip fan attributes on APU */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip crit temp on APU */
	if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
		return 0;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	if (!is_support_sw_smu(adev)) {
		/* mask fan attributes if we have no bindings for this asic to expose */
		if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
		     attr == &sensor_dev_attr_pwm1.dev_attr.attr) ||
		    (!adev->powerplay.pp_funcs->get_fan_control_mode &&
		     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr))
			effective_mode &= ~S_IRUGO;

		if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
		     attr == &sensor_dev_attr_pwm1.dev_attr.attr) ||
		    (!adev->powerplay.pp_funcs->set_fan_control_mode &&
		     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr))
			effective_mode &= ~S_IWUSR;
	}

	/* no power caps on APUs or SI */
	if (((adev->flags & AMD_IS_APU) ||
	     adev->family == AMDGPU_FAMILY_SI) &&
	    (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
		return 0;

	/* no average power on SI or pre-Renoir APUs */
	if (((adev->family == AMDGPU_FAMILY_SI) ||
	     ((adev->flags & AMD_IS_APU) &&
	      (adev->asic_type < CHIP_RENOIR))) &&
	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
		return 0;

	if (!is_support_sw_smu(adev)) {
		/* hide max/min values if we can't both query and manage the fan */
		if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
		     !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
		    (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
		     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
		    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
		     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
			return 0;

		if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
		     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
		    (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
		     attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
			return 0;
	}

	/* no vddgfx reporting on SI and KV */
	if ((adev->family == AMDGPU_FAMILY_SI ||
	     adev->family == AMDGPU_FAMILY_KV) &&
	    (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in0_label.dev_attr.attr))
		return 0;

	/* only APUs have vddnb */
	if (!(adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in1_label.dev_attr.attr))
		return 0;

	/* no mclk on APUs */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
		return 0;

	/* only SOC15 dGPUs support hotspot and mem temperatures */
	if (((adev->flags & AMD_IS_APU) ||
	     adev->asic_type < CHIP_VEGA10) &&
	    (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
		return 0;

	return effective_mode;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

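/*
 * Register the hwmon device and the amdgpu-specific sysfs attributes.
 * The SR-IOV VF mode selects which attribute flags are allowed: one-VF
 * mode exposes only the ATTR_FLAG_ONEVF set, multi-VF mode exposes none,
 * and bare metal exposes everything.
 */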
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	int ret;
	uint32_t mask = 0;

	if (adev->pm.sysfs_initialized)
		return 0;

	if (adev->pm.dpm_enabled == 0)
		return 0;

	INIT_LIST_HEAD(&adev->pm.pm_attr_list);

	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

	switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
	case SRIOV_VF_MODE_ONE_VF:
		mask = ATTR_FLAG_ONEVF;
		break;
	case SRIOV_VF_MODE_MULTI_VF:
		mask = 0;
		break;
	case SRIOV_VF_MODE_BARE_METAL:
	default:
		mask = ATTR_FLAG_MASK_ALL;
		break;
	}

	ret = amdgpu_device_attr_create_groups(adev,
					       amdgpu_device_attrs,
					       ARRAY_SIZE(amdgpu_device_attrs),
					       mask,
					       &adev->pm.pm_attr_list);
	if (ret)
		return ret;

	adev->pm.sysfs_initialized = true;

	return 0;
}

void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled == 0)
		return;

	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);

	amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
	uint32_t value;
	uint64_t value64;
	uint32_t query = 0;
	int size;

	/* GPU Clocks */
	size = sizeof(value);
	seq_printf(m, "GFX Clocks and Power:\n");
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDNB)\n", value);
	size = sizeof(uint32_t);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
		seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
	size = sizeof(value);
	seq_printf(m, "\n");

	/* GPU Temp */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
		seq_printf(m, "GPU Temperature: %u C\n", value/1000);

	/* GPU Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
		seq_printf(m, "GPU Load: %u %%\n", value);
	/* MEM Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
		seq_printf(m, "MEM Load: %u %%\n", value);

	seq_printf(m, "\n");

	/* SMC feature mask */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
		seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);

	if (adev->asic_type > CHIP_VEGA20) {
		/* VCN clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "VCN: Disabled\n");
			} else {
				seq_printf(m, "VCN: Enabled\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
			}
		}
		seq_printf(m, "\n");
	} else {
		/* UVD clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "UVD: Disabled\n");
			} else {
				seq_printf(m, "UVD: Enabled\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
			}
		}
		seq_printf(m, "\n");

		/* VCE clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "VCE: Disabled\n");
			} else {
				seq_printf(m, "VCE: Enabled\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
			}
		}
	}

	return 0;
}

static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
{
	int i;

	for (i = 0; clocks[i].flag; i++)
		seq_printf(m, "\t%s: %s\n", clocks[i].name,
			   (flags & clocks[i].flag) ? "On" : "Off");
}

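/*
 * Top-level debugfs entry: prints the current performance level via the
 * powerplay hook when available, otherwise dumps the sensor-based summary
 * from amdgpu_debugfs_pm_info_pp(), followed by the clockgating state.
 */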
static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	u32 flags = 0;
	int r;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}

	if (!adev->pm.dpm_enabled) {
		seq_printf(m, "dpm not enabled\n");
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
		return 0;
	}

	if (!is_support_sw_smu(adev) &&
	    adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
		mutex_lock(&adev->pm.mutex);
		adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
		mutex_unlock(&adev->pm.mutex);
		r = 0;
	} else {
		r = amdgpu_debugfs_pm_info_pp(m, adev);
	}
	if (r)
		goto out;

	amdgpu_device_ip_get_clockgating_state(adev, &flags);

	seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
	amdgpu_parse_cg_state(m, flags);
	seq_printf(m, "\n");

out:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}

static const struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif

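/*
 * Register the amdgpu_pm_info debugfs file when debugfs support is built in.
 */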
int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}