#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include <asm/processor.h>
#include "hwmgr.h"

static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
	{AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
	{AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
	{AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},

	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
	{0, NULL},
};

static const struct hwmon_temp_label {
	enum PP_HWMON_TEMP channel;
	const char *label;
} temp_label[] = {
	{PP_TEMP_EDGE, "edge"},
	{PP_TEMP_JUNCTION, "junction"},
	{PP_TEMP_MEM, "mem"},
};

const char * const amdgpu_pp_profile_name[] = {
	"BOOTUP_DEFAULT",
	"3D_FULL_SCREEN",
	"POWER_SAVING",
	"VIDEO",
	"VR",
	"COMPUTE",
	"CUSTOM"
};
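
/**
 * DOC: power_dpm_state
 *
 * Selects the dpm state. Reading returns the current state ("battery",
 * "balanced" or "performance"); writing one of those strings requests
 * the corresponding state. Illustrative usage (the card index may
 * differ on your system):
 *
 *   $ echo battery > /sys/class/drm/card0/device/power_dpm_state
 */
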
static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_pm_state_type pm;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (pp_funcs->get_current_power_state)
		pm = amdgpu_dpm_get_current_power_state(adev);
	else
		pm = adev->pm.dpm.user_state;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return sysfs_emit(buf, "%s\n",
			  (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			  (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);
	} else if (adev->powerplay.pp_funcs->dispatch_tasks) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
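
/**
 * DOC: power_dpm_force_performance_level
 *
 * Selects the dpm performance level. Reading returns the current level;
 * writing accepts one of: "auto", "low", "high", "manual",
 * "profile_exit", "profile_standard", "profile_min_sclk",
 * "profile_min_mclk", "profile_peak" or "perf_determinism". Forcing
 * specific clock levels through the pp_dpm_* files generally requires
 * selecting "manual" first. Illustrative usage (card index may differ):
 *
 *   $ echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 */
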
static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_dpm_forced_level level = 0xff;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->get_performance_level)
		level = amdgpu_dpm_get_performance_level(adev);
	else
		level = adev->pm.dpm.forced_level;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return sysfs_emit(buf, "%s\n",
			  (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			  (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
			  (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
			  (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
			  (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
			  "unknown");
}

static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    const char *buf,
							    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;
	enum amd_dpm_forced_level current_level;
	int ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_MANUAL;
	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	} else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
	} else {
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (pp_funcs->get_performance_level)
		current_level = amdgpu_dpm_get_performance_level(adev);
	else
		current_level = adev->pm.dpm.forced_level;

	if (current_level == level) {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return count;
	}

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

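	/* profile_exit is only valid while one of the profile_* levels is active */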
	if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
			       AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
			       AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
			       AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
		pr_err("Currently not in any profile mode!\n");
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return -EINVAL;
	}

	if (pp_funcs->force_performance_level) {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
		adev->pm.dpm.forced_level = level;
		mutex_unlock(&adev->pm.mutex);
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_num_states(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct pp_states_info data;
	uint32_t i;
	int buf_len, ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (pp_funcs->get_pp_num_states)
		amdgpu_dpm_get_pp_num_states(adev, &data);
	else
		memset(&data, 0, sizeof(data));

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}

static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct pp_states_info data = {0};
	enum amd_pm_state_type pm = 0;
	int i = 0, ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (pp_funcs->get_current_power_state &&
	    pp_funcs->get_pp_num_states) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	for (i = 0; i < data.nums; i++) {
		if (pm == data.states[i])
			break;
	}

	if (i == data.nums)
		i = -EINVAL;

	return sysfs_emit(buf, "%d\n", i);
}

static ssize_t amdgpu_get_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (adev->pp_force_state_enabled)
		return amdgpu_get_pp_cur_state(dev, attr, buf);
	else
		return sysfs_emit(buf, "\n");
}

static ssize_t amdgpu_set_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state = 0;
	unsigned long idx;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else if (is_support_sw_smu(adev))
		adev->pp_force_state_enabled = false;
	else if (adev->powerplay.pp_funcs->dispatch_tasks &&
		 adev->powerplay.pp_funcs->get_pp_num_states) {
		struct pp_states_info data;

		ret = kstrtoul(buf, 0, &idx);
		if (ret || idx >= ARRAY_SIZE(data.states))
			return -EINVAL;

		idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

		amdgpu_dpm_get_pp_num_states(adev, &data);
		state = data.states[idx];

		ret = pm_runtime_get_sync(ddev->dev);
		if (ret < 0) {
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}

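		/* only user-selectable states may be forced, not boot/default */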
		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
		    state != POWER_STATE_TYPE_DEFAULT) {
			amdgpu_dpm_dispatch_task(adev,
					AMD_PP_TASK_ENABLE_USER_STATE, &state);
			adev->pp_force_state_enabled = true;
		}
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	}

	return count;
}
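
/**
 * DOC: pp_table
 *
 * Reading pp_table dumps the current (binary) powerplay table used by
 * the driver; writing uploads a replacement table. The table format is
 * ASIC-specific, so a table read from one device should only be written
 * back to a compatible device.
 */
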
static ssize_t amdgpu_get_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	char *table = NULL;
	int size, ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->get_pp_table) {
		size = amdgpu_dpm_get_pp_table(adev, &table);
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		if (size < 0)
			return size;
	} else {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return 0;
	}

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, table, size);

	return size;
}

static ssize_t amdgpu_set_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf,
				   size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	ret = amdgpu_dpm_set_pp_table(adev, buf, count);
	if (ret) {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
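
/**
 * DOC: pp_od_clk_voltage
 *
 * Overclocking/overdrive interface. Reading dumps the editable tables
 * (OD_SCLK, OD_MCLK, OD_VDDC_CURVE, OD_VDDGFX_OFFSET, OD_RANGE and, on
 * some APUs, OD_CCLK). Writes select a table by prefix and supply new
 * values:
 *
 *   "s ..."  - sclk/vddc table      "m ..."  - mclk/vddc table
 *   "p ..."  - cclk/vddc table      "vc ..." - vddc curve points
 *   "vo ..." - vddgfx offset        "r"      - restore defaults
 *   "c"      - commit pending edits
 *
 * The exact argument format of each edit command is ASIC-dependent.
 * Illustrative example only (the values are placeholders, not
 * recommendations):
 *
 *   $ echo "s 0 500" > pp_od_clk_voltage
 *   $ echo "c" > pp_od_clk_voltage
 */
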
static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t parameter_size = 0;
	long parameter[64];
	char buf_cpy[128];
	char *tmp_str;
	char *sub_str;
	const char delimiter[3] = {' ', '\n', '\0'};
	uint32_t type;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (count > 127)
		return -EINVAL;

	if (*buf == 's')
		type = PP_OD_EDIT_SCLK_VDDC_TABLE;
	else if (*buf == 'p')
		type = PP_OD_EDIT_CCLK_VDDC_TABLE;
	else if (*buf == 'm')
		type = PP_OD_EDIT_MCLK_VDDC_TABLE;
	else if (*buf == 'r')
		type = PP_OD_RESTORE_DEFAULT_TABLE;
	else if (*buf == 'c')
		type = PP_OD_COMMIT_DPM_TABLE;
	else if (!strncmp(buf, "vc", 2))
		type = PP_OD_EDIT_VDDC_CURVE;
	else if (!strncmp(buf, "vo", 2))
		type = PP_OD_EDIT_VDDGFX_OFFSET;
	else
		return -EINVAL;

	memcpy(buf_cpy, buf, count+1);

	tmp_str = buf_cpy;

	if ((type == PP_OD_EDIT_VDDC_CURVE) ||
	    (type == PP_OD_EDIT_VDDGFX_OFFSET))
		tmp_str++;
	while (isspace(*++tmp_str));

	while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
		if (strlen(sub_str) == 0)
			continue;
		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
		if (ret)
			return -EINVAL;
		parameter_size++;

		while (isspace(*tmp_str))
			tmp_str++;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
		ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
							parameter,
							parameter_size);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	}

	if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
		ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
						    parameter, parameter_size);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	}

	if (type == PP_OD_COMMIT_DPM_TABLE) {
		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev,
						 AMD_PP_TASK_READJUST_POWER_STATE,
						 NULL);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return count;
		} else {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->print_clock_levels) {
		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf+size);
	} else {
		size = sysfs_emit(buf, "\n");
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
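
/**
 * DOC: pp_features
 *
 * Reading shows the status of the individual powerplay features;
 * writing a 64-bit mask (base auto-detected, so a "0x" prefix works)
 * enables or disables them wholesale. Which bit maps to which feature
 * is ASIC-dependent; read the file first to see the mapping.
 */
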
static ssize_t amdgpu_set_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t featuremask;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = kstrtou64(buf, 0, &featuremask);
	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->set_ppfeature_status) {
		ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->get_ppfeature_status)
		size = amdgpu_dpm_get_ppfeature_status(adev, buf);
	else
		size = sysfs_emit(buf, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
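
/**
 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_vclk pp_dpm_dclk pp_dpm_dcefclk pp_dpm_pcie
 *
 * Reading one of these files lists the available dpm levels for the
 * corresponding clock (or PCIe link), with the active level marked.
 * Writing a space-separated list of level indices restricts dpm to
 * those levels; selecting the "manual" performance level first is
 * generally required for the write to take effect. Illustrative usage
 * (card index may differ):
 *
 *   $ cat /sys/class/drm/card0/device/pp_dpm_sclk
 *   $ echo "1 2" > /sys/class/drm/card0/device/pp_dpm_sclk
 */
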
static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
				       enum pp_clock_type type,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, type, buf);
	else
		size = sysfs_emit(buf, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

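/*
 * Parse a space- or newline-separated list of dpm level indices
 * (e.g. "0 1 2") into a bitmask. Level indices above 31 are rejected;
 * an empty token terminates the scan.
 */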
#define AMDGPU_MASK_BUF_MAX (32 * 13)

static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
	int ret;
	unsigned long level;
	char *sub_str = NULL;
	char *tmp;
	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
	const char delimiter[3] = {' ', '\n', '\0'};
	size_t bytes;

	*mask = 0;

	bytes = min(count, sizeof(buf_cpy) - 1);
	memcpy(buf_cpy, buf, bytes);
	buf_cpy[bytes] = '\0';
	tmp = buf_cpy;
	while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
		if (strlen(sub_str)) {
			ret = kstrtoul(sub_str, 0, &level);
			if (ret || level > 31)
				return -EINVAL;
			*mask |= 1 << level;
		} else {
			break;
		}
	}

	return 0;
}

static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
				       enum pp_clock_type type,
				       const char *buf,
				       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t mask = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, type, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
					struct device_attribute *attr,
					const char *buf,
					size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
}

static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
}

static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t value = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		value = 0;
	else if (adev->powerplay.pp_funcs->get_sclk_od)
		value = amdgpu_dpm_get_sclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return sysfs_emit(buf, "%d\n", value);
}

static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	long int value;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = kstrtol(buf, 0, &value);
	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		value = 0;
	} else {
		if (adev->powerplay.pp_funcs->set_sclk_od)
			amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);

		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
		} else {
			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
			amdgpu_pm_compute_clocks(adev);
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t value = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		value = 0;
	else if (adev->powerplay.pp_funcs->get_mclk_od)
		value = amdgpu_dpm_get_mclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return sysfs_emit(buf, "%d\n", value);
}

static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	long int value;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = kstrtol(buf, 0, &value);
	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		value = 0;
	} else {
		if (adev->powerplay.pp_funcs->set_mclk_od)
			amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);

		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
		} else {
			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
			amdgpu_pm_compute_clocks(adev);
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
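
/**
 * DOC: pp_power_profile_mode
 *
 * Reading lists the available power profile modes and their heuristic
 * parameters, with the active mode marked. Writing selects a mode by
 * its numeric index; for the CUSTOM mode the index is followed by the
 * heuristic parameters, which are ASIC-specific. Illustrative usage:
 *
 *   $ echo 1 > pp_power_profile_mode
 */
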
static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->get_power_profile_mode)
		size = amdgpu_dpm_get_power_profile_mode(adev, buf);
	else
		size = sysfs_emit(buf, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t count)
{
	int ret;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t parameter_size = 0;
	long parameter[64];
	char *sub_str, buf_cpy[128];
	char *tmp_str;
	uint32_t i = 0;
	char tmp[2];
	long int profile_mode = 0;
	const char delimiter[3] = {' ', '\n', '\0'};

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	tmp[0] = *(buf);
	tmp[1] = '\0';
	ret = kstrtol(tmp, 0, &profile_mode);
	if (ret)
		return -EINVAL;

	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		if (count < 2 || count > 127)
			return -EINVAL;
		while (isspace(*++buf))
			i++;
		memcpy(buf_cpy, buf, count-i);
		tmp_str = buf_cpy;
		while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
			if (strlen(sub_str) == 0)
				continue;
			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
			if (ret)
				return -EINVAL;
			parameter_size++;
			while (isspace(*tmp_str))
				tmp_str++;
		}
	}
	parameter[parameter_size] = profile_mode;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->set_power_profile_mode)
		ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (!ret)
		return count;

	return -EINVAL;
}
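
/**
 * DOC: gpu_busy_percent
 *
 * Read-only. Returns how busy the GPU is, as a percentage.
 */
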
static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int r, value, size = sizeof(value);

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

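	/* read the GPU load sensor */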
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", value);
}
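
/**
 * DOC: mem_busy_percent
 *
 * Read-only. Returns how busy the memory controller is, as a
 * percentage. Not available on APUs or Vega10.
 */
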
static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int r, value, size = sizeof(value);

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

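	/* read the memory load sensor */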
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", value);
}
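
/**
 * DOC: pcie_bw
 *
 * Read-only. Emits three values: two PCIe usage counters sampled by the
 * ASIC (see the asic-specific get_pcie_usage() implementation for their
 * exact meaning) followed by the PCIe maximum payload size in bytes.
 * Not available on APUs.
 */
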
static ssize_t amdgpu_get_pcie_bw(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t count0 = 0, count1 = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (adev->flags & AMD_IS_APU)
		return -ENODATA;

	if (!adev->asic_funcs->get_pcie_usage)
		return -ENODATA;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return sysfs_emit(buf, "%llu %llu %i\n",
			  count0, count1, pcie_get_mps(adev->pdev));
}
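
/**
 * DOC: unique_id
 *
 * Read-only. Returns a 16-hex-digit identifier that is unique to the
 * GPU. Only exposed on ASICs that provide one (limited to
 * Vega10/Vega20/Arcturus/Aldebaran by the attribute update callback
 * below).
 */
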
static ssize_t amdgpu_get_unique_id(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (adev->unique_id)
		return sysfs_emit(buf, "%016llx\n", adev->unique_id);

	return 0;
}
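
/**
 * DOC: thermal_throttling_logging
 *
 * Controls the rate-limited logging of thermal throttling events.
 * Reading shows whether logging is enabled and the current minimum
 * interval between messages. Writing 0 disables logging; writing N
 * (1..3600) enables it with an N-second interval, e.g.:
 *
 *   $ echo 60 > thermal_throttling_logging
 */
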
static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
						     struct device_attribute *attr,
						     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
			  adev_to_drm(adev)->unique,
			  atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
			  adev->throttling_logging_rs.interval / HZ + 1);
}

static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
						     struct device_attribute *attr,
						     const char *buf,
						     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	long throttling_logging_interval;
	unsigned long flags;
	int ret = 0;

	ret = kstrtol(buf, 0, &throttling_logging_interval);
	if (ret)
		return ret;

	if (throttling_logging_interval > 3600)
		return -EINVAL;

	if (throttling_logging_interval > 0) {
		raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
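		/*
		 * Reset the ratelimit state so the new interval takes
		 * effect immediately rather than after the old window.
		 */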
		adev->throttling_logging_rs.interval =
			(throttling_logging_interval - 1) * HZ;
		adev->throttling_logging_rs.begin = 0;
		adev->throttling_logging_rs.printed = 0;
		adev->throttling_logging_rs.missed = 0;
		raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);

		atomic_set(&adev->throttling_logging_enabled, 1);
	} else {
		atomic_set(&adev->throttling_logging_enabled, 0);
	}

	return count;
}
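
/**
 * DOC: gpu_metrics
 *
 * Read-only. Returns a binary snapshot of the ASIC's gpu_metrics table
 * (clocks, temperatures, utilization, power, throttling status, ...).
 * The layout is versioned and ASIC-specific; userspace should parse it
 * with the matching gpu_metrics structure definition.
 */
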
static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	void *gpu_metrics;
	ssize_t size = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->get_gpu_metrics)
		size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);

	if (size <= 0)
		goto out;

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, gpu_metrics, size);

out:
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
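
/**
 * DOC: smartshift_apu_power
 *
 * Read-only. On SmartShift platforms, shows the APU's share of the
 * shiftable power budget as a percentage, as reported by the
 * AMDGPU_PP_SENSOR_SS_APU_SHARE sensor.
 */
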
static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
					       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t ss_power, size;
	int r = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
				   (void *)&ss_power, &size);
	if (r)
		goto out;

	r = sysfs_emit(buf, "%u%%\n", ss_power);

out:
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	return r;
}

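/**
 * DOC: smartshift_dgpu_power
 *
 * Read-only. On SmartShift platforms, shows the dGPU's share of the
 * shiftable power budget as a percentage, as reported by the
 * AMDGPU_PP_SENSOR_SS_DGPU_SHARE sensor.
 */
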
static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
						char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t ss_power, size;
	int r = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
				   (void *)&ss_power, &size);
	if (r)
		goto out;

	r = sysfs_emit(buf, "%u%%\n", ss_power);

out:
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	return r;
}

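/**
 * DOC: smartshift_bias
 *
 * Reading returns the current SmartShift bias; writing an integer sets
 * it, clamped to [AMDGPU_SMARTSHIFT_MIN_BIAS, AMDGPU_SMARTSHIFT_MAX_BIAS].
 * The bias steers the power-shift heuristics between the APU and the
 * dGPU.
 */
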
static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	int r = 0;

	r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);

	return r;
}

static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int r = 0;
	int bias = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

	r = kstrtoint(buf, 10, &bias);
	if (r)
		goto out;

	if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
		bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
	else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
		bias = AMDGPU_SMARTSHIFT_MIN_BIAS;

	amdgpu_smartshift_bias = bias;
	r = count;

out:
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	return r;
}

static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
				uint32_t mask, enum amdgpu_device_attr_states *states)
{
	uint32_t ss_power, size;

	if (!amdgpu_acpi_is_power_shift_control_supported())
		*states = ATTR_STATE_UNSUPPORTED;
	else if ((adev->flags & AMD_IS_PX) &&
		 !amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
		*states = ATTR_STATE_UNSUPPORTED;
	else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
					(void *)&ss_power, &size))
		*states = ATTR_STATE_UNSUPPORTED;
	else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
					(void *)&ss_power, &size))
		*states = ATTR_STATE_UNSUPPORTED;

	return 0;
}

static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			       uint32_t mask, enum amdgpu_device_attr_states *states)
{
	uint32_t ss_power, size;

	if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
		*states = ATTR_STATE_UNSUPPORTED;
	else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
					(void *)&ss_power, &size))
		*states = ATTR_STATE_UNSUPPORTED;
	else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
					(void *)&ss_power, &size))
		*states = ATTR_STATE_UNSUPPORTED;

	return 0;
}

static struct amdgpu_device_attr amdgpu_device_attrs[] = {
	AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC,
			      .attr_update = ss_power_attr_update),
	AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power, ATTR_FLAG_BASIC,
			      .attr_update = ss_power_attr_update),
	AMDGPU_DEVICE_ATTR_RW(smartshift_bias, ATTR_FLAG_BASIC,
			      .attr_update = ss_bias_attr_update),
};

static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			       uint32_t mask, enum amdgpu_device_attr_states *states)
{
	struct device_attribute *dev_attr = &attr->dev_attr;
	const char *attr_name = dev_attr->attr.name;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	enum amd_asic_type asic_type = adev->asic_type;

	if (!(attr->flags & mask)) {
		*states = ATTR_STATE_UNSUPPORTED;
		return 0;
	}

#define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name))

	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
		if (asic_type < CHIP_VEGA10)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
		if (asic_type < CHIP_VEGA10 ||
		    asic_type == CHIP_ARCTURUS ||
		    asic_type == CHIP_ALDEBARAN)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
		if (asic_type < CHIP_VEGA20)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
		*states = ATTR_STATE_UNSUPPORTED;
		if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
		    (is_support_sw_smu(adev) && adev->smu.is_apu) ||
		    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
			*states = ATTR_STATE_SUPPORTED;
	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
		if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pcie_bw)) {
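		/* PCIe perf counters are not exposed on APUs */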
		if (adev->flags & AMD_IS_APU)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(unique_id)) {
		if (asic_type != CHIP_VEGA10 &&
		    asic_type != CHIP_VEGA20 &&
		    asic_type != CHIP_ARCTURUS &&
		    asic_type != CHIP_ALDEBARAN)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_features)) {
		if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(gpu_metrics)) {
		if (asic_type < CHIP_VEGA12)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
		if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
		if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
		if (!adev->powerplay.pp_funcs->get_power_profile_mode ||
		    amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
			*states = ATTR_STATE_UNSUPPORTED;
	}

	switch (asic_type) {
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
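		/* mclk, socclk and fclk levels are not user-settable on
		 * these ASICs, so drop write access to the files */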
		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
			dev_attr->attr.mode &= ~S_IWUGO;
			dev_attr->store = NULL;
		}
		break;
	default:
		break;
	}

	if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
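		/* dcefclk forcing is not supported on NAVI10 and newer,
		 * so the file is read-only there */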
		if (asic_type >= CHIP_NAVI10) {
			dev_attr->attr.mode &= ~S_IWUGO;
			dev_attr->store = NULL;
		}
	}

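	/* a virtual function is not allowed to change these settings */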
	if (amdgpu_sriov_vf(adev)) {
		dev_attr->attr.mode &= ~S_IWUGO;
		dev_attr->store = NULL;
	}

#undef DEVICE_ATTR_IS

	return 0;
}

static int amdgpu_device_attr_create(struct amdgpu_device *adev,
				     struct amdgpu_device_attr *attr,
				     uint32_t mask, struct list_head *attr_list)
{
	int ret = 0;
	struct device_attribute *dev_attr = &attr->dev_attr;
	const char *name = dev_attr->attr.name;
	enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
	struct amdgpu_device_attr_entry *attr_entry;

	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			   uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;

	BUG_ON(!attr);

	attr_update = attr->attr_update ? attr->attr_update : default_attr_update;

	ret = attr_update(adev, attr, mask, &attr_states);
	if (ret) {
		dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
			name, ret);
		return ret;
	}

	if (attr_states == ATTR_STATE_UNSUPPORTED)
		return 0;

	ret = device_create_file(adev->dev, dev_attr);
	if (ret) {
		dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
			name, ret);
	}

	attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
	if (!attr_entry)
		return -ENOMEM;

	attr_entry->attr = attr;
	INIT_LIST_HEAD(&attr_entry->entry);

	list_add_tail(&attr_entry->entry, attr_list);

	return ret;
}

2193
2194static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
2195{
2196 struct device_attribute *dev_attr = &attr->dev_attr;
2197
2198 device_remove_file(adev->dev, dev_attr);
2199}
2200
static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
					     struct list_head *attr_list);

static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
					    struct amdgpu_device_attr *attrs,
					    uint32_t counts,
					    uint32_t mask,
					    struct list_head *attr_list)
{
	int ret = 0;
	uint32_t i = 0;

	for (i = 0; i < counts; i++) {
		ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
		if (ret)
			goto failed;
	}

	return 0;

failed:
	amdgpu_device_attr_remove_groups(adev, attr_list);

	return ret;
}

static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
					     struct list_head *attr_list)
{
	struct amdgpu_device_attr_entry *entry, *entry_tmp;

	if (list_empty(attr_list))
		return;

	list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
		amdgpu_device_attr_remove(adev, entry->attr);
		list_del(&entry->entry);
		kfree(entry);
	}
}

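/*
 * hwmon interface. The temperature channels below report values in
 * millidegrees Celsius, per the standard hwmon sysfs ABI.
 */
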
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int channel = to_sensor_dev_attr(attr)->index;
	int r, temp = 0, size = sizeof(temp);

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	switch (channel) {
	case PP_TEMP_JUNCTION:
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
					   (void *)&temp, &size);
		break;
	case PP_TEMP_EDGE:
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
					   (void *)&temp, &size);
		break;
	case PP_TEMP_MEM:
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
					   (void *)&temp, &size);
		break;
	default:
		r = -EINVAL;
		break;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return sysfs_emit(buf, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
						     struct device_attribute *attr,
						     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_hotspot_temp;
	else
		temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;

	return sysfs_emit(buf, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
						 struct device_attribute *attr,
						 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_mem_temp;
	else
		temp = adev->pm.dpm.thermal.max_mem_crit_temp;

	return sysfs_emit(buf, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	int channel = to_sensor_dev_attr(attr)->index;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	return sysfs_emit(buf, "%s\n", temp_label[channel].label);
}

2354static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
2355 struct device_attribute *attr,
2356 char *buf)
2357{
2358 struct amdgpu_device *adev = dev_get_drvdata(dev);
2359 int channel = to_sensor_dev_attr(attr)->index;
2360 int temp = 0;
2361
2362 if (channel >= PP_TEMP_MAX)
2363 return -EINVAL;
2364
2365 switch (channel) {
2366 case PP_TEMP_JUNCTION:
2367 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
2368 break;
2369 case PP_TEMP_EDGE:
2370 temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
2371 break;
2372 case PP_TEMP_MEM:
2373 temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
2374 break;
2375 }
2376
2377 return sysfs_emit(buf, "%d\n", temp);
2378}
2379
2380static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2381 struct device_attribute *attr,
2382 char *buf)
2383{
2384 struct amdgpu_device *adev = dev_get_drvdata(dev);
2385 u32 pwm_mode = 0;
2386 int ret;
2387
2388 if (amdgpu_in_reset(adev))
2389 return -EPERM;
2390 if (adev->in_suspend && !adev->in_runpm)
2391 return -EPERM;
2392
2393 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2394 if (ret < 0) {
2395 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2396 return ret;
2397 }
2398
2399 if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
2400 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2401 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2402 return -EINVAL;
2403 }
2404
2405 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2406
2407 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2408 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2409
2410 return sysfs_emit(buf, "%u\n", pwm_mode);
2411}
2412
2413static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2414 struct device_attribute *attr,
2415 const char *buf,
2416 size_t count)
2417{
2418 struct amdgpu_device *adev = dev_get_drvdata(dev);
2419 int err, ret;
2420 int value;
2421
2422 if (amdgpu_in_reset(adev))
2423 return -EPERM;
2424 if (adev->in_suspend && !adev->in_runpm)
2425 return -EPERM;
2426
2427 err = kstrtoint(buf, 10, &value);
2428 if (err)
2429 return err;
2430
2431 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2432 if (ret < 0) {
2433 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2434 return ret;
2435 }
2436
2437 if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
2438 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2439 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2440 return -EINVAL;
2441 }
2442
2443 amdgpu_dpm_set_fan_control_mode(adev, value);
2444
2445 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2446 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2447
2448 return count;
2449}
2450
2451static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
2452 struct device_attribute *attr,
2453 char *buf)
2454{
2455 return sysfs_emit(buf, "%i\n", 0);
2456}
2457
2458static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
2459 struct device_attribute *attr,
2460 char *buf)
2461{
2462 return sysfs_emit(buf, "%i\n", 255);
2463}
2464
2465static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
2466 struct device_attribute *attr,
2467 const char *buf, size_t count)
2468{
2469 struct amdgpu_device *adev = dev_get_drvdata(dev);
2470 int err;
2471 u32 value;
2472 u32 pwm_mode;
2473
2474 if (amdgpu_in_reset(adev))
2475 return -EPERM;
2476 if (adev->in_suspend && !adev->in_runpm)
2477 return -EPERM;
2478
2479 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2480 if (err < 0) {
2481 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2482 return err;
2483 }
2484
2485 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2486 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2487 pr_info("manual fan speed control should be enabled first\n");
2488 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2489 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2490 return -EINVAL;
2491 }
2492
2493 err = kstrtou32(buf, 10, &value);
2494 if (err) {
2495 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2496 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2497 return err;
2498 }
2499
2500 if (adev->powerplay.pp_funcs->set_fan_speed_pwm)
2501 err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
2502 else
2503 err = -EINVAL;
2504
2505 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2506 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2507
2508 if (err)
2509 return err;
2510
2511 return count;
2512}
2513
2514static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
2515 struct device_attribute *attr,
2516 char *buf)
2517{
2518 struct amdgpu_device *adev = dev_get_drvdata(dev);
2519 int err;
2520 u32 speed = 0;
2521
2522 if (amdgpu_in_reset(adev))
2523 return -EPERM;
2524 if (adev->in_suspend && !adev->in_runpm)
2525 return -EPERM;
2526
2527 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2528 if (err < 0) {
2529 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2530 return err;
2531 }
2532
2533 if (adev->powerplay.pp_funcs->get_fan_speed_pwm)
2534 err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
2535 else
2536 err = -EINVAL;
2537
2538 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2539 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2540
2541 if (err)
2542 return err;
2543
2544 return sysfs_emit(buf, "%i\n", speed);
2545}
2546
2547static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
2548 struct device_attribute *attr,
2549 char *buf)
2550{
2551 struct amdgpu_device *adev = dev_get_drvdata(dev);
2552 int err;
2553 u32 speed = 0;
2554
2555 if (amdgpu_in_reset(adev))
2556 return -EPERM;
2557 if (adev->in_suspend && !adev->in_runpm)
2558 return -EPERM;
2559
2560 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2561 if (err < 0) {
2562 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2563 return err;
2564 }
2565
2566 if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
2567 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
2568 else
2569 err = -EINVAL;
2570
2571 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2572 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2573
2574 if (err)
2575 return err;
2576
2577 return sysfs_emit(buf, "%i\n", speed);
2578}
2579
2580static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
2581 struct device_attribute *attr,
2582 char *buf)
2583{
2584 struct amdgpu_device *adev = dev_get_drvdata(dev);
2585 u32 min_rpm = 0;
2586 u32 size = sizeof(min_rpm);
2587 int r;
2588
2589 if (amdgpu_in_reset(adev))
2590 return -EPERM;
2591 if (adev->in_suspend && !adev->in_runpm)
2592 return -EPERM;
2593
2594 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2595 if (r < 0) {
2596 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2597 return r;
2598 }
2599
2600 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
2601 (void *)&min_rpm, &size);
2602
2603 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2604 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2605
2606 if (r)
2607 return r;
2608
2609 return sysfs_emit(buf, "%d\n", min_rpm);
2610}
2611
2612static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
2613 struct device_attribute *attr,
2614 char *buf)
2615{
2616 struct amdgpu_device *adev = dev_get_drvdata(dev);
2617 u32 max_rpm = 0;
2618 u32 size = sizeof(max_rpm);
2619 int r;
2620
2621 if (amdgpu_in_reset(adev))
2622 return -EPERM;
2623 if (adev->in_suspend && !adev->in_runpm)
2624 return -EPERM;
2625
2626 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2627 if (r < 0) {
2628 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2629 return r;
2630 }
2631
2632 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
2633 (void *)&max_rpm, &size);
2634
2635 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2636 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2637
2638 if (r)
2639 return r;
2640
2641 return sysfs_emit(buf, "%d\n", max_rpm);
2642}
2643
2644static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
2645 struct device_attribute *attr,
2646 char *buf)
2647{
2648 struct amdgpu_device *adev = dev_get_drvdata(dev);
2649 int err;
2650 u32 rpm = 0;
2651
2652 if (amdgpu_in_reset(adev))
2653 return -EPERM;
2654 if (adev->in_suspend && !adev->in_runpm)
2655 return -EPERM;
2656
2657 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2658 if (err < 0) {
2659 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2660 return err;
2661 }
2662
2663 if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
2664 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
2665 else
2666 err = -EINVAL;
2667
2668 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2669 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2670
2671 if (err)
2672 return err;
2673
2674 return sysfs_emit(buf, "%i\n", rpm);
2675}
2676
2677static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
2678 struct device_attribute *attr,
2679 const char *buf, size_t count)
2680{
2681 struct amdgpu_device *adev = dev_get_drvdata(dev);
2682 int err;
2683 u32 value;
2684 u32 pwm_mode;
2685
2686 if (amdgpu_in_reset(adev))
2687 return -EPERM;
2688 if (adev->in_suspend && !adev->in_runpm)
2689 return -EPERM;
2690
2691 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2692 if (err < 0) {
2693 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2694 return err;
2695 }
2696
2697 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2698
2699 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2700 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2701 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2702 return -ENODATA;
2703 }
2704
2705 err = kstrtou32(buf, 10, &value);
2706 if (err) {
2707 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2708 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2709 return err;
2710 }
2711
2712 if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
2713 err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
2714 else
2715 err = -EINVAL;
2716
2717 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2718 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2719
2720 if (err)
2721 return err;
2722
2723 return count;
2724}
2725
2726static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
2727 struct device_attribute *attr,
2728 char *buf)
2729{
2730 struct amdgpu_device *adev = dev_get_drvdata(dev);
2731 u32 pwm_mode = 0;
2732 int ret;
2733
2734 if (amdgpu_in_reset(adev))
2735 return -EPERM;
2736 if (adev->in_suspend && !adev->in_runpm)
2737 return -EPERM;
2738
2739 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2740 if (ret < 0) {
2741 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2742 return ret;
2743 }
2744
2745 if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
2746 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2747 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2748 return -EINVAL;
2749 }
2750
2751 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2752
2753 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2754 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2755
2756 return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
2757}
2758
2759static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
2760 struct device_attribute *attr,
2761 const char *buf,
2762 size_t count)
2763{
2764 struct amdgpu_device *adev = dev_get_drvdata(dev);
2765 int err;
2766 int value;
2767 u32 pwm_mode;
2768
2769 if (amdgpu_in_reset(adev))
2770 return -EPERM;
2771 if (adev->in_suspend && !adev->in_runpm)
2772 return -EPERM;
2773
2774 err = kstrtoint(buf, 10, &value);
2775 if (err)
2776 return err;
2777
2778 if (value == 0)
2779 pwm_mode = AMD_FAN_CTRL_AUTO;
2780 else if (value == 1)
2781 pwm_mode = AMD_FAN_CTRL_MANUAL;
2782 else
2783 return -EINVAL;
2784
2785 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2786 if (err < 0) {
2787 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2788 return err;
2789 }
2790
2791 if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
2792 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2793 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2794 return -EINVAL;
2795 }
2796 amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2797
2798 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2799 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2800
2801 return count;
2802}
2803
2804static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
2805 struct device_attribute *attr,
2806 char *buf)
2807{
2808 struct amdgpu_device *adev = dev_get_drvdata(dev);
2809 u32 vddgfx;
2810 int r, size = sizeof(vddgfx);
2811
2812 if (amdgpu_in_reset(adev))
2813 return -EPERM;
2814 if (adev->in_suspend && !adev->in_runpm)
2815 return -EPERM;
2816
2817 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2818 if (r < 0) {
2819 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2820 return r;
2821 }
2822
	/* get the voltage */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
				   (void *)&vddgfx, &size);
2826
2827 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2828 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2829
2830 if (r)
2831 return r;
2832
2833 return sysfs_emit(buf, "%d\n", vddgfx);
2834}
2835
2836static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
2837 struct device_attribute *attr,
2838 char *buf)
2839{
2840 return sysfs_emit(buf, "vddgfx\n");
2841}
2842
2843static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
2844 struct device_attribute *attr,
2845 char *buf)
2846{
2847 struct amdgpu_device *adev = dev_get_drvdata(dev);
2848 u32 vddnb;
2849 int r, size = sizeof(vddnb);
2850
2851 if (amdgpu_in_reset(adev))
2852 return -EPERM;
2853 if (adev->in_suspend && !adev->in_runpm)
2854 return -EPERM;
2855
	/* only APUs have vddnb */
	if (!(adev->flags & AMD_IS_APU))
		return -EINVAL;
2859
2860 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2861 if (r < 0) {
2862 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2863 return r;
2864 }
2865
	/* get the voltage */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
				   (void *)&vddnb, &size);
2869
2870 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2871 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2872
2873 if (r)
2874 return r;
2875
2876 return sysfs_emit(buf, "%d\n", vddnb);
2877}
2878
2879static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
2880 struct device_attribute *attr,
2881 char *buf)
2882{
2883 return sysfs_emit(buf, "vddnb\n");
2884}
2885
2886static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
2887 struct device_attribute *attr,
2888 char *buf)
2889{
2890 struct amdgpu_device *adev = dev_get_drvdata(dev);
2891 u32 query = 0;
2892 int r, size = sizeof(u32);
	unsigned int uw;
2894
2895 if (amdgpu_in_reset(adev))
2896 return -EPERM;
2897 if (adev->in_suspend && !adev->in_runpm)
2898 return -EPERM;
2899
2900 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2901 if (r < 0) {
2902 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2903 return r;
2904 }
2905
	/* get the average power */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
				   (void *)&query, &size);
2909
2910 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2911 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2912
2913 if (r)
2914 return r;
2915
	/* convert to microwatts */
	uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
2918
2919 return sysfs_emit(buf, "%u\n", uw);
2920}
2921
2922static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
2923 struct device_attribute *attr,
2924 char *buf)
2925{
2926 return sysfs_emit(buf, "%i\n", 0);
2927}
2928
2929
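/*
 * Common helper for the power1/power2 cap, cap_max and cap_default
 * shows: query the limit selected by @pp_limit_level and report it in
 * microWatts.
 */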
2930static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
2931 struct device_attribute *attr,
2932 char *buf,
2933 enum pp_power_limit_level pp_limit_level)
2934{
2935 struct amdgpu_device *adev = dev_get_drvdata(dev);
2936 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
2937 enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
2938 uint32_t limit;
2939 ssize_t size;
2940 int r;
2941
2942 if (amdgpu_in_reset(adev))
2943 return -EPERM;
2944 if (adev->in_suspend && !adev->in_runpm)
2945 return -EPERM;
2946
	if (!(pp_funcs && pp_funcs->get_power_limit))
2948 return -ENODATA;
2949
2950 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2951 if (r < 0) {
2952 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2953 return r;
2954 }
2955
2956 r = pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit,
2957 pp_limit_level, power_type);
2958
2959 if (!r)
2960 size = sysfs_emit(buf, "%u\n", limit * 1000000);
2961 else
2962 size = sysfs_emit(buf, "\n");
2963
2964 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2965 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2966
2967 return size;
2968}
2969
2970
2971static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
2972 struct device_attribute *attr,
2973 char *buf)
2974{
	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
}
2978
2979static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
2980 struct device_attribute *attr,
2981 char *buf)
2982{
	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
}
2986
2987static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
2988 struct device_attribute *attr,
2989 char *buf)
2990{
	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
}
2994
2995static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
2996 struct device_attribute *attr,
2997 char *buf)
2998{
2999 int limit_type = to_sensor_dev_attr(attr)->index;
3000
3001 return sysfs_emit(buf, "%s\n",
3002 limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT");
3003}
3004
3005static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
3006 struct device_attribute *attr,
3007 const char *buf,
3008 size_t count)
3009{
3010 struct amdgpu_device *adev = dev_get_drvdata(dev);
3011 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
3012 int limit_type = to_sensor_dev_attr(attr)->index;
3013 int err;
3014 u32 value;
3015
3016 if (amdgpu_in_reset(adev))
3017 return -EPERM;
3018 if (adev->in_suspend && !adev->in_runpm)
3019 return -EPERM;
3020
3021 if (amdgpu_sriov_vf(adev))
3022 return -EINVAL;
3023
3024 err = kstrtou32(buf, 10, &value);
3025 if (err)
3026 return err;
3027
	value = value / 1000000; /* convert from microwatts to watts */
	value |= limit_type << 24; /* encode the PPT limit type in the top byte */
3030
3031 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
3032 if (err < 0) {
3033 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3034 return err;
3035 }
3036
3037 if (pp_funcs && pp_funcs->set_power_limit)
3038 err = pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
3039 else
3040 err = -EINVAL;
3041
3042 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3043 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3044
3045 if (err)
3046 return err;
3047
3048 return count;
3049}
3050
3051static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
3052 struct device_attribute *attr,
3053 char *buf)
3054{
3055 struct amdgpu_device *adev = dev_get_drvdata(dev);
3056 uint32_t sclk;
3057 int r, size = sizeof(sclk);
3058
3059 if (amdgpu_in_reset(adev))
3060 return -EPERM;
3061 if (adev->in_suspend && !adev->in_runpm)
3062 return -EPERM;
3063
3064 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
3065 if (r < 0) {
3066 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3067 return r;
3068 }
3069
	/* get the sclk */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
				   (void *)&sclk, &size);
3073
3074 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3075 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3076
3077 if (r)
3078 return r;
3079
3080 return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
3081}
3082
3083static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
3084 struct device_attribute *attr,
3085 char *buf)
3086{
3087 return sysfs_emit(buf, "sclk\n");
3088}
3089
3090static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
3091 struct device_attribute *attr,
3092 char *buf)
3093{
3094 struct amdgpu_device *adev = dev_get_drvdata(dev);
3095 uint32_t mclk;
3096 int r, size = sizeof(mclk);
3097
3098 if (amdgpu_in_reset(adev))
3099 return -EPERM;
3100 if (adev->in_suspend && !adev->in_runpm)
3101 return -EPERM;
3102
3103 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
3104 if (r < 0) {
3105 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3106 return r;
3107 }
3108
	/* get the mclk */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
				   (void *)&mclk, &size);
3112
3113 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3114 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3115
3116 if (r)
3117 return r;
3118
3119 return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
3120}
3121
3122static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
3123 struct device_attribute *attr,
3124 char *buf)
3125{
3126 return sysfs_emit(buf, "mclk\n");
3127}
3128
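/**
 * DOC: hwmon
 *
 * The amdgpu driver exposes the following sensor interfaces via hwmon:
 *
 * - GPU temperature (via the on-die sensors)
 *
 * - GPU voltage
 *
 * - Northbridge voltage (APUs only)
 *
 * - GPU power
 *
 * - GPU fan
 *
 * - GPU gfx/compute engine clock
 *
 * - GPU memory clock (dGPU only)
 *
 * hwmon interfaces for GPU temperature:
 *
 * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius
 *
 * - temp[1-3]_label: temperature channel label (edge, junction or mem)
 *
 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
 *
 * - temp[1-3]_crit_hyst: temperature hysteresis for the critical limit in
 *   millidegrees Celsius
 *
 * - temp[1-3]_emergency: temperature emergency max value (asic shutdown)
 *   in millidegrees Celsius
 *
 * The junction (temp2) and mem (temp3) channels are exposed on SOC15
 * dGPUs only.
 *
 * hwmon interfaces for GPU voltage:
 *
 * - in0_input: the voltage on the GPU in millivolts
 *
 * - in1_input: the voltage on the Northbridge in millivolts (APUs only)
 *
 * hwmon interfaces for GPU power:
 *
 * - power1_average: average power used by the GPU in microWatts
 *
 * - power1_cap_min: minimum cap supported in microWatts
 *
 * - power1_cap_max: maximum cap supported in microWatts
 *
 * - power1_cap: selected power cap in microWatts
 *
 * - power1_cap_default: default power cap in microWatts
 *
 * The power2_* files expose the fast PPT limit on Vangogh only.
 *
 * hwmon interfaces for GPU fan:
 *
 * - pwm1: pulse width modulation fan level (0-255)
 *
 * - pwm1_enable: pulse width modulation fan control method (0: no fan
 *   speed control, 1: manual fan speed control, 2: automatic fan speed
 *   control)
 *
 * - pwm1_min: pulse width modulation fan control minimum level (0)
 *
 * - pwm1_max: pulse width modulation fan control maximum level (255)
 *
 * - fan1_min: minimum valid fan speed in revolutions per minute (RPM)
 *
 * - fan1_max: maximum valid fan speed in RPM
 *
 * - fan1_input: current fan speed in RPM
 *
 * - fan1_target: desired fan speed in RPM (manual mode only)
 *
 * - fan1_enable: fan control mode (0: automatic, 1: manual)
 *
 * hwmon interfaces for GPU clocks:
 *
 * - freq1_input: the gfx/compute clock in hertz
 *
 * - freq2_input: the memory clock in hertz (dGPU only)
 *
 * You can use hwmon tools like sensors to view this information on your
 * system.
 */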
3129
3214static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
3215static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
3216static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
3217static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
3218static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
3219static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
3220static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
3221static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
3222static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
3223static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
3224static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
3225static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
3226static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
3227static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
3228static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
3229static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
3230static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
3231static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
3232static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
3233static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
3234static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
3235static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
3236static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
3237static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
3238static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
3239static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
3240static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
3241static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
3242static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
3243static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
3244static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
3245static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
3246static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
3247static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
3248static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
3249static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
3250static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
3251static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
3252static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
3253static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
3254static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
3255static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
3256static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
3257static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
3258
3259static struct attribute *hwmon_attributes[] = {
3260 &sensor_dev_attr_temp1_input.dev_attr.attr,
3261 &sensor_dev_attr_temp1_crit.dev_attr.attr,
3262 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
3263 &sensor_dev_attr_temp2_input.dev_attr.attr,
3264 &sensor_dev_attr_temp2_crit.dev_attr.attr,
3265 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
3266 &sensor_dev_attr_temp3_input.dev_attr.attr,
3267 &sensor_dev_attr_temp3_crit.dev_attr.attr,
3268 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
3269 &sensor_dev_attr_temp1_emergency.dev_attr.attr,
3270 &sensor_dev_attr_temp2_emergency.dev_attr.attr,
3271 &sensor_dev_attr_temp3_emergency.dev_attr.attr,
3272 &sensor_dev_attr_temp1_label.dev_attr.attr,
3273 &sensor_dev_attr_temp2_label.dev_attr.attr,
3274 &sensor_dev_attr_temp3_label.dev_attr.attr,
3275 &sensor_dev_attr_pwm1.dev_attr.attr,
3276 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
3277 &sensor_dev_attr_pwm1_min.dev_attr.attr,
3278 &sensor_dev_attr_pwm1_max.dev_attr.attr,
3279 &sensor_dev_attr_fan1_input.dev_attr.attr,
3280 &sensor_dev_attr_fan1_min.dev_attr.attr,
3281 &sensor_dev_attr_fan1_max.dev_attr.attr,
3282 &sensor_dev_attr_fan1_target.dev_attr.attr,
3283 &sensor_dev_attr_fan1_enable.dev_attr.attr,
3284 &sensor_dev_attr_in0_input.dev_attr.attr,
3285 &sensor_dev_attr_in0_label.dev_attr.attr,
3286 &sensor_dev_attr_in1_input.dev_attr.attr,
3287 &sensor_dev_attr_in1_label.dev_attr.attr,
3288 &sensor_dev_attr_power1_average.dev_attr.attr,
3289 &sensor_dev_attr_power1_cap_max.dev_attr.attr,
3290 &sensor_dev_attr_power1_cap_min.dev_attr.attr,
3291 &sensor_dev_attr_power1_cap.dev_attr.attr,
3292 &sensor_dev_attr_power1_cap_default.dev_attr.attr,
3293 &sensor_dev_attr_power1_label.dev_attr.attr,
3294 &sensor_dev_attr_power2_average.dev_attr.attr,
3295 &sensor_dev_attr_power2_cap_max.dev_attr.attr,
3296 &sensor_dev_attr_power2_cap_min.dev_attr.attr,
3297 &sensor_dev_attr_power2_cap.dev_attr.attr,
3298 &sensor_dev_attr_power2_cap_default.dev_attr.attr,
3299 &sensor_dev_attr_power2_label.dev_attr.attr,
3300 &sensor_dev_attr_freq1_input.dev_attr.attr,
3301 &sensor_dev_attr_freq1_label.dev_attr.attr,
3302 &sensor_dev_attr_freq2_input.dev_attr.attr,
3303 &sensor_dev_attr_freq2_label.dev_attr.attr,
3304 NULL
3305};
3306
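/*
 * Decide at registration time which hwmon attributes are visible for
 * this device, based on the virtualization mode, the chip family and
 * the callbacks the underlying powerplay implementation provides.
 */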
3307static umode_t hwmon_attributes_visible(struct kobject *kobj,
3308 struct attribute *attr, int index)
3309{
3310 struct device *dev = kobj_to_dev(kobj);
3311 struct amdgpu_device *adev = dev_get_drvdata(dev);
3312 umode_t effective_mode = attr->mode;
3313
	/* under multi-vf mode, the hwmon attributes are all not supported */
3315 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
3316 return 0;
3317
	/* fan control is not supported under pp one vf mode */
3319 if (amdgpu_sriov_is_pp_one_vf(adev) &&
3320 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3321 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3322 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3323 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3324 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3325 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3326 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3327 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3328 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3329 return 0;
3330
	/* Skip fan attributes if fan is not present */
3332 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3333 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3334 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3335 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3336 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3337 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3338 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3339 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3340 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3341 return 0;
3342
	/* Skip fan attributes on APU */
3344 if ((adev->flags & AMD_IS_APU) &&
3345 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3346 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3347 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3348 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3349 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3350 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3351 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3352 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3353 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3354 return 0;
3355
	/* Skip crit temp on APU */
3357 if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
3358 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3359 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
3360 return 0;
3361
	/* Skip limit attributes if DPM is not enabled */
3363 if (!adev->pm.dpm_enabled &&
3364 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3365 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
3366 attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3367 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3368 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3369 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3370 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3371 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3372 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3373 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3374 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3375 return 0;
3376
3377 if (!is_support_sw_smu(adev)) {
		/* mask fan attributes if we have no bindings for this asic to expose */
3379 if ((!adev->powerplay.pp_funcs->get_fan_speed_pwm &&
3380 attr == &sensor_dev_attr_pwm1.dev_attr.attr) ||
3381 (!adev->powerplay.pp_funcs->get_fan_control_mode &&
3382 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr))
3383 effective_mode &= ~S_IRUGO;
3384
3385 if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm &&
3386 attr == &sensor_dev_attr_pwm1.dev_attr.attr) ||
3387 (!adev->powerplay.pp_funcs->set_fan_control_mode &&
3388 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr))
3389 effective_mode &= ~S_IWUSR;
3390 }
3391
3392 if (((adev->family == AMDGPU_FAMILY_SI) ||
3393 ((adev->flags & AMD_IS_APU) &&
3394 (adev->asic_type != CHIP_VANGOGH))) &&
3395 (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
3397 attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
3398 attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
3399 return 0;
3400
3401 if (((adev->family == AMDGPU_FAMILY_SI) ||
3402 ((adev->flags & AMD_IS_APU) &&
3403 (adev->asic_type < CHIP_RENOIR))) &&
3404 (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
3405 return 0;
3406
3407 if (!is_support_sw_smu(adev)) {
		/* hide max/min values if we can't both query and manage the fan */
3409 if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm &&
3410 !adev->powerplay.pp_funcs->get_fan_speed_pwm) &&
3411 (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
3412 !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
3413 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3414 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
3415 return 0;
3416
3417 if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
3418 !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
3419 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3420 attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
3421 return 0;
3422 }
3423
3424 if ((adev->family == AMDGPU_FAMILY_SI ||
3425 adev->family == AMDGPU_FAMILY_KV) &&
3426 (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
3427 attr == &sensor_dev_attr_in0_label.dev_attr.attr))
3428 return 0;
3429
	/* only APUs have vddnb */
3431 if (!(adev->flags & AMD_IS_APU) &&
3432 (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
3433 attr == &sensor_dev_attr_in1_label.dev_attr.attr))
3434 return 0;
3435
	/* no mclk on APUs */
3437 if ((adev->flags & AMD_IS_APU) &&
3438 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
3439 attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
3440 return 0;
3441
	/* only SOC15 dGPUs support hotspot and mem temperatures */
3443 if (((adev->flags & AMD_IS_APU) ||
3444 adev->asic_type < CHIP_VEGA10) &&
3445 (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
3446 attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
3447 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
3448 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
3449 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3450 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3451 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
3452 attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
3453 attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
3454 attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
3455 attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
3456 return 0;
3457
	/* only Vangogh has fast PPT limit and power labels */
	if ((adev->asic_type != CHIP_VANGOGH) &&
3460 (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
3461 attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
3462 attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
3463 attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
3464 attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
3465 attr == &sensor_dev_attr_power2_label.dev_attr.attr))
3466 return 0;
3467
3468 return effective_mode;
3469}
3470
3471static const struct attribute_group hwmon_attrgroup = {
3472 .attrs = hwmon_attributes,
3473 .is_visible = hwmon_attributes_visible,
3474};
3475
3476static const struct attribute_group *hwmon_groups[] = {
3477 &hwmon_attrgroup,
3478 NULL
3479};
3480
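/*
 * Register the hwmon device and create the sysfs attributes that match
 * this device's virtualization mode.
 */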
3481int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
3482{
3483 int ret;
3484 uint32_t mask = 0;
3485
3486 if (adev->pm.sysfs_initialized)
3487 return 0;
3488
3489 if (adev->pm.dpm_enabled == 0)
3490 return 0;
3491
3492 INIT_LIST_HEAD(&adev->pm.pm_attr_list);
3493
3494 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
3495 DRIVER_NAME, adev,
3496 hwmon_groups);
3497 if (IS_ERR(adev->pm.int_hwmon_dev)) {
3498 ret = PTR_ERR(adev->pm.int_hwmon_dev);
3499 dev_err(adev->dev,
3500 "Unable to register hwmon device: %d\n", ret);
3501 return ret;
3502 }
3503
3504 switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
3505 case SRIOV_VF_MODE_ONE_VF:
3506 mask = ATTR_FLAG_ONEVF;
3507 break;
3508 case SRIOV_VF_MODE_MULTI_VF:
3509 mask = 0;
3510 break;
3511 case SRIOV_VF_MODE_BARE_METAL:
3512 default:
3513 mask = ATTR_FLAG_MASK_ALL;
3514 break;
3515 }
3516
3517 ret = amdgpu_device_attr_create_groups(adev,
3518 amdgpu_device_attrs,
3519 ARRAY_SIZE(amdgpu_device_attrs),
3520 mask,
3521 &adev->pm.pm_attr_list);
3522 if (ret)
3523 return ret;
3524
3525 adev->pm.sysfs_initialized = true;
3526
3527 return 0;
3528}
3529
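/* Tear down the hwmon device and the sysfs attributes created at init. */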
3530void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
3531{
3532 if (adev->pm.dpm_enabled == 0)
3533 return;
3534
3535 if (adev->pm.int_hwmon_dev)
3536 hwmon_device_unregister(adev->pm.int_hwmon_dev);
3537
3538 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
3539}
3543
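/*
 * Debugfs info
 */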
3544#if defined(CONFIG_DEBUG_FS)
3545
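/* Print the current per-core CPU clocks (APUs with cclk DPM only). */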
static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
					   struct amdgpu_device *adev)
{
	uint16_t *p_val;
	uint32_t size;
	int i;

	if (is_support_cclk_dpm(adev)) {
		p_val = kcalloc(adev->smu.cpu_core_num, sizeof(uint16_t),
				GFP_KERNEL);
		if (!p_val)
			return;

		/* the sensor fills in one uint16_t clock value per CPU core */
		size = adev->smu.cpu_core_num * sizeof(uint16_t);
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
					    (void *)p_val, &size)) {
3558 for (i = 0; i < adev->smu.cpu_core_num; i++)
3559 seq_printf(m, "\t%u MHz (CPU%d)\n",
3560 *(p_val + i), i);
3561 }
3562
3563 kfree(p_val);
3564 }
3565}
3566
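/* Dump current clocks, voltages, power, temperature, load and engine states. */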
3567static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
3568{
3569 uint32_t value;
3570 uint64_t value64 = 0;
3571 uint32_t query = 0;
3572 int size;
3573
	/* GPU Clocks */
	size = sizeof(value);
3576 seq_printf(m, "GFX Clocks and Power:\n");
3577
3578 amdgpu_debugfs_prints_cpu_info(m, adev);
3579
3580 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
3581 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
3582 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
3583 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
3584 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
3585 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
3586 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
3587 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
3588 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
3589 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
3590 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
3591 seq_printf(m, "\t%u mV (VDDNB)\n", value);
3592 size = sizeof(uint32_t);
3593 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
3594 seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
3595 size = sizeof(value);
3596 seq_printf(m, "\n");
3597
	/* GPU Temp */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
3600 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
3601
	/* GPU Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
3604 seq_printf(m, "GPU Load: %u %%\n", value);
	/* MEM Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
3607 seq_printf(m, "MEM Load: %u %%\n", value);
3608
3609 seq_printf(m, "\n");
3610
	/* SMC feature mask */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
3613 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
3614
3615 if (adev->asic_type > CHIP_VEGA20) {
		/* VCN clocks */
3617 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
3618 if (!value) {
3619 seq_printf(m, "VCN: Disabled\n");
3620 } else {
3621 seq_printf(m, "VCN: Enabled\n");
3622 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3623 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3624 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3625 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3626 }
3627 }
3628 seq_printf(m, "\n");
3629 } else {
		/* UVD clocks */
3631 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
3632 if (!value) {
3633 seq_printf(m, "UVD: Disabled\n");
3634 } else {
3635 seq_printf(m, "UVD: Enabled\n");
3636 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3637 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3638 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3639 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3640 }
3641 }
3642 seq_printf(m, "\n");
3643
		/* VCE clocks */
3645 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
3646 if (!value) {
3647 seq_printf(m, "VCE: Disabled\n");
3648 } else {
3649 seq_printf(m, "VCE: Enabled\n");
3650 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
3651 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
3652 }
3653 }
3654 }
3655
3656 return 0;
3657}
3658
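/* Print the on/off state of each clockgating feature named in @flags. */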
3659static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
3660{
3661 int i;
3662
3663 for (i = 0; clocks[i].flag; i++)
3664 seq_printf(m, "\t%s: %s\n", clocks[i].name,
3665 (flags & clocks[i].flag) ? "On" : "Off");
3666}
3667
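/* Top-level show callback for the amdgpu_pm_info debugfs file. */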
3668static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
3669{
3670 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
3671 struct drm_device *dev = adev_to_drm(adev);
3672 u32 flags = 0;
3673 int r;
3674
3675 if (amdgpu_in_reset(adev))
3676 return -EPERM;
3677 if (adev->in_suspend && !adev->in_runpm)
3678 return -EPERM;
3679
3680 r = pm_runtime_get_sync(dev->dev);
3681 if (r < 0) {
3682 pm_runtime_put_autosuspend(dev->dev);
3683 return r;
3684 }
3685
3686 if (!adev->pm.dpm_enabled) {
3687 seq_printf(m, "dpm not enabled\n");
3688 pm_runtime_mark_last_busy(dev->dev);
3689 pm_runtime_put_autosuspend(dev->dev);
3690 return 0;
3691 }
3692
	if (!is_support_sw_smu(adev) &&
	    adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
		mutex_lock(&adev->pm.mutex);
		/* the condition above already guarantees the callback exists */
		adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
		mutex_unlock(&adev->pm.mutex);
		r = 0;
	} else {
		r = amdgpu_debugfs_pm_info_pp(m, adev);
	}
3705 if (r)
3706 goto out;
3707
3708 amdgpu_device_ip_get_clockgating_state(adev, &flags);
3709
3710 seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
3711 amdgpu_parse_cg_state(m, flags);
3712 seq_printf(m, "\n");
3713
3714out:
3715 pm_runtime_mark_last_busy(dev->dev);
3716 pm_runtime_put_autosuspend(dev->dev);
3717
3718 return r;
3719}
3720
3721DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);
3722
/*
 * amdgpu_pm_prv_buffer_read - read the SMU firmware's private buffer
 *
 * @f: file pointer
 * @buf: user buffer
 * @size: number of bytes to read
 * @pos: offset within the file
 *
 * Returns the number of bytes read on success or a negative error code.
 */
3728static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
3729 size_t size, loff_t *pos)
3730{
3731 struct amdgpu_device *adev = file_inode(f)->i_private;
3732 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
3733 void *pp_handle = adev->powerplay.pp_handle;
3734 size_t smu_prv_buf_size;
3735 void *smu_prv_buf;
3736
3737 if (amdgpu_in_reset(adev))
3738 return -EPERM;
3739 if (adev->in_suspend && !adev->in_runpm)
3740 return -EPERM;
3741
3742 if (pp_funcs && pp_funcs->get_smu_prv_buf_details)
3743 pp_funcs->get_smu_prv_buf_details(pp_handle, &smu_prv_buf,
3744 &smu_prv_buf_size);
3745 else
3746 return -ENOSYS;
3747
3748 if (!smu_prv_buf || !smu_prv_buf_size)
3749 return -EINVAL;
3750
3751 return simple_read_from_buffer(buf, size, pos, smu_prv_buf,
3752 smu_prv_buf_size);
3753}
3754
3755static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = {
3756 .owner = THIS_MODULE,
3757 .open = simple_open,
3758 .read = amdgpu_pm_prv_buffer_read,
3759 .llseek = default_llseek,
3760};
3761
3762#endif
3763
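/*
 * Create the amdgpu_pm_info and amdgpu_pm_prv_buffer debugfs files,
 * plus the SMU STB debug files, for this device.
 */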
3764void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
3765{
3766#if defined(CONFIG_DEBUG_FS)
3767 struct drm_minor *minor = adev_to_drm(adev)->primary;
3768 struct dentry *root = minor->debugfs_root;
3769
3770 debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
3771 &amdgpu_debugfs_pm_info_fops);
3772
3773 if (adev->pm.smu_prv_buffer_size > 0)
3774 debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root,
3775 adev,
3776 &amdgpu_debugfs_pm_prv_buffer_fops,
3777 adev->pm.smu_prv_buffer_size);
3778
3779 amdgpu_smu_stb_debug_fs_init(adev);
3780#endif
3781}
3782